diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/.gitignore b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..cbc922f8fc121b35ecc9ad895de725cf555022ff --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/.gitignore @@ -0,0 +1,137 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +pip-wheel-metadata/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. +*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +.python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +results/ +outputs/ + +.amltconfig +.test_output +*.hdf5 +*.h5 diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/1.3B-breakdown.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/1.3B-breakdown.png new file mode 100644 index 0000000000000000000000000000000000000000..baeb8f763d329873b21d9effe86d9e4fae8004b8 Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/1.3B-breakdown.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/Banner-benchmark.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/Banner-benchmark.png new file mode 100644 index 0000000000000000000000000000000000000000..d7e9b621b88bf723f84e6f20a152199f1a5d899d Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/Banner-benchmark.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/RLHF.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/RLHF.png new file mode 100644 index 0000000000000000000000000000000000000000..608b28dc1ee5b851791add2286616d5bd28c37bd Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/RLHF.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/democrat2.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/democrat2.png new file mode 100644 index 0000000000000000000000000000000000000000..4cc70825b10c84497691eed48f7e15ba69778aef Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/democrat2.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ds-chat-single.gif b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ds-chat-single.gif new file mode 100644 index 0000000000000000000000000000000000000000..8156d332e248886cb7c8811962ee8987e7bf1b96 Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ds-chat-single.gif differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ds-chat.gif b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ds-chat.gif new file mode 100644 index 0000000000000000000000000000000000000000..1f71316ff8020889c70f1557377876ca9a7fe3cd Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ds-chat.gif differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ds-shiba.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ds-shiba.png new file mode 
100644 index 0000000000000000000000000000000000000000..48e43425ae06bad9b69a5a77ba70bb4e59376b59 Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ds-shiba.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/e2e_RLHF.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/e2e_RLHF.png new file mode 100644 index 0000000000000000000000000000000000000000..caa76dffdc25cb6efff9bda81ca45cafeb50a0a5 Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/e2e_RLHF.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/four_blocks.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/four_blocks.png new file mode 100644 index 0000000000000000000000000000000000000000..15b052f2c5f0f6bd9ec64c6ab1245efd2aa05a77 Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/four_blocks.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ppo_trainer.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ppo_trainer.png new file mode 100644 index 0000000000000000000000000000000000000000..479ca4bb2ad72319dc293340ca9dce9bf59085c8 Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/ppo_trainer.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/reward_function.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/reward_function.png new file mode 100644 index 0000000000000000000000000000000000000000..4d4fa23248ada2a452d10f828dfea7c5a765525d Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/reward_function.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/shiba.png b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/shiba.png new file mode 100644 index 0000000000000000000000000000000000000000..0c03c83a5922797e50deed98cb495063dc8538f7 Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/image/shiba.png differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/video/release_v3.mp4 b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/video/release_v3.mp4 new file mode 100644 index 0000000000000000000000000000000000000000..b03a769801f48e3a46a8cc660c1f080d2c2dbae7 Binary files /dev/null and b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/assets/video/release_v3.mp4 differ diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/chat.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/chat.py new file mode 100644 index 0000000000000000000000000000000000000000..dc23fea327b7fae283df05355cd4e73f4f080873 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/chat.py @@ -0,0 +1,24 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import argparse +import subprocess + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--path", + type=str, + help="Directory containing trained actor model") + parser.add_argument( + "--max_new_tokens", + type=int, + default=128, + help="Maximum new tokens to generate per response", + ) + args = parser.parse_args() + + cmd = f"python3 ./inference/chatbot.py --path {args.path} --max_new_tokens {args.max_new_tokens}" + p = subprocess.Popen(cmd, shell=True) + p.wait() diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/rlhf/ppo_trainer.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/rlhf/ppo_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..22cba6be0ded02d908e0cd8757d9030ef5a7cde3 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/rlhf/ppo_trainer.py @@ -0,0 +1,371 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import torch +import torch.nn.functional as F +import time +import deepspeed +from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus +from deepspeed.accelerator import get_accelerator + +from dschat.utils.utils import print_rank_0 + + +def print_all_ranks(tag, value, rank): + world_size = torch.distributed.get_world_size() + all_tensor = torch.zeros(world_size, dtype=torch.float32).to( + get_accelerator().current_device_name()) + all_tensor[rank] = value + torch.distributed.all_reduce(all_tensor, op=torch.distributed.ReduceOp.SUM) + print_rank_0(f'{tag} {all_tensor}', rank) + + +def get_model_norm(model): + with torch.no_grad(): + total = 0.0 + for param in model.parameters(): + should_gather = hasattr( + param, + 'ds_id') and param.ds_status == ZeroParamStatus.NOT_AVAILABLE + with deepspeed.zero.GatheredParameters(param, + enabled=should_gather): + total += float(param.float().norm()) + + return total + + +def gather_log_probs(logits, labels): + log_probs = F.log_softmax(logits, dim=-1) + log_probs_labels = log_probs.gather(dim=-1, index=labels.unsqueeze(-1)) + return log_probs_labels.squeeze(-1) + + +class DeepSpeedPPOTrainer(): + + def __init__(self, rlhf_engine, args): + self.rlhf_engine = rlhf_engine + self.actor_model = self.rlhf_engine.actor + self.critic_model = self.rlhf_engine.critic + self.ref_model = self.rlhf_engine.ref + self.reward_model = self.rlhf_engine.reward + self.tokenizer = self.rlhf_engine.tokenizer + self.args = args + self.max_answer_seq_len = args.max_answer_seq_len + self.end_of_conversation_token_id = self.tokenizer( + args.end_of_conversation_token)['input_ids'][-1] + self.z3_enabled = args.actor_zero_stage == 3 + self.compute_fp32_loss = self.args.compute_fp32_loss + + # In case the generated experience is not valid (too short), we use the last valid + # generated experience. Alternatively, we can skip the step (on all workers). 
+ # For now, use the last valid experience, which is the simpler solution + self.last_generated_experience = None + + # These values can be changed + self.kl_ctl = 0.1 + self.clip_reward_value = 5 + self.cliprange = 0.2 + self.cliprange_value = 0.2 + self.gamma = 1.0 + self.lam = 0.95 + self.generate_time = 0.0 + + def _generate_sequence(self, prompts, mask, step): + + max_min_length = self.max_answer_seq_len + prompts.shape[1] + + # This has been added due to a probability/nan error that happens after + # meta-llama/Llama-2-7b-hf enabled do_sample: + # https://huggingface.co/meta-llama/Llama-2-7b-hf/commit/6fdf2e60f86ff2481f2241aaee459f85b5b0bbb9 + if self.actor_model.module.config.model_type == "llama": + kwargs = dict(do_sample=False) + else: + kwargs = dict() + + with torch.no_grad(): + seq = self.actor_model.module.generate( + prompts, + attention_mask=mask, + max_length=max_min_length, + pad_token_id=self.tokenizer.pad_token_id, + synced_gpus=self.z3_enabled, + **kwargs) + + # Filter out sequences with no answer (or a very short one). This happens when users directly use a pre-training ckpt without supervised finetuning + # NOTE: this can cause each GPU to end up with a different number of examples + batch_size = seq.shape[0] + prompt_length = prompts.shape[1] + self.prompt_length = prompt_length + ans = seq[:, prompt_length:] + valid_ans_len = (ans != self.tokenizer.pad_token_id).sum(dim=-1) + + if self.args.print_answers and (step % self.args.print_answers_interval + == 0): + print( + f"--- prompt --> step={step}, rank={torch.distributed.get_rank()}, {self.tokenizer.batch_decode(prompts, skip_special_tokens=True)}" + ) + print( + f"--- ans --> step={step}, rank={torch.distributed.get_rank()}, {self.tokenizer.batch_decode(ans, skip_special_tokens=True)}" + ) + + out_seq = [] + for i in range(batch_size): + if valid_ans_len[ + i] <= 1: # if the answer has no more than 1 valid token, drop it + print( + f'Dropping too short generated answer: {step=}: \n' + f'prompts: {self.tokenizer.batch_decode(prompts, skip_special_tokens=False)}\n' + f'answers: {self.tokenizer.batch_decode(ans, skip_special_tokens=False)}' + ) + continue + else: + out_seq.append(seq[i:i + 1]) + + if not out_seq: + print( + f'All generated results are too short for rank={self.args.local_rank} step={step}\n' + f'-> prompts: {self.tokenizer.batch_decode(prompts, skip_special_tokens=False)}\n' + f'-> answers: {self.tokenizer.batch_decode(ans, skip_special_tokens=False)}' + ) + return None + + out_seq = torch.cat(out_seq, dim=0) # concat output in the batch dim + + return out_seq + + def generate_experience(self, prompts, mask, step): + self.eval() + generate_start = time.time() + seq = self._generate_sequence(prompts, mask, step) + generate_end = time.time() + if seq is None: + assert self.last_generated_experience is not None, f'Invalid generated experience at {step=}' + prompts = self.last_generated_experience['prompts'] + seq = self.last_generated_experience['seq'] + else: + self.last_generated_experience = {'prompts': prompts, 'seq': seq} + self.train() + + pad_token_id = self.tokenizer.pad_token_id + attention_mask = seq.not_equal(pad_token_id).long() + with torch.no_grad(): + output = self.actor_model(seq, attention_mask=attention_mask) + output_ref = self.ref_model(seq, attention_mask=attention_mask) + reward_score = self.reward_model.forward_value( + seq, attention_mask, + prompt_length=self.prompt_length)['chosen_end_scores'].detach( + ) + values = self.critic_model.forward_value( + seq, attention_mask, return_value_only=True).detach()[:,
:-1] + + logits = output.logits + logits_ref = output_ref.logits + if self.compute_fp32_loss: + logits = logits.to(torch.float) + logits_ref = logits_ref.to(torch.float) + + self.generate_time = generate_end - generate_start + + return { + 'prompts': prompts, + 'logprobs': gather_log_probs(logits[:, :-1, :], seq[:, 1:]), + 'ref_logprobs': gather_log_probs(logits_ref[:, :-1, :], seq[:, + 1:]), + 'value': values, + 'rewards': reward_score, + 'input_ids': seq, + "attention_mask": attention_mask + } + + def compute_rewards(self, prompts, log_probs, ref_log_probs, reward_score, + action_mask): + + kl_divergence_estimate = -self.kl_ctl * (log_probs - ref_log_probs) + rewards = kl_divergence_estimate + start = prompts.shape[1] - 1 + ends = start + action_mask[:, start:].sum(1) + 1 + reward_clip = torch.clamp(reward_score, -self.clip_reward_value, + self.clip_reward_value) + batch_size = log_probs.shape[0] + for j in range(batch_size): + rewards[j, start:ends[j]][-1] += reward_clip[j] + + return rewards + + def train_rlhf(self, inputs): + # train the rlhf mode here + ### process the old outputs + prompts = inputs['prompts'] + log_probs = inputs['logprobs'] + ref_log_probs = inputs['ref_logprobs'] + reward_score = inputs['rewards'] + values = inputs['value'] + attention_mask = inputs['attention_mask'] + seq = inputs['input_ids'] + + start = prompts.size()[-1] - 1 + action_mask = attention_mask[:, 1:] + + old_values = values + with torch.no_grad(): + old_rewards = self.compute_rewards(prompts, log_probs, + ref_log_probs, reward_score, + action_mask) + ends = start + action_mask[:, start:].sum(1) + 1 + # we need to zero out the reward and value after the end of the conversation + # otherwise the advantage/return will be wrong + for i in range(old_rewards.shape[0]): + old_rewards[i, ends[i]:] = 0 + old_values[i, ends[i]:] = 0 + advantages, returns = self.get_advantages_and_returns( + old_values, old_rewards, start) + + ### process the new outputs + batch = {'input_ids': seq, "attention_mask": attention_mask} + actor_prob = self.actor_model(**batch, use_cache=False).logits + actor_log_prob = gather_log_probs(actor_prob[:, :-1, :], seq[:, 1:]) + actor_loss = self.actor_loss_fn(actor_log_prob[:, start:], + log_probs[:, start:], advantages, + action_mask[:, start:]) + self.actor_model.backward(actor_loss) + + if not self.args.align_overflow: + self.actor_model.step() + + value = self.critic_model.forward_value(**batch, + return_value_only=True, + use_cache=False)[:, :-1] + critic_loss = self.critic_loss_fn(value[:, start:], old_values[:, + start:], + returns, action_mask[:, start:]) + self.critic_model.backward(critic_loss) + + if self.args.align_overflow: + actor_overflow = self.actor_model.optimizer.check_overflow( + external=True) + critic_overflow = self.critic_model.optimizer.check_overflow( + external=True) + + rank = torch.distributed.get_rank() + if actor_overflow and not critic_overflow: + self.critic_model.optimizer.skip_step = True + print_rank_0( + "OVERFLOW: actor overflow, skipping both actor and critic steps", + rank) + elif not actor_overflow and critic_overflow: + self.actor_model.optimizer.skip_step = True + print_rank_0( + "OVERFLOW: critic overflow, skipping both actor and critic steps", + rank) + elif actor_overflow and critic_overflow: + print_rank_0( + "OVERFLOW: actor and critic overflow, skipping both actor and critic steps", + rank) + self.actor_model.step() + + self.critic_model.step() + + return actor_loss, critic_loss + + def get_overflow(self): + # Overflow is not expected 
when using bf16 + # Therefore, DeepSpeed's BF16_Optimizer does not maintain an overflow indication + if self.args.dtype == "bf16": + return False, False + + actor_overflow = self.actor_model.optimizer.overflow + critic_overflow = self.critic_model.optimizer.overflow + + return actor_overflow, critic_overflow + + def actor_loss_fn(self, logprobs, old_logprobs, advantages, mask): + ## policy gradient loss + log_ratio = (logprobs - old_logprobs) * mask + ratio = torch.exp(log_ratio) + pg_loss1 = -advantages * ratio + pg_loss2 = -advantages * torch.clamp(ratio, 1.0 - self.cliprange, + 1.0 + self.cliprange) + pg_loss = torch.sum(torch.max(pg_loss1, pg_loss2) * mask) / mask.sum() + return pg_loss + + def critic_loss_fn(self, values, old_values, returns, mask): + ## value loss + values_clipped = torch.clamp( + values, + old_values - self.cliprange_value, + old_values + self.cliprange_value, + ) + if self.compute_fp32_loss: + values = values.float() + values_clipped = values_clipped.float() + vf_loss1 = (values - returns)**2 + vf_loss2 = (values_clipped - returns)**2 + vf_loss = 0.5 * torch.sum( + torch.max(vf_loss1, vf_loss2) * mask) / mask.sum() + return vf_loss + + def get_advantages_and_returns(self, values, rewards, start): + # Adopted from https://github.com/CarperAI/trlx/blob/main/trlx/models/modeling_ppo.py#L134 + lastgaelam = 0 + advantages_reversed = [] + length = rewards.size()[-1] + for t in reversed(range(start, length)): + nextvalues = values[:, t + 1] if t < length - 1 else 0.0 + delta = rewards[:, t] + self.gamma * nextvalues - values[:, t] + lastgaelam = delta + self.gamma * self.lam * lastgaelam + advantages_reversed.append(lastgaelam) + advantages = torch.stack(advantages_reversed[::-1], dim=1) + returns = advantages + values[:, start:] + return advantages.detach(), returns + + def _validate_training_mode(self): + assert self.actor_model.module.training + assert self.critic_model.module.training + + def _validate_evaluation_mode(self): + assert not self.actor_model.module.training + assert not self.critic_model.module.training + assert not self.ref_model.module.training + assert not self.reward_model.module.training + + def train(self): + self.actor_model.train() + self.critic_model.train() + + def eval(self): + self.actor_model.eval() + self.critic_model.eval() + self.reward_model.eval() + self.ref_model.eval() + + def dump_model_norms(self, tag): + actor_model_norm = get_model_norm(self.actor_model) + ref_model_norm = get_model_norm(self.ref_model) + critic_model_norm = get_model_norm(self.critic_model) + reward_model_norm = get_model_norm(self.reward_model) + print_all_ranks(f'{tag} global_actor_model_norm', actor_model_norm, + self.args.local_rank) + print_all_ranks(f'{tag} global_ref_model_norm', ref_model_norm, + self.args.local_rank) + print_all_ranks(f'{tag} global_critic_model_norm', critic_model_norm, + self.args.local_rank) + print_all_ranks(f'{tag} global_reward_model_norm', reward_model_norm, + self.args.local_rank) + + +class DeepSpeedPPOTrainerUnsupervised(DeepSpeedPPOTrainer): + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def train_unsupervised(self, inputs, unsup_coef): + # Train the unsupervised model here + self._validate_training_mode() + + outputs = self.actor_model(**inputs, use_cache=False) + loss = outputs.loss + self.actor_model.backward(unsup_coef * loss) + self.actor_model.step() + + return loss diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/rlhf/rlhf_engine.py 
b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/rlhf/rlhf_engine.py new file mode 100644 index 0000000000000000000000000000000000000000..5b6778cc268592b91f45a209d1e953e3b17f4d2e --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/rlhf/rlhf_engine.py @@ -0,0 +1,305 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import time +import torch +import deepspeed +from deepspeed.ops.adam import FusedAdam +from deepspeed.ops.adam import DeepSpeedCPUAdam +from transformers import AutoModelForCausalLM, get_scheduler + +from dschat.utils.ds_utils import get_train_ds_config, get_eval_ds_config +from dschat.utils.module.lora import convert_linear_layer_to_lora, only_optimize_lora_parameters, make_model_gradient_checkpointing_compatible +from dschat.utils.model.model_utils import create_hf_model, create_critic_model +from dschat.utils.utils import get_optimizer_grouped_parameters +""" +TODOs: + * support HF models for critic (for debugging), must be a previously saved ckpt from step-2 + * determine ds_config/zero_stage based on model size, gpu style, world size, etc + - get model size by creating simple meta model + - 1.3b: zero-2 for actor/ref models, zero-0 for others + - 13b+: zero-3 for all models +""" + + +def log_init(model_name, stime=None): + if torch.distributed.get_rank() == 0: + tag = "start" if stime is None else "end" + suffix = "ing" if stime is None else "ed" + duration = "" + if stime is not None: + duration = "(duration: {:.2f}s)".format(time.time() - stime) + msg = f"[{tag}] Initializ{suffix} {model_name} Model [{tag}] {duration}" + stars = (90 - len(msg)) // 2 + extra_star = "*" if (90 - len(msg)) % 2 == 1 else "" + print("*" * stars + msg + "*" * stars + extra_star) + return time.time() + + +class DeepSpeedRLHFEngine(): + + def __init__(self, actor_model_name_or_path, critic_model_name_or_path, + tokenizer, args, num_total_iters): + self.args = args + self.num_total_iters = num_total_iters + self.tokenizer = tokenizer + + self.actor = self._init_actor( + actor_model_name_or_path=actor_model_name_or_path) + self.ref = self._init_ref( + actor_model_name_or_path=actor_model_name_or_path) + self.actor_ema = None + if self.args.enable_ema: + self.actor_ema = self._init_ema( + actor_model_name_or_path=actor_model_name_or_path) + self.critic = self._init_critic( + critic_model_name_or_path=critic_model_name_or_path) + self.reward = self._init_reward( + critic_model_name_or_path=critic_model_name_or_path) + if self.args.critic_gradient_checkpointing: + self.critic.gradient_checkpointing_enable() + + def _init_actor(self, actor_model_name_or_path): + stime = log_init("Actor") + + # DS Config + ds_config = get_train_ds_config( + offload=self.args.offload, + dtype=self.args.dtype, + stage=self.args.actor_zero_stage, + enable_hybrid_engine=self.args.enable_hybrid_engine, + inference_tp_size=self.args.inference_tp_size, + release_inference_cache=self.args.release_inference_cache, + pin_parameters=(not self.args.unpin_actor_parameters), + tp_gather_partition_size=self.args.tp_gather_partition_size, + max_out_tokens=self.args.max_prompt_seq_len + + self.args.max_answer_seq_len, + enable_tensorboard=self.args.enable_tensorboard, + enable_mixed_precision_lora=self.args.enable_mixed_precision_lora, + tb_path=self.args.tensorboard_path, + tb_name="step3_actor") + ds_config[ + 'train_micro_batch_size_per_gpu'] = self.args.per_device_training_batch_size + #TODO(jeff): 
we should probably set grad accumlation steps here as well for clarity + ds_config[ + 'train_batch_size'] = self.args.per_device_training_batch_size * torch.distributed.get_world_size( + ) * self.args.gradient_accumulation_steps_actor + + # Model + actor_model = create_hf_model( + model_class=AutoModelForCausalLM, + model_name_or_path=actor_model_name_or_path, + tokenizer=self.tokenizer, + ds_config=ds_config, + dropout=self.args.actor_dropout) + + # LoRA + if self.args.actor_lora_dim > 0: + actor_model = convert_linear_layer_to_lora( + actor_model, self.args.actor_lora_module_name, + self.args.actor_lora_dim) + if self.args.only_optimize_lora: + actor_model = only_optimize_lora_parameters(actor_model) + actor_model = make_model_gradient_checkpointing_compatible( + actor_model) + + # Optimizer + AdamOptimizer = DeepSpeedCPUAdam if self.args.offload else FusedAdam + optim_params = get_optimizer_grouped_parameters( + actor_model, self.args.actor_weight_decay, + self.args.actor_lora_learning_rate) + optim = AdamOptimizer(optim_params, + lr=self.args.actor_learning_rate, + betas=(0.9, 0.95)) + + # LR Scheduler + lr_scheduler = get_scheduler( + name=self.args.lr_scheduler_type, + optimizer=optim, + num_warmup_steps=self.args.num_warmup_steps, + num_training_steps=self.num_total_iters, + ) + + # DeepSpeed Engine + #TODO: move enable_hybrid_engine and pin_parameters to ds_config + actor_engine, *_ = deepspeed.initialize(model=actor_model, + optimizer=optim, + lr_scheduler=lr_scheduler, + config=ds_config) + + log_init("Actor", stime=stime) + + return actor_engine + + def _init_ref(self, actor_model_name_or_path): + stime = log_init("Ref") + # DS Config + zero_stage = self.args.actor_zero_stage + if zero_stage != 3: + # If actor is ZeRO-3 then we use it for everything, otherwise assume we have enough memory for ref model + zero_stage = 0 + ds_config = get_eval_ds_config(self.args.offload_reference_model, + self.args.dtype, zero_stage) + ds_config[ + 'train_micro_batch_size_per_gpu'] = self.args.per_device_training_batch_size + #TODO(jeff): we should probably set grad accumlation steps here as well for clarity + ds_config[ + 'train_batch_size'] = self.args.per_device_training_batch_size * torch.distributed.get_world_size( + ) * self.args.gradient_accumulation_steps_actor + + ref_model = create_hf_model(AutoModelForCausalLM, + actor_model_name_or_path, self.tokenizer, + ds_config) + + ref_engine, *_ = deepspeed.initialize(model=ref_model, + config=ds_config) + + log_init("Ref", stime=stime) + return ref_engine + + def _init_ema(self, actor_model_name_or_path): + stime = log_init("EMA") + # DS Config + zero_stage = self.args.actor_zero_stage + if zero_stage != 3: + # If actor is ZeRO-3 then we use it for everything, otherwise assume we have enough memory + zero_stage = 0 + ds_config = get_eval_ds_config(self.args.offload_reference_model, + self.args.dtype, zero_stage) + ds_config[ + 'train_micro_batch_size_per_gpu'] = self.args.per_device_training_batch_size + #TODO(jeff): we should probably set grad accumlation steps here as well for clarity + ds_config[ + 'train_batch_size'] = self.args.per_device_training_batch_size * torch.distributed.get_world_size( + ) * self.args.gradient_accumulation_steps_actor + + actor_model_ema = create_hf_model(AutoModelForCausalLM, + actor_model_name_or_path, + self.tokenizer, ds_config) + if self.args.actor_lora_dim > 0: + actor_model_ema = convert_linear_layer_to_lora( + actor_model_ema, self.args.actor_lora_module_name, + self.args.actor_lora_dim) + + ema_engine, *_ 
= deepspeed.initialize(model=actor_model_ema, + config=ds_config) + + log_init("EMA", stime=stime) + return ema_engine + + def _init_critic(self, critic_model_name_or_path): + stime = log_init("Critic") + ds_config = get_train_ds_config( + offload=self.args.offload, + dtype=self.args.dtype, + stage=self.args.critic_zero_stage, + enable_tensorboard=self.args.enable_tensorboard, + tb_path=self.args.tensorboard_path, + tb_name="step3_critic") + ds_config[ + 'train_micro_batch_size_per_gpu'] = self.args.per_device_training_batch_size + #TODO(jeff): we should probably set grad accumlation steps here as well for clarity + ds_config[ + 'train_batch_size'] = self.args.per_device_training_batch_size * torch.distributed.get_world_size( + ) * self.args.gradient_accumulation_steps + + ds_eval_config = get_eval_ds_config(offload=False, + dtype=self.args.dtype, + stage=self.args.critic_zero_stage) + # We need to set train batch size and micro batch size here to pass the sanity check of DeepSpeed engine. + ds_eval_config[ + 'train_micro_batch_size_per_gpu'] = self.args.per_device_training_batch_size + ds_eval_config[ + 'train_batch_size'] = self.args.per_device_training_batch_size * torch.distributed.get_world_size( + ) * self.args.gradient_accumulation_steps + + # Model + critic_model = create_critic_model( + model_name_or_path=critic_model_name_or_path, + tokenizer=self.tokenizer, + ds_config=ds_eval_config, + num_padding_at_beginning=self.args.num_padding_at_beginning, + rlhf_training=True, + dropout=self.args.critic_dropout, + zero_stage=self.args.critic_zero_stage) + + # LoRA + if self.args.critic_lora_dim > 0: + critic_model = convert_linear_layer_to_lora( + critic_model, self.args.critic_lora_module_name, + self.args.critic_lora_dim) + if self.args.only_optimize_lora: + critic_model = only_optimize_lora_parameters(critic_model) + critic_model = make_model_gradient_checkpointing_compatible( + critic_model) + + # Optimizer + AdamOptimizer = DeepSpeedCPUAdam if self.args.offload else FusedAdam + optim_params = get_optimizer_grouped_parameters( + critic_model, self.args.critic_weight_decay, + self.args.critic_lora_learning_rate) + optim = AdamOptimizer(optim_params, + lr=self.args.critic_learning_rate, + betas=(0.9, 0.95)) + + # LR Scheduler + lr_scheduler = get_scheduler( + name=self.args.lr_scheduler_type, + optimizer=optim, + num_warmup_steps=self.args.num_warmup_steps, + num_training_steps=self.num_total_iters, + ) + + # DeepSpeed Engine + critic_engine, *_ = deepspeed.initialize(model=critic_model, + optimizer=optim, + lr_scheduler=lr_scheduler, + config=ds_config) + + log_init("Critic", stime=stime) + return critic_engine + + def _init_reward(self, critic_model_name_or_path): + stime = log_init("Reward") + # DS Config + zero_stage = self.args.critic_zero_stage + if zero_stage != 3: + # If critic is ZeRO-3 then we use it for everything, otherwise assume we have enough memory + zero_stage = 0 + + ds_config = get_eval_ds_config(offload=self.args.offload, + dtype=self.args.dtype, + stage=zero_stage) + ds_config[ + 'train_micro_batch_size_per_gpu'] = self.args.per_device_training_batch_size + ds_config[ + 'train_batch_size'] = self.args.per_device_training_batch_size * torch.distributed.get_world_size( + ) * self.args.gradient_accumulation_steps + + ds_eval_config = get_eval_ds_config(offload=False, + dtype=self.args.dtype, + stage=zero_stage) + + # We need to set train batch size and micro batch size here to pass the sanity check of DeepSpeed engine. 
+ ds_eval_config[ + 'train_micro_batch_size_per_gpu'] = self.args.per_device_training_batch_size + ds_eval_config[ + 'train_batch_size'] = self.args.per_device_training_batch_size * torch.distributed.get_world_size( + ) * self.args.gradient_accumulation_steps + + # Model + reward_model = create_critic_model( + model_name_or_path=critic_model_name_or_path, + tokenizer=self.tokenizer, + ds_config=ds_eval_config, + num_padding_at_beginning=self.args.num_padding_at_beginning, + rlhf_training=True, + dropout=self.args.critic_dropout, + zero_stage=zero_stage) + + reward_engine, *_ = deepspeed.initialize(model=reward_model, + config=ds_config) + + log_init("Reward", stime=stime) + return reward_engine diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/data/data_utils.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/data/data_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..0b9d6df649f452d4c78f93403b46620add56f50a --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/data/data_utils.py @@ -0,0 +1,530 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Part of the code was adopted from https://github.com/microsoft/Megatron-DeepSpeed/blob/main/megatron/data/dataset_utils.py +""" +import torch +from torch.utils.data import Dataset, Subset, ConcatDataset +from torch.nn.utils.rnn import pad_sequence +import torch.nn.functional as F +from datasets import load_dataset +import numpy as np +import os +import hashlib +from itertools import chain +from dschat.utils.data import raw_datasets +from deepspeed.accelerator import get_accelerator + + +def get_raw_dataset(dataset_name, output_path, seed, local_rank): + + if "Dahoas/rm-static" in dataset_name: + return raw_datasets.DahoasRmstaticDataset(output_path, seed, + local_rank, dataset_name) + elif "Dahoas/full-hh-rlhf" in dataset_name: + return raw_datasets.DahoasFullhhrlhfDataset(output_path, seed, + local_rank, dataset_name) + elif "Dahoas/synthetic-instruct-gptj-pairwise" in dataset_name: + return raw_datasets.DahoasSyntheticinstructgptjpairwiseDataset( + output_path, seed, local_rank, dataset_name) + elif "yitingxie/rlhf-reward-datasets" in dataset_name: + return raw_datasets.YitingxieRlhfrewarddatasetsDataset( + output_path, seed, local_rank, dataset_name) + elif "openai/webgpt_comparisons" in dataset_name: + return raw_datasets.OpenaiWebgptcomparisonsDataset( + output_path, seed, local_rank, dataset_name) + elif "stanfordnlp/SHP" in dataset_name: + return raw_datasets.StanfordnlpSHPDataset(output_path, seed, + local_rank, dataset_name) + elif "pvduy/sharegpt_alpaca_oa_vicuna_format" in dataset_name: + return raw_datasets.PvduySharegptalpacaoavicunaformatDataset( + output_path, seed, local_rank, dataset_name) + elif "wangrui6/Zhihu-KOL" in dataset_name: + return raw_datasets.Wangrui6ZhihuKOLDataset(output_path, seed, + local_rank, dataset_name) + elif "Cohere/miracl-zh-queries-22-12" in dataset_name: + return raw_datasets.CohereMiraclzhqueries2212Dataset( + output_path, seed, local_rank, dataset_name) + elif "Hello-SimpleAI/HC3-Chinese" in dataset_name: + return raw_datasets.HelloSimpleAIHC3ChineseDataset( + output_path, seed, local_rank, dataset_name) + elif "mkqa-Chinese" in dataset_name: + return raw_datasets.MkqaChineseDataset(output_path, seed, local_rank, + "mkqa") + elif "mkqa-Japanese" in dataset_name: + return 
raw_datasets.MkqaJapaneseDataset(output_path, seed, local_rank, + "mkqa") + elif "Cohere/miracl-ja-queries-22-12" in dataset_name: + return raw_datasets.CohereMiracljaqueries2212Dataset( + output_path, seed, local_rank, dataset_name) + elif "lmqg/qg_jaquad" in dataset_name: + return raw_datasets.LmqgQgjaquadDataset(output_path, seed, local_rank, + dataset_name) + elif "lmqg/qag_jaquad" in dataset_name: + return raw_datasets.LmqgQagjaquadDataset(output_path, seed, local_rank, + dataset_name) + elif "local/jsonfile" in dataset_name: + chat_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), os.path.pardir, + os.path.pardir, os.path.pardir)) + if not (os.path.isfile(chat_path + '/data/train.json') + and os.path.isfile(chat_path + '/data/eval.json')): + raise RuntimeError( + f"Please check both the train.json and eval.json files in your applications/DeepSpeed-Chat/data directory." + ) + return raw_datasets.LocalJsonFileDataset(output_path, seed, local_rank, + dataset_name, chat_path) + else: + raise RuntimeError( + f"We do not have configs for dataset {dataset_name}, but you can add it by yourself in raw_datasets.py." + ) + + +def get_shuffle_idx(seed, size): + np_rng = np.random.RandomState(seed=seed) + dtype_ = np.uint32 + if size >= (np.iinfo(np.uint32).max - 1): + dtype_ = np.int64 + shuffle_idx = np.arange(start=0, stop=size, step=1, dtype=dtype_) + np_rng.shuffle(shuffle_idx) + return shuffle_idx + + +def get_raw_dataset_split_index(local_rank, + output_path, + dataset_name, + seed, + split_name, + data_split, + split_index, + data_size, + rebuild=False): + index_file_name = f"{output_path}/{dataset_name}_seed{seed}_{split_name}_{data_split}_{split_index}.npy" + # reindex each time when using local jsonfile since it's more likely to get modified + if rebuild or (not os.path.isfile(index_file_name)) or (dataset_name + == 'jsonfile'): + splits = [float(s) for s in data_split.split(',')] + splits_sum = sum(splits) + splits = [split / splits_sum for split in splits] + splits_index = [0] + for index, split in enumerate(splits): + splits_index.append(splits_index[index] + + int(round(split * float(data_size)))) + diff = splits_index[-1] - data_size + for index in range(1, len(splits_index)): + splits_index[index] -= diff + assert splits_index[-1] == data_size + + shuffle_idx = get_shuffle_idx(seed, data_size) + for split_i in range(len(splits)): + shuffle_idx_split_file_name = f"{output_path}/{dataset_name}_seed{seed}_{split_name}_{data_split}_{split_i}.npy" + shuffle_idx_split = shuffle_idx[ + splits_index[split_i]:splits_index[split_i + 1]] + np.save(shuffle_idx_split_file_name, + shuffle_idx_split, + allow_pickle=True) + index = np.load(index_file_name, allow_pickle=True) + return index.tolist() + + +class PromptDataset(Dataset): + + def __init__(self, prompt_dataset, chosen_dataset, reject_dataset, + pad_token_id, train_phase) -> None: + super().__init__() + self.prompt_dataset = prompt_dataset + self.chosen_dataset = chosen_dataset + self.reject_dataset = reject_dataset + self.pad_token_id = pad_token_id + self.train_phase = train_phase + + def __len__(self): + length = len(self.chosen_dataset) + if self.train_phase == 3: + length = len(self.prompt_dataset) + return length + + def __getitem__(self, idx): + if self.train_phase == 1: + return { + "input_ids": self.chosen_dataset[idx]["input_ids"], + "attention_mask": self.chosen_dataset[idx]["attention_mask"], + "labels": self.chosen_dataset[idx]["input_ids"] + } + elif self.train_phase == 2: + return 
self.chosen_dataset[idx]["input_ids"], self.chosen_dataset[idx]["attention_mask"], \ + self.reject_dataset[idx]["input_ids"], self.reject_dataset[idx]["attention_mask"] + elif self.train_phase == 3: + return self.prompt_dataset[idx]["input_ids"],self.prompt_dataset[idx]["attention_mask"], \ + self.pad_token_id + + +def create_dataset_split(current_dataset, raw_dataset, train_phase, tokenizer, + end_of_conversation_token, max_seq_len): + prompt_dataset = [] + chosen_dataset = [] + reject_dataset = [] + if train_phase == 1: + for i, tmp_data in enumerate(current_dataset): + # tokenize the text + chosen_sentence = raw_dataset.get_prompt_and_chosen( + tmp_data) # the accept response + if chosen_sentence is not None: + chosen_sentence += end_of_conversation_token + chosen_token = tokenizer(chosen_sentence, + max_length=max_seq_len, + padding="max_length", + truncation=True, + return_tensors="pt") + chosen_token["input_ids"] = chosen_token["input_ids"].squeeze( + 0) + chosen_token["attention_mask"] = chosen_token[ + "attention_mask"].squeeze(0) + chosen_dataset.append(chosen_token) + print( + f'Creating dataset {raw_dataset.dataset_name_clean} for {train_phase=} size={len(chosen_dataset)}' + ) + + elif train_phase == 2: + for i, tmp_data in enumerate(current_dataset): + # tokenize the text + chosen_sentence = raw_dataset.get_prompt_and_chosen( + tmp_data) # the accept response + reject_sentence = raw_dataset.get_prompt_and_rejected( + tmp_data) # the accept response + if chosen_sentence is not None and reject_sentence is not None: + chosen_sentence += end_of_conversation_token # the accept response + reject_sentence += end_of_conversation_token + chosen_token = tokenizer(chosen_sentence, + max_length=max_seq_len, + padding="max_length", + truncation=True, + return_tensors="pt") + reject_token = tokenizer(reject_sentence, + max_length=max_seq_len, + padding="max_length", + truncation=True, + return_tensors="pt") + chosen_token["input_ids"] = chosen_token["input_ids"] + chosen_token["attention_mask"] = chosen_token["attention_mask"] + chosen_dataset.append(chosen_token) + + reject_token["input_ids"] = reject_token["input_ids"] + reject_token["attention_mask"] = reject_token["attention_mask"] + reject_dataset.append(reject_token) + print( + f'Creating dataset {raw_dataset.dataset_name_clean} for {train_phase=} size={len(chosen_dataset)}' + ) + + elif train_phase == 3: + filtered = 0 + for i, tmp_data in enumerate(current_dataset): + # tokenize the text + prompt = raw_dataset.get_prompt(tmp_data) + if prompt is not None: + prompt_token = tokenizer(prompt, return_tensors="pt") + if prompt_token["input_ids"].size()[-1] <= max_seq_len: + for key_word in ["input_ids", "attention_mask"]: + prompt_token[key_word] = prompt_token[ + key_word].squeeze(0).flip(0) + prompt_dataset.append(prompt_token) + else: + filtered += 1 + print(f'Creating dataset {raw_dataset.dataset_name_clean} ' + f'for {train_phase=} size={len(prompt_dataset)} {filtered=}') + + return PromptDataset(prompt_dataset, chosen_dataset, reject_dataset, + tokenizer.pad_token_id, train_phase) + + +def create_dataset(local_rank, dataset_name, data_split, output_path, + train_phase, seed, tokenizer, end_of_conversation_token, + max_seq_len, rebuild): + raw_dataset = get_raw_dataset(dataset_name, output_path, seed, local_rank) + train_dataset = raw_dataset.get_train_data() + train_index = get_raw_dataset_split_index(local_rank, output_path, + raw_dataset.dataset_name_clean, + seed, "train", data_split, + train_phase - 1, + len(train_dataset), 
rebuild) + train_dataset = Subset(train_dataset, train_index) + train_dataset = create_dataset_split(train_dataset, raw_dataset, + train_phase, tokenizer, + end_of_conversation_token, + max_seq_len) + + eval_dataset = raw_dataset.get_eval_data() + eval_index = get_raw_dataset_split_index(local_rank, output_path, + raw_dataset.dataset_name_clean, + seed, "eval", + data_split, train_phase - 1, + len(eval_dataset), rebuild) + eval_dataset = Subset(eval_dataset, eval_index) + eval_dataset = create_dataset_split(eval_dataset, raw_dataset, train_phase, + tokenizer, end_of_conversation_token, + max_seq_len) + return train_dataset, eval_dataset + + +def create_prompt_dataset(local_rank, + data_path, + data_split, + output_path, + train_phase, + seed, + tokenizer, + max_seq_len, + end_of_conversation_token="<|endoftext|>", + sft_only_data_path=[], + reload=False): + """ + Creates the prompt dataset + """ + os.makedirs(output_path, exist_ok=True) + fname = "_".join(data_path) + sft_cache_key = "_".join(sft_only_data_path) + tokenizer_name = tokenizer.init_kwargs["name_or_path"].replace("/", "_") + fname = f"{fname}_split{data_split}_phase{train_phase}_seed{seed}_tokenizer{tokenizer_name}_seqlen{max_seq_len}_sft{sft_cache_key}" + fname = "_".join(fname.split("/")) + fname = hashlib.sha256(fname.encode()).hexdigest( + ) # hash the file name to avoid too long file name + train_fname = f"{output_path}/traindata_{fname}.pt" + eval_fname = f"{output_path}/evaldata_{fname}.pt" + + cache_found = os.path.isfile(train_fname) and os.path.isfile(eval_fname) + buf_create_cache = torch.ByteTensor([not cache_found]).to( + get_accelerator().current_device_name()) + torch.distributed.all_reduce(buf_create_cache) + + if local_rank <= 0 and (buf_create_cache.item() != 0 or reload): + print(f'Creating prompt dataset {data_path}, {reload=}') + if len(data_path) == 1: # Single dataset. + train_dataset, eval_dataset = create_dataset( + local_rank, + data_path[0], + data_split, + output_path, + train_phase, + seed, + tokenizer, + end_of_conversation_token, + max_seq_len, + rebuild=reload) + else: # Blending datasets. + train_datasets = [] + eval_datasets = [] + train_size = 0 + eval_size = 0 + for d_path in data_path: + train_dataset, eval_dataset = create_dataset( + local_rank, + d_path, + data_split, + output_path, + train_phase, + seed, + tokenizer, + end_of_conversation_token, + max_seq_len, + rebuild=reload) + train_datasets.append(train_dataset) + eval_datasets.append(eval_dataset) + train_size += len(train_dataset) + eval_size += len(eval_dataset) + train_dataset = ConcatDataset(train_datasets) + shuffle_idx = get_shuffle_idx(seed, train_size) + train_dataset = Subset(train_dataset, shuffle_idx.tolist()) + eval_dataset = ConcatDataset(eval_datasets) + shuffle_idx = get_shuffle_idx(seed, eval_size) + eval_dataset = Subset(eval_dataset, shuffle_idx.tolist()) + + # Append the SFT-only dataset if it exists, and current phase is 1(SFT). 
+ if train_phase == 1 and sft_only_data_path: + sft_train_datasets = [] + sft_eval_datasets = [] + sft_train_size = 0 + sft_eval_size = 0 + for sft_path in sft_only_data_path: + sft_train_dataset, sft_eval_dataset = create_dataset( + local_rank, + sft_path, + "10,0,0", + output_path, + train_phase, + seed, + tokenizer, + end_of_conversation_token, + max_seq_len, + rebuild=reload) + sft_train_datasets.append(sft_train_dataset) + sft_eval_datasets.append(sft_eval_dataset) + sft_train_size += len(sft_train_dataset) + sft_eval_size += len(sft_eval_dataset) + if sft_train_datasets: # Check if sft_train_datasets is not empty + sft_train_dataset = ConcatDataset(sft_train_datasets) + train_dataset = ConcatDataset( + [train_dataset, sft_train_dataset]) + shuffle_idx = get_shuffle_idx(seed, len(train_dataset)) + train_dataset = Subset(train_dataset, shuffle_idx.tolist()) + if sft_eval_datasets: # Check if sft_eval_datasets is not empty + sft_eval_dataset = ConcatDataset(sft_eval_datasets) + eval_dataset = ConcatDataset([eval_dataset, sft_eval_dataset]) + shuffle_idx = get_shuffle_idx(seed, len(eval_dataset)) + eval_dataset = Subset(eval_dataset, shuffle_idx.tolist()) + torch.save(train_dataset, train_fname) + torch.save(eval_dataset, eval_fname) + torch.distributed.barrier() + return torch.load(train_fname), torch.load(eval_fname) + + +class DataCollatorReward: + + def __call__(self, data): + batch = {} + batch["input_ids"] = torch.cat([f[0] + for f in data] + [f[2] for f in data], + dim=0) + batch["attention_mask"] = torch.cat([f[1] for f in data] + + [f[3] for f in data], + dim=0) + return batch + + +class DataCollatorRLHF: + + def __init__(self, max_token_len, inference_tp_size): + self.max_token_len = max_token_len + self.inference_tp_size = inference_tp_size + + def __call__(self, data): + batch = {} + pad_token_id = data[-1][-1] + + prompt = pad_sequence([f[0] for f in data], + padding_value=pad_token_id, + batch_first=True) + prompt_mask = pad_sequence([f[1] for f in data], + padding_value=0, + batch_first=True) + + ### make sure the final output is padded out to max_token_len + length = prompt.size()[-1] + pad_length = self.max_token_len - length + if pad_length > 0: + batch["prompt"] = F.pad(prompt, + pad=(0, pad_length), + mode='constant', + value=pad_token_id) + batch["prompt_att_mask"] = F.pad(prompt_mask, + pad=(0, pad_length), + mode='constant', + value=0) + else: + batch["prompt"] = prompt + batch["prompt_att_mask"] = prompt_mask + batch["prompt"] = batch["prompt"].flip(1) + batch["prompt_att_mask"] = batch["prompt_att_mask"].flip(1) + return batch + + +def get_unsupervised_data(args, tokenizer): + unsupervised_raw_datasets = load_dataset( + args.unsupervised_dataset_name, args.unsupervised_dataset_config_name) + column_names = unsupervised_raw_datasets["train"].column_names + text_column_name = "text" if "text" in column_names else column_names[0] + + def tokenize_function(examples): + return tokenizer(examples[text_column_name]) + + tokenized_datasets = unsupervised_raw_datasets.map( + tokenize_function, + batched=True, + num_proc=args.preprocessing_num_workers, + remove_columns=column_names, + load_from_cache_file=True, + desc="Running tokenizer on dataset", + ) + + block_size = args.max_prompt_seq_len + args.max_answer_seq_len + + def group_texts(examples): + # Concatenate all texts.
+ concatenated_examples = { + k: list(chain(*examples[k])) + for k in examples.keys() + } + total_length = len(concatenated_examples[list(examples.keys())[0]]) + # We drop the small remainder, we could add padding if the model supported it instead of this drop, you can + # customize this part to your needs. + if total_length >= block_size: + total_length = (total_length // block_size) * block_size + # Split by chunks of max_len. + result = { + k: + [t[i:i + block_size] for i in range(0, total_length, block_size)] + for k, t in concatenated_examples.items() + } + result["labels"] = result["input_ids"].copy() + return result + + lm_datasets = tokenized_datasets.map( + group_texts, + batched=True, + num_proc=args.preprocessing_num_workers, + load_from_cache_file=True, + desc=f"Grouping texts in chunks of {block_size}", + ) + + train_dataset = lm_datasets["train"] + + return train_dataset + + +class MiniDataset: + + def __init__(self, max_size, small_batch_size): + self.dataset = [] + self.max_size = max_size + self.small_batch_size = small_batch_size + + def seperate(self): + small_dataset = [] + for large_batch in self.dataset: + if type(large_batch) == list or type(large_batch) == tuple: + large_size = len(large_batch[0]) + elif type(large_batch) == dict: + large_size = len(large_batch[list(large_batch.keys())[0]]) + else: + large_size = len(large_batch) + for i in range(0, large_size, self.small_batch_size): + if type(large_batch) == list or type(large_batch) == tuple: + small_dataset.append( + [x[i:i + self.small_batch_size] for x in large_batch]) + elif type(large_batch) == dict: + small_dataset.append({ + k: v[i:i + self.small_batch_size] + for k, v in large_batch.items() + }) + else: + small_dataset.append(large_batch[i:i + + self.small_batch_size]) + self.free() + + return small_dataset + + def add(self, data): + if len(self.dataset) < self.max_size: + self.dataset.append(data) + if len(self.dataset) == self.max_size: + return self.seperate() + else: + return None + else: + raise ValueError( + "The dataset is full but we did not stop it. There is a bug in the code." + ) + + def free(self): + self.dataset = [] diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/data/raw_datasets.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/data/raw_datasets.py new file mode 100644 index 0000000000000000000000000000000000000000..c5c21b9dc834b69ee298fb0480e0df3f22028008 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/data/raw_datasets.py @@ -0,0 +1,772 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +import os +# DeepSpeed Team +from datasets import load_dataset, load_from_disk +from torch.utils.data import Subset +import re + + +# The template prompt dataset class that all new dataset porting needs to +# follow in order to have a unified API and unified data format. 
+class PromptRawDataset(object): + + def __init__(self, output_path, seed, local_rank, dataset_name): + self.output_path = output_path + self.seed = seed + self.local_rank = local_rank + if os.path.exists(dataset_name): + self.raw_datasets = load_dataset(dataset_name) + elif not dataset_name == 'local/jsonfile': + self.raw_datasets = load_dataset(dataset_name) + + def get_train_data(self): + return + + def get_eval_data(self): + return + + # The prompt should be in the format of: " Human: " + actual_prompt_sentence + " Assistant:" + def get_prompt(self, sample): + return + + # The chosen response should be in the format of: " " + actual_response_sentence + def get_chosen(self, sample): + return + + # The rejected response should be in the format of: " " + actual_response_sentence + # If the dataset does not have rejected response, return None + def get_rejected(self, sample): + return + + def get_prompt_and_chosen(self, sample): + return + + def get_prompt_and_rejected(self, sample): + return + + +# English dataset +class DahoasRmstaticDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "Dahoas/rm-static" + self.dataset_name_clean = "Dahoas_rm_static" + + def get_train_data(self): + return self.raw_datasets["train"] + + def get_eval_data(self): + return self.raw_datasets["test"] + + def get_prompt(self, sample): + return sample['prompt'] + + def get_chosen(self, sample): + return sample['chosen'] + + def get_rejected(self, sample): + return sample['rejected'] + + def get_prompt_and_chosen(self, sample): + return sample['prompt'] + sample['chosen'] + + def get_prompt_and_rejected(self, sample): + return sample['prompt'] + sample['rejected'] + + +# English dataset +class DahoasFullhhrlhfDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "Dahoas/full-hh-rlhf" + self.dataset_name_clean = "Dahoas_full_hh_rlhf" + + def get_train_data(self): + return self.raw_datasets["train"] + + def get_eval_data(self): + return self.raw_datasets["test"] + + def get_prompt(self, sample): + return sample['prompt'] + + def get_chosen(self, sample): + return sample['chosen'] + + def get_rejected(self, sample): + return sample['rejected'] + + def get_prompt_and_chosen(self, sample): + return sample['prompt'] + sample['chosen'] + + def get_prompt_and_rejected(self, sample): + return sample['prompt'] + sample['rejected'] + + +# English dataset +class DahoasSyntheticinstructgptjpairwiseDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "Dahoas/synthetic-instruct-gptj-pairwise" + self.dataset_name_clean = "Dahoas_synthetic_instruct_gptj_pairwise" + + def get_train_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 0, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_eval_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 1, + 
len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_prompt(self, sample): + return " Human: " + sample['prompt'] + " Assistant:" + + def get_chosen(self, sample): + return " " + sample['chosen'] + + def get_rejected(self, sample): + return " " + sample['rejected'] + + def get_prompt_and_chosen(self, sample): + return " Human: " + sample['prompt'] + " Assistant: " + sample['chosen'] + + def get_prompt_and_rejected(self, sample): + return " Human: " + sample['prompt'] + " Assistant: " + sample[ + 'rejected'] + + +# English dataset +class YitingxieRlhfrewarddatasetsDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "yitingxie/rlhf-reward-datasets" + self.dataset_name_clean = "yitingxie_rlhf_reward_datasets" + + def get_train_data(self): + return self.raw_datasets["train"] + + def get_eval_data(self): + return self.raw_datasets["test"] + + def get_prompt(self, sample): + return sample['prompt'] + "Assistant:" + + def get_chosen(self, sample): + return sample['chosen'].split("Assistant:")[-1] + + def get_rejected(self, sample): + return sample['rejected'].split("Assistant:")[-1] + + def get_prompt_and_chosen(self, sample): + return sample['prompt'] + sample['chosen'] + + def get_prompt_and_rejected(self, sample): + return sample['prompt'] + sample['rejected'] + + +# English dataset +class OpenaiWebgptcomparisonsDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "openai/webgpt_comparisons" + self.dataset_name_clean = "openai_webgpt_comparisons" + + def get_train_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 0, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_eval_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 1, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_prompt(self, sample): + return " Human: " + sample['question']['full_text'] + " Assistant:" + + def get_chosen(self, sample): + if float(sample['score_0']) >= float(sample['score_1']): + response = sample['answer_0'] + else: + response = sample['answer_1'] + # This data has citation square brackets and numbers (e.g., "[1]"). + # Right now we are not doing browser-assisted finetuning, thus we + # remove these citations to avoid confusing the model. 
+ response = re.sub(r" [\(\[].*?[\)\]]", "", response) + response = re.sub(r"[\(\[].*?[\)\]]", "", response) + return " " + response + + def get_rejected(self, sample): + if float(sample['score_0']) < float(sample['score_1']): + response = sample['answer_0'] + else: + response = sample['answer_1'] + response = re.sub(r" [\(\[].*?[\)\]]", "", response) + response = re.sub(r"[\(\[].*?[\)\]]", "", response) + return " " + response + + def get_prompt_and_chosen(self, sample): + if float(sample['score_0']) >= float(sample['score_1']): + response = sample['answer_0'] + else: + response = sample['answer_1'] + response = re.sub(r" [\(\[].*?[\)\]]", "", response) + response = re.sub(r"[\(\[].*?[\)\]]", "", response) + return " Human: " + sample['question'][ + 'full_text'] + " Assistant: " + response + + def get_prompt_and_rejected(self, sample): + if float(sample['score_0']) < float(sample['score_1']): + response = sample['answer_0'] + else: + response = sample['answer_1'] + response = re.sub(r" [\(\[].*?[\)\]]", "", response) + response = re.sub(r"[\(\[].*?[\)\]]", "", response) + return " Human: " + sample['question'][ + 'full_text'] + " Assistant: " + response + + +# English dataset +class StanfordnlpSHPDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "stanfordnlp/SHP" + self.dataset_name_clean = "stanfordnlp_SHP" + + def get_train_data(self): + return self.raw_datasets["train"] + + def get_eval_data(self): + return self.raw_datasets["validation"] + + def get_prompt(self, sample): + return " Human: " + sample['history'] + " Assistant:" + + def get_chosen(self, sample): + if int(sample["labels"]) == 1: + response = sample["human_ref_A"] + else: + response = sample["human_ref_B"] + return " " + response + + def get_rejected(self, sample): + if int(sample["labels"]) == 1: + response = sample["human_ref_B"] + else: + response = sample["human_ref_A"] + return " " + response + + def get_prompt_and_chosen(self, sample): + if int(sample["labels"]) == 1: + response = sample["human_ref_A"] + else: + response = sample["human_ref_B"] + return " Human: " + sample['history'] + " Assistant: " + response + + def get_prompt_and_rejected(self, sample): + if int(sample["labels"]) == 1: + response = sample["human_ref_B"] + else: + response = sample["human_ref_A"] + return " Human: " + sample['history'] + " Assistant: " + response + + +# English dataset +class PvduySharegptalpacaoavicunaformatDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "pvduy/sharegpt_alpaca_oa_vicuna_format" + self.dataset_name_clean = "pvduy_sharegpt_alpaca_oa_vicuna_format" + + def get_train_data(self): + return self.raw_datasets["train"] + + def get_eval_data(self): + return self.raw_datasets["test"] + + def get_prompt(self, sample): + if sample['prompt'] is not None and len(sample['prompt']) > 0: + return sample['prompt'].replace("USER", "Human").replace( + "ASSISTANT", "Assistant") + return None + + def get_chosen(self, sample): + if sample['label'] is not None and len(sample['label']) > 0: + return " " + sample['label'] + return None + + def get_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." 
+ ) + return None + + def get_prompt_and_chosen(self, sample): + if sample['prompt'] is not None and sample['label'] is not None and len( + sample['prompt']) > 0 and len(sample['label']) > 0: + return sample['prompt'].replace("USER", "Human").replace( + "ASSISTANT", "Assistant") + " " + sample['label'] + return None + + def get_prompt_and_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." + ) + return None + + +class LocalJsonFileDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name, chat_path): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "local/jsonfile" + self.dataset_name_clean = "jsonfile" + self.raw_datasets = load_dataset('json', + data_files={ + "train": + chat_path + '/data/train.json', + "eval": + chat_path + '/data/eval.json' + }) + + def get_train_data(self): + if self.raw_datasets['train'] is not None: + return self.raw_datasets['train'] + return None + + def get_eval_data(self): + if self.raw_datasets['eval'] is not None: + return self.raw_datasets['eval'] + return None + + # The prompt should be in the format of: " Human: " + actual_prompt_sentence + " Assistant:" + def get_prompt(self, sample): + if sample['prompt'] is not None: + return " " + sample['prompt'] + return None + + # The chosen response should be in the format of: " " + actual_response_sentence + def get_chosen(self, sample): + if sample['chosen'] is not None: + return " " + sample['chosen'] + return None + + # The rejected response should be in the format of: " " + actual_response_sentence + # If the dataset does not have rejected response, return None + def get_rejected(self, sample): + if sample['rejected'] is not None: + return " " + sample['rejected'] + return None + + def get_prompt_and_chosen(self, sample): + if sample['prompt'] is not None and sample['chosen'] is not None: + return " " + sample['prompt'] + " " + sample['chosen'] + return None + + def get_prompt_and_rejected(self, sample): + if sample['prompt'] is not None and sample['rejected'] is not None: + return " " + sample['prompt'] + " " + sample['rejected'] + return None + + +# Chinese dataset +class Wangrui6ZhihuKOLDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "wangrui6/Zhihu-KOL" + self.dataset_name_clean = "wangrui6_Zhihu_KOL" + + def get_train_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 0, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_eval_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 1, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_prompt(self, sample): + if sample['INSTRUCTION'] is not None: + return " Human: " + sample['INSTRUCTION'] + " Assistant:" + return None + + def get_chosen(self, sample): + if sample['RESPONSE'] is not None: + return " " + sample['RESPONSE'] + return None + + def get_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." 
+ ) + return None + + def get_prompt_and_chosen(self, sample): + if sample['INSTRUCTION'] is not None and sample['RESPONSE'] is not None: + return " Human: " + sample[ + 'INSTRUCTION'] + " Assistant: " + sample['RESPONSE'] + return None + + def get_prompt_and_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." + ) + return None + + +# Chinese dataset +class CohereMiraclzhqueries2212Dataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "Cohere/miracl-zh-queries-22-12" + self.dataset_name_clean = "Cohere_miracl_zh_queries_22_12" + + def get_train_data(self): + return self.raw_datasets["train"] + + def get_eval_data(self): + return self.raw_datasets["dev"] + + def get_prompt(self, sample): + return " Human: " + sample['query'] + " Assistant:" + + def get_chosen(self, sample): + return " " + sample['positive_passages'][0]['text'] + + def get_rejected(self, sample): + return " " + sample['negative_passages'][0]['text'] + + def get_prompt_and_chosen(self, sample): + return " Human: " + sample['query'] + " Assistant: " + sample[ + 'positive_passages'][0]['text'] + + def get_prompt_and_rejected(self, sample): + return " Human: " + sample['query'] + " Assistant: " + sample[ + 'negative_passages'][0]['text'] + + +# Chinese dataset +class HelloSimpleAIHC3ChineseDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "Hello-SimpleAI/HC3-Chinese" + self.dataset_name_clean = "Hello_SimpleAI_HC3_Chinese" + + def get_train_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 0, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_eval_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 1, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_prompt(self, sample): + if sample['question'] is not None: + return " Human: " + sample['question'] + " Assistant:" + return None + + def get_chosen(self, sample): + if sample['human_answers'][0] is not None: + return " " + sample['human_answers'][0] + return None + + def get_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." + ) + return None + + def get_prompt_and_chosen(self, sample): + if sample['question'] is not None and sample['human_answers'][ + 0] is not None: + return " Human: " + sample['question'] + " Assistant: " + sample[ + 'human_answers'][0] + return None + + def get_prompt_and_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." 
+ ) + return None + + +# Chinese dataset +class MkqaChineseDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "mkqa-Chinese" + self.dataset_name_clean = "mkqa" + + def get_train_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 0, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_eval_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 1, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_prompt(self, sample): + if sample['queries']['zh_cn'] is not None: + return " Human: " + sample['queries']['zh_cn'] + " Assistant:" + return None + + def get_chosen(self, sample): + if sample['answers']['zh_cn'][0]['text'] is not None: + return " " + sample['answers']['zh_cn'][0]['text'] + return None + + def get_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." + ) + return None + + def get_prompt_and_chosen(self, sample): + if sample['queries']['zh_cn'] is not None and sample['answers'][ + 'zh_cn'][0]['text'] is not None: + return " Human: " + sample['queries'][ + 'zh_cn'] + " Assistant: " + sample['answers']['zh_cn'][0][ + 'text'] + return None + + def get_prompt_and_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." + ) + return None + + +# Japanese dataset +class MkqaJapaneseDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "mkqa-Japanese" + self.dataset_name_clean = "mkqa" + + def get_train_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 0, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_eval_data(self): + from .data_utils import get_raw_dataset_split_index + dataset = self.raw_datasets["train"] + index = get_raw_dataset_split_index(self.local_rank, self.output_path, + self.dataset_name_clean, + self.seed, "train_eval", "9,1", 1, + len(dataset)) + dataset = Subset(dataset, index) + return dataset + + def get_prompt(self, sample): + if sample['queries']['ja'] is not None: + return " Human: " + sample['queries']['ja'] + " Assistant:" + return None + + def get_chosen(self, sample): + if sample['answers']['ja'][0]['text'] is not None: + return " " + sample['answers']['ja'][0]['text'] + return None + + def get_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." 
+ ) + return None + + def get_prompt_and_chosen(self, sample): + if sample['queries']['ja'] is not None and sample['answers']['ja'][0][ + 'text'] is not None: + return " Human: " + sample['queries'][ + 'ja'] + " Assistant: " + sample['answers']['ja'][0]['text'] + return None + + def get_prompt_and_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." + ) + return None + + +# Japanese dataset +class CohereMiracljaqueries2212Dataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "Cohere/miracl-ja-queries-22-12" + self.dataset_name_clean = "Cohere_miracl_ja_queries_22_12" + + def get_train_data(self): + return self.raw_datasets["train"] + + def get_eval_data(self): + return self.raw_datasets["dev"] + + def get_prompt(self, sample): + return " Human: " + sample['query'] + " Assistant:" + + def get_chosen(self, sample): + return " " + sample['positive_passages'][0]['text'] + + def get_rejected(self, sample): + return " " + sample['negative_passages'][0]['text'] + + def get_prompt_and_chosen(self, sample): + return " Human: " + sample['query'] + " Assistant: " + sample[ + 'positive_passages'][0]['text'] + + def get_prompt_and_rejected(self, sample): + if len(sample['negative_passages']) > 0: + return " Human: " + sample['query'] + " Assistant: " + sample[ + 'negative_passages'][0]['text'] + return None + + +# Japanese dataset +class LmqgQgjaquadDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "lmqg/qg_jaquad" + self.dataset_name_clean = "lmqg_qg_jaquad" + + def get_train_data(self): + return self.raw_datasets["train"] + + def get_eval_data(self): + return self.raw_datasets["validation"] + + def get_prompt(self, sample): + return " Human: " + sample['question'] + " Assistant:" + + def get_chosen(self, sample): + return " " + sample['sentence'] + + def get_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." + ) + return None + + def get_prompt_and_chosen(self, sample): + return " Human: " + sample['question'] + " Assistant: " + sample[ + 'sentence'] + + def get_prompt_and_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." + ) + return None + + +# Japanese dataset +class LmqgQagjaquadDataset(PromptRawDataset): + + def __init__(self, output_path, seed, local_rank, dataset_name): + super().__init__(output_path, seed, local_rank, dataset_name) + self.dataset_name = "lmqg/qag_jaquad" + self.dataset_name_clean = "lmqg_qag_jaquad" + + def get_train_data(self): + return self.raw_datasets["train"] + + def get_eval_data(self): + return self.raw_datasets["validation"] + + def get_prompt(self, sample): + return " Human: " + sample['questions'][0] + " Assistant:" + + def get_chosen(self, sample): + return " " + sample['paragraph'] + + def get_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." + ) + return None + + def get_prompt_and_chosen(self, sample): + return " Human: " + sample['questions'][0] + " Assistant: " + sample[ + 'paragraph'] + + def get_prompt_and_rejected(self, sample): + print( + f"Warning: dataset {self.dataset_name} does not include rejected response." 
+ ) + return None diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/ds_utils.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/ds_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9c15e514356a72d90cf2cc1a71360bccb702d07c --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/ds_utils.py @@ -0,0 +1,104 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import deepspeed.comm as dist +from deepspeed.accelerator import get_accelerator + +GLOBAL_BATCH_SIZE = 32 +MICRO_BATCH_SIZE = 4 + + +def get_train_ds_config(offload, + dtype, + stage=2, + enable_hybrid_engine=False, + inference_tp_size=1, + release_inference_cache=False, + pin_parameters=True, + tp_gather_partition_size=8, + max_out_tokens=512, + enable_tensorboard=False, + enable_mixed_precision_lora=False, + tb_path="", + tb_name=""): + + device = "cpu" if offload else "none" + if dtype == "fp16": + data_type = "fp16" + dtype_config = {"enabled": True, "loss_scale_window": 100} + elif dtype == "bf16": + data_type = "bfloat16" + dtype_config = {"enabled": True} + zero_opt_dict = { + "stage": stage, + "offload_param": { + "device": device + }, + "offload_optimizer": { + "device": device + }, + "stage3_param_persistence_threshold": 1e4, + "stage3_max_live_parameters": 3e7, + "stage3_prefetch_bucket_size": 3e7, + "memory_efficient_linear": False + } + if enable_mixed_precision_lora: + zero_opt_dict["zero_quantized_nontrainable_weights"] = True + if dist.get_world_size() != get_accelerator().device_count(): + zero_opt_dict["zero_hpz_partition_size"] = get_accelerator( + ).device_count() + return { + "train_batch_size": GLOBAL_BATCH_SIZE, + "train_micro_batch_size_per_gpu": MICRO_BATCH_SIZE, + "steps_per_print": 10, + "zero_optimization": zero_opt_dict, + data_type: dtype_config, + "gradient_clipping": 1.0, + "prescale_gradients": False, + "wall_clock_breakdown": False, + "hybrid_engine": { + "enabled": enable_hybrid_engine, + "max_out_tokens": max_out_tokens, + "inference_tp_size": inference_tp_size, + "release_inference_cache": release_inference_cache, + "pin_parameters": pin_parameters, + "tp_gather_partition_size": tp_gather_partition_size, + }, + "tensorboard": { + "enabled": enable_tensorboard, + "output_path": f"{tb_path}/ds_tensorboard_logs/", + "job_name": f"{tb_name}_tensorboard" + } + } + + +def get_eval_ds_config(offload, dtype, stage=0): + device = "cpu" if offload else "none" + if dtype == "fp16": + data_type = "fp16" + dtype_config = { + "enabled": True, + } + elif dtype == "bf16": + data_type = "bfloat16" + dtype_config = {"enabled": True} + zero_opt_dict = { + "stage": stage, + "stage3_param_persistence_threshold": 1e4, + "offload_param": { + "device": device + }, + "memory_efficient_linear": False + } + return { + "train_batch_size": GLOBAL_BATCH_SIZE, + "train_micro_batch_size_per_gpu": MICRO_BATCH_SIZE, + "steps_per_print": 10, + "zero_optimization": zero_opt_dict, + data_type: dtype_config, + "gradient_clipping": 1.0, + "prescale_gradients": False, + "wall_clock_breakdown": False + } diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/model/model_utils.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/model/model_utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..97d3bff15ee6fef3f239c04a3050c604ba5f6322 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/model/model_utils.py @@ -0,0 +1,172 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import os +import math +import torch +from transformers import ( + AutoConfig, + AutoModel, +) +from huggingface_hub import snapshot_download +from transformers.deepspeed import HfDeepSpeedConfig + +from dschat.utils.model.reward_model import RewardModel +from dschat.utils.utils import load_state_dict_into_model, print_rank_0 + + +def configure_dropout(model_config, dropout): + if dropout is not None: + for key in ('dropout', 'attention_dropout', 'hidden_dropout', + 'activation_dropout'): + if hasattr(model_config, key): + print(f"Setting model_config.{key} to {dropout}") + setattr(model_config, key, dropout) + + +def causal_lm_model_to_fp32_loss(model): + """ Convert CausalLM model to calculate loss in fp32 """ + + def causal_lm_forward( + input_ids=None, + past_key_values=None, + attention_mask=None, + head_mask=None, + inputs_embeds=None, + labels=None, + use_cache=None, + output_attentions=None, + output_hidden_states=None, + return_dict=None, + **deprecated_arguments, + ): + kwargs = dict() if model.config.model_type == "llama" else dict( + head_mask=head_mask) + output = model.__original_forward__( + input_ids=input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + labels=None, + use_cache=use_cache, + output_attentions=output_attentions, + output_hidden_states=output_hidden_states, + return_dict=return_dict, + **kwargs) + + return_dict = isinstance(output, dict) + lm_logits = output.logits if return_dict else output[0] + loss = None + if labels is not None: + # move labels to correct device to enable model parallelism + labels = labels.to(lm_logits.device) + # Shift so that tokens < n predict n + shift_logits = lm_logits[..., :-1, :].float().contiguous() + shift_labels = labels[..., 1:].contiguous() + batch_size, seq_length, vocab_size = shift_logits.shape + # Flatten the tokens + loss_fct = torch.nn.CrossEntropyLoss() + loss = loss_fct( + shift_logits.view(batch_size * seq_length, vocab_size), + shift_labels.view(batch_size * seq_length)) + + if not return_dict: + # re-pack output with fp32 loss + return ((loss, ) + output) if loss is not None else output + + output.loss = loss + return output + + model.__original_forward__ = model.forward + model.forward = causal_lm_forward + + +def create_hf_model(model_class, + model_name_or_path, + tokenizer, + ds_config=None, + rlhf_training=False, + dropout=None): + model_config = AutoConfig.from_pretrained(model_name_or_path) + configure_dropout(model_config, dropout) + + # Note: dschf is defined in function scope to avoid global effects + # https://huggingface.co/docs/transformers/main_classes/deepspeed#nontrainer-deepspeed-integration + if ds_config is not None and ds_config["zero_optimization"]["stage"] == 3: + dschf = HfDeepSpeedConfig(ds_config) + else: + dschf = None + if rlhf_training: + # the weight loading is handled by create critic model + model = model_class.from_config(model_config) + else: + model = model_class.from_pretrained( + model_name_or_path, + from_tf=bool(".ckpt" in model_name_or_path), + config=model_config) + + model.config.end_token_id = tokenizer.eos_token_id + model.config.pad_token_id = model.config.eos_token_id + 
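+    # The resize below rounds the embedding matrix up to a multiple of 8, a common
+    # performance heuristic for mixed-precision GPU kernels rather than a
+    # correctness requirement; the extra embedding rows are simply never used.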
model.resize_token_embeddings(int( + 8 * + math.ceil(len(tokenizer) / 8.0))) # make the vocab size multiple of 8 + + return model + + +def create_critic_model(model_name_or_path, + tokenizer, + ds_config, + num_padding_at_beginning=0, + rlhf_training=False, + dropout=None, + zero_stage=0, + compute_fp32_loss=False): + # OPT model family always put a padding token at the beginning of the sequence, + # we did not see this in other models but not sure if it is a general rule + + import time + + start = time.time() + critic_model = create_hf_model(AutoModel, model_name_or_path, tokenizer, + ds_config, rlhf_training, dropout) + end = time.time() + print_rank_0(f">Creating model from_config took {end - start} seconds", + None) + + critic_model = RewardModel( + critic_model, + tokenizer, + num_padding_at_beginning=num_padding_at_beginning, + compute_fp32_loss=compute_fp32_loss) + + if rlhf_training: + # load critic model from checkpoint + + if not os.path.isdir(model_name_or_path): + model_name_or_path = snapshot_download(model_name_or_path) + model_ckpt_path = os.path.join(model_name_or_path, 'pytorch_model.bin') + assert os.path.exists( + model_ckpt_path + ), f"Cannot find model checkpoint at {model_ckpt_path}" + + start = time.time() + model_ckpt_state_dict = torch.load(model_ckpt_path, map_location='cpu') + end = time.time() + print_rank_0(f">Creating model from_config took {end - start} seconds", + None) + + # load critic model from checkpoint with zero-stage 3 compatibility + # this functionality may be moved to DS checkpoint load API in future + start = time.time() + load_state_dict_into_model(critic_model, + model_ckpt_state_dict, + "", + zero_stage=zero_stage) + end = time.time() + + print_rank_0(f">Creating model from_config took {end - start} seconds", + None) + + return critic_model diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/model/reward_model.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/model/reward_model.py new file mode 100644 index 0000000000000000000000000000000000000000..60d063b18af51125fea28cdc2c04f9231809a71c --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/model/reward_model.py @@ -0,0 +1,174 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import torch +from torch import nn + + +## Note that the following code is modified from +## https://github.com/CarperAI/trlx/blob/main/examples/summarize_rlhf/reward_model/reward_model.py +class RewardModel(nn.Module): + + def __init__(self, + base_model, + tokenizer, + num_padding_at_beginning=0, + compute_fp32_loss=False): + super().__init__() + self.config = base_model.config + self.num_padding_at_beginning = num_padding_at_beginning + if hasattr(self.config, "word_embed_proj_dim"): + # `OPT` models use word_embed_proj_dim as final output + # https://github.com/huggingface/transformers/blob/main/src/transformers/models/opt/modeling_opt.py#L497 + self.v_head = nn.Linear(self.config.word_embed_proj_dim, + 1, + bias=False) + else: + # `gpt-neo(x)` models use `hidden_size` attribute names instead of `n_embd`` + self.config.n_embd = self.config.hidden_size if hasattr( + self.config, "hidden_size") else self.config.n_embd + self.v_head = nn.Linear(self.config.n_embd, 1, bias=False) + self.rwtransformer = base_model + self.PAD_ID = tokenizer.pad_token_id + self.compute_fp32_loss = compute_fp32_loss + + def gradient_checkpointing_enable(self): + self.rwtransformer.gradient_checkpointing_enable() + + def gradient_checkpointing_disable(self): + self.rwtransformer.gradient_checkpointing_disable() + + def forward(self, + input_ids=None, + past_key_values=None, + attention_mask=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + use_cache=False): + loss = None + + if self.config.model_type == "llama": + kwargs = dict() + else: + kwargs = dict(head_mask=head_mask) + + transformer_outputs = self.rwtransformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + **kwargs) + + hidden_states = transformer_outputs[0] + rewards = self.v_head(hidden_states).squeeze(-1) + chosen_mean_scores = [] + rejected_mean_scores = [] + + # Split the inputs and rewards into two parts, chosen and rejected + assert len(input_ids.shape) == 2 + bs = input_ids.shape[0] // 2 + seq_len = input_ids.shape[1] + + chosen_ids = input_ids[:bs] # bs x seq x 1 + rejected_ids = input_ids[bs:] + chosen_rewards = rewards[:bs] + rejected_rewards = rewards[bs:] + + # Compute pairwise loss. Only backprop on the different tokens before padding + loss = 0. 
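+        # The loop below implements the pairwise ranking loss used for reward
+        # modeling: for each (chosen, rejected) pair, take the per-token rewards
+        # from the first token where the two sequences diverge up to the padding,
+        # accumulate -log(sigmoid(r_chosen - r_rejected)) averaged over those
+        # tokens, and finally average over the batch.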
+ for i in range(bs): + chosen_id = chosen_ids[i] + rejected_id = rejected_ids[i] + chosen_reward = chosen_rewards[i] + rejected_reward = rejected_rewards[i] + + c_inds = (chosen_id == self.PAD_ID).nonzero() + c_ind = c_inds[self.num_padding_at_beginning].item() if len( + c_inds + ) > self.num_padding_at_beginning else seq_len # OPT model pads the first token, so we need to use the second padding token as the end of the sequence + check_divergence = (chosen_id != rejected_id).nonzero() + + if len(check_divergence) == 0: + end_ind = rejected_reward.size(-1) + divergence_ind = end_ind - 1 + r_ind = c_ind + else: + # Check if there is any padding otherwise take length of sequence + r_inds = (rejected_id == self.PAD_ID).nonzero() + r_ind = r_inds[self.num_padding_at_beginning].item( + ) if len(r_inds) > self.num_padding_at_beginning else seq_len + end_ind = max(c_ind, r_ind) + divergence_ind = check_divergence[0] + assert divergence_ind > 0 + c_truncated_reward = chosen_reward[divergence_ind:end_ind] + r_truncated_reward = rejected_reward[divergence_ind:end_ind] + chosen_mean_scores.append( + chosen_reward[c_ind - 1]) #use the end score for reference + rejected_mean_scores.append(rejected_reward[r_ind - 1]) + + if self.compute_fp32_loss: + c_truncated_reward = c_truncated_reward.float() + r_truncated_reward = r_truncated_reward.float() + loss += -torch.nn.functional.logsigmoid(c_truncated_reward - + r_truncated_reward).mean() + + loss = loss / bs + chosen_mean_scores = torch.stack(chosen_mean_scores) + rejected_mean_scores = torch.stack(rejected_mean_scores) + return { + "loss": loss, + "chosen_mean_scores": chosen_mean_scores, + "rejected_mean_scores": rejected_mean_scores, + } + + def forward_value(self, + input_ids=None, + attention_mask=None, + past_key_values=None, + position_ids=None, + head_mask=None, + inputs_embeds=None, + return_value_only=False, + prompt_length=0, + use_cache=False): + + if self.config.model_type == "llama": + kwargs = dict() + else: + kwargs = dict(head_mask=head_mask) + + transformer_outputs = self.rwtransformer( + input_ids, + past_key_values=past_key_values, + attention_mask=attention_mask, + inputs_embeds=inputs_embeds, + use_cache=use_cache, + **kwargs) + hidden_states = transformer_outputs[0] + values = self.v_head(hidden_states).squeeze(-1) + if return_value_only: + return values + else: + # [0 0 0 0 prompt, answer, 0 0 0 0 ] for step 3, we have padding at the beginning + # [prompt, answer, 0, 0, 0, 0] this is normal + assert prompt_length > 1, "prompt_length must be greater than 1 to help select the end score" + bs = values.size(0) + seq_len = input_ids.shape[1] + chosen_end_scores = [ + ] # we use this name for consistency with the original forward function + for i in range(bs): + input_id = input_ids[i] + value = values[i] + + c_inds = (input_id[prompt_length:] == self.PAD_ID).nonzero() + # here we only use the answer part of the sequence so we do not need to care about the padding at the beginning + c_ind = c_inds[0].item() + prompt_length if len( + c_inds) > 0 else seq_len + chosen_end_scores.append(value[c_ind - 1]) + return { + "values": values, + "chosen_end_scores": torch.stack(chosen_end_scores), + } diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/module/lora.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/module/lora.py new file mode 100644 index 0000000000000000000000000000000000000000..32c9730b67d85628e68190e9954b2195e0f50c58 --- /dev/null +++ 
b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/module/lora.py @@ -0,0 +1,155 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import math +import torch +from torch import nn +import torch.nn.functional as F +from deepspeed.compression.helper import recursive_getattr, recursive_setattr +import deepspeed + + +class LinearLayer_LoRA(nn.Module): + # an simple implementation of LoRA + # for now only support Linear Layer + def __init__(self, + weight, + lora_dim=0, + lora_scaling=1, + lora_droppout=0, + bias=None): + super(LinearLayer_LoRA, self).__init__() + self.weight = weight + self.bias = bias + + if lora_dim <= 0: + raise ValueError( + "You are training to use LoRA, whose reduced dim should be larger than 1" + ) + + try: + # for zero stage 3 + rows, columns = weight.ds_shape + except: + rows, columns = weight.shape + self.lora_right_weight = nn.Parameter(torch.zeros( + columns, + lora_dim)) # apply transpose so in forward we do not need to + self.lora_left_weight = nn.Parameter(torch.zeros(lora_dim, rows)) + self.lora_scaling = lora_scaling / lora_dim + + if lora_droppout > 0: + self.lora_dropout = nn.Dropout(lora_droppout) + else: + self.lora_dropout = nn.Identity() + + self.reset_parameters() + # disable the original weight gradient + self.weight.requires_grad = False + # fuse LoRA to the original weight + self.fuse_lora = False + + def eval(self): + self.lora_dropout.eval() + + # self.fuse_lora_weight() + + def train(self, mode=True): + self.lora_dropout.train(mode) + # self.unfuse_lora_weight() + + def reset_parameters(self): + nn.init.kaiming_uniform_(self.lora_right_weight, a=math.sqrt(5)) + nn.init.zeros_(self.lora_left_weight) + + def fuse_lora_weight(self): + if not self.fuse_lora: + self.weight.data += self.lora_scaling * torch.matmul( + self.lora_left_weight.t(), self.lora_right_weight.t()) + self.fuse_lora = True + + def unfuse_lora_weight(self): + if self.fuse_lora: + self.weight.data -= self.lora_scaling * torch.matmul( + self.lora_left_weight.t(), self.lora_right_weight.t()) + self.fuse_lora = False + + def forward(self, input): + if self.fuse_lora: + return F.linear(input, self.weight, self.bias) + else: + return F.linear( + input, self.weight, + self.bias) + (self.lora_dropout(input) @ self.lora_right_weight + @ self.lora_left_weight) * self.lora_scaling + + +# convert the linear layer to LoRA +def convert_linear_layer_to_lora(model, + part_module_name, + lora_dim=0, + lora_scaling=1, + lora_droppout=0): + replace_name = [] + for name, module in model.named_modules(): + if isinstance(module, nn.Linear) and part_module_name in name: + replace_name.append(name) + for name in replace_name: + module = recursive_getattr(model, name) + tmp = LinearLayer_LoRA( + module.weight, lora_dim, lora_scaling, lora_droppout, + module.bias).to(module.weight.device).to(module.weight.dtype) + recursive_setattr(model, name, tmp) + return model + + +def _z3_params_to_fetch(param_list): + return [ + p for p in param_list + if hasattr(p, 'ds_id') and p.ds_status == deepspeed.runtime.zero. 
+ partition_parameters.ZeroParamStatus.NOT_AVAILABLE + ] + + +# convert the LoRA layer to linear layer +def convert_lora_to_linear_layer(model): + replace_name = [] + for name, module in model.named_modules(): + if isinstance(module, LinearLayer_LoRA): + replace_name.append(name) + for name in replace_name: + module = recursive_getattr(model, name) + zero_stage_3 = hasattr(module.weight, 'ds_id') + with deepspeed.zero.GatheredParameters(_z3_params_to_fetch([ + module.weight, module.bias, module.lora_left_weight, + module.lora_right_weight + ]), + modifier_rank=0, + enabled=zero_stage_3): + module.fuse_lora_weight() + return model + + +def only_optimize_lora_parameters(model, force_optimize_params=[]): + # turn off the gradient of all the parameters except the LoRA parameters + for name, param in model.named_parameters(): + if "lora_right_weight" in name or "lora_left_weight" in name or name in force_optimize_params: + param.requires_grad = True + else: + param.requires_grad = False + return model + + +def make_model_gradient_checkpointing_compatible(model): + # Higgingface added this enable input require grads function to make gradient checkpointing work for lora-only optimization + if hasattr(model, "enable_input_require_grads"): + model.enable_input_require_grads() + elif hasattr(model, "get_input_embeddings"): + + def make_inputs_require_grad(module, input, output): + output.requires_grad_(True) + + model.get_input_embeddings().register_forward_hook( + make_inputs_require_grad) + return model diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/perf.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/perf.py new file mode 100644 index 0000000000000000000000000000000000000000..df57a20465c3cedd43c07c019215343fb12d47a0 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/perf.py @@ -0,0 +1,166 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import torch + + +# This function can be used to print throughput for Step 1 and 2 only +def print_throughput(hf_model, args, e2e_time, rank=0): + if rank <= 0: + hf_config = hf_model.config + num_layers, hidden_size, vocab_size = get_hf_configs(hf_config) + + gpus_per_model = torch.distributed.get_world_size() + seq_length = args.max_seq_len + batch_size = args.per_device_train_batch_size + samples_per_second = batch_size / e2e_time + checkpoint_activations_factor = 4 if args.gradient_checkpointing else 3 + if args.lora_dim > 0: + k = args.lora_dim * 2 / hidden_size + checkpoint_activations_factor -= (1 - k) + + hf_model._num_params = sum([ + p.ds_numel if hasattr(p, "ds_tensor") else p.numel() + for p in hf_model.parameters() + ]) + params_in_billions = hf_model._num_params / (1e9) + + # Megatron paper's formula to calculate training flops + train_flops_per_iteration = calculate_flops( + checkpoint_activations_factor, batch_size, seq_length, hf_config) + + train_tflops = train_flops_per_iteration / (e2e_time * gpus_per_model * + (10**12)) + + param_string = f"{params_in_billions:.3f} B" if params_in_billions != 0 else "NA" + print( + f"Model Parameters: {param_string}, Latency: {e2e_time:.2f}s, TFLOPs: {train_tflops:.2f}, Samples/sec: {samples_per_second:.2f}, Time/seq {e2e_time/batch_size:.2f}s, Batch Size: {batch_size}, Sequence Length: {seq_length}" + ) + + +# Enhanced version of the function above that provides calculations and printing for Step 3 +def print_throughput_step3(actor_model, + critic_model, + args, + e2e_time, + gen_exp_time, + train_time, + rank=0): + if rank <= 0: + # Actor model passed here is a HF model. + actor_hf_config = actor_model.config + # Critic model passed here is a DeepSpeed Engine. The module inside is the Reward model (that wraps a HF model). 
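+        # Hence the `.module` hop below: the DeepSpeed engine wraps the
+        # RewardModel, whose `config` attribute is the underlying HF config.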
+ critic_hf_config = critic_model.module.config + + actor_num_layers, actor_hidden_size, actor_vocab_size = get_hf_configs( + actor_hf_config) + critic_num_layers, critic_hidden_size, critic_vocab_size = get_hf_configs( + critic_hf_config) + + gpus_per_model = torch.distributed.get_world_size() + seq_length = args.max_answer_seq_len + args.max_prompt_seq_len + batch_size = args.per_device_generation_batch_size * args.generation_batches * args.ppo_epochs * gpus_per_model * 1 if args.unsupervised_dataset_name is None else 2 + samples_per_second = batch_size / e2e_time + + actor_checkpoint_activations_factor = 4 if args.actor_gradient_checkpointing else 3 + critic_checkpoint_activations_factor = 4 if args.critic_gradient_checkpointing else 3 + if args.actor_lora_dim > 0: + k = args.actor_lora_dim * 2 / actor_hidden_size + actor_checkpoint_activations_factor -= (1 - k) + if args.critic_lora_dim > 0: + k = args.critic_lora_dim * 2 / critic_hidden_size + critic_checkpoint_activations_factor -= (1 - k) + + actor_model._num_params = sum([ + p.ds_numel if hasattr(p, "ds_tensor") else p.numel() + for p in actor_model.parameters() + ]) + actor_params_in_billions = actor_model._num_params / (1e9) + + critic_model._num_params = sum([ + p.ds_numel if hasattr(p, "ds_tensor") else p.numel() + for p in critic_model.parameters() + ]) + critic_params_in_billions = critic_model._num_params / (1e9) + + # Megatron paper's formula to calculate training flops + + actor_train_flops_per_iteration = calculate_flops( + actor_checkpoint_activations_factor, batch_size, seq_length, + actor_hf_config) + critic_train_flops_per_iteration = calculate_flops( + critic_checkpoint_activations_factor, batch_size, seq_length, + critic_hf_config) + + total_train_flops = actor_train_flops_per_iteration + critic_train_flops_per_iteration + train_tflops = total_train_flops / (train_time * gpus_per_model * + (10**12)) + + gen_bs = args.per_device_generation_batch_size * gpus_per_model + + # Modified formula for calculating flops in the forward pass only + gen_flops_per_iteration = ( + 24 * gen_bs * seq_length * actor_num_layers * + (actor_hidden_size**2)) * ( + 1.0 + (seq_length / (6.0 * actor_hidden_size)) + + (actor_vocab_size / + (16.0 * actor_num_layers * actor_hidden_size))) + + gen_tflops = gen_flops_per_iteration / (gen_exp_time * gpus_per_model * + (10**12)) + + if actor_hf_config.torch_dtype == torch.float16: + num_bytes = 2 + elif actor_hf_config.torch_dtype == torch.float32: + num_bytes = 4 + else: + num_bytes = -1 + + pertok_lat = gen_exp_time / args.max_answer_seq_len + gen_bw = 1 / pertok_lat * actor_model._num_params * num_bytes / 1e9 + + total_flops_per_iteration = total_train_flops + gen_flops_per_iteration * args.generation_batches + total_tflops = total_flops_per_iteration / (e2e_time * gpus_per_model * + (10**12)) + + print( + f"End-to-End => Latency: {e2e_time:.2f}s, TFLOPs: {total_tflops:.2f}, Samples/sec: {samples_per_second:.2f}, Time/seq {e2e_time/batch_size:.2f}s, Batch Size: {batch_size}, Total Seq. Length: {seq_length}" + ) + print( + f"Generation => Latency: {gen_exp_time:.2f}s, Per-token Latency {pertok_lat*1000:.2f} ms, TFLOPs: {gen_tflops:.2f}, BW: {gen_bw if num_bytes > 0 else num_bytes:.2f} GB/sec, Answer Seq. 
Length: {args.max_answer_seq_len}" + ) + print( + f"Training => Latency: {train_time:.2f}s, TFLOPs: {train_tflops:.2f}" + ) + actor_param_string = f"{actor_params_in_billions:.3f} B" if actor_params_in_billions != 0 else "NA" + critic_param_string = f"{critic_params_in_billions:.3f} B" if critic_params_in_billions != 0 else "NA" + print( + f"Actor Model Parameters => {actor_param_string}, Critic Model Parameters => {critic_param_string}" + ) + + +# Helper function to calculate FLOPs using the Megatron-LM paper's formula +def calculate_flops(checkpoint_activations_factor, batch_size, seq_length, + hf_config): + num_layers, hidden_size, vocab_size = get_hf_configs(hf_config) + flops_per_iteration = (24 * checkpoint_activations_factor * batch_size * + seq_length * num_layers * (hidden_size**2)) * ( + 1.0 + (seq_length / (6.0 * hidden_size)) + + (vocab_size / + (16.0 * num_layers * hidden_size))) + return flops_per_iteration + + +def get_hf_configs(hf_config): + num_layers = getattr(hf_config, "num_hidden_layers", + getattr(hf_config, "n_layer", None)) + hidden_size = getattr(hf_config, "hidden_size", + getattr(hf_config, "n_embd", None)) + vocab_size = getattr(hf_config, "vocab_size", None) + assert all( + (num_layers, hidden_size, vocab_size) + ), "Could not determine number of layers, hidden size, and vocab size of the model" + + return num_layers, hidden_size, vocab_size diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/utils.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..e4dc7d0365788dc4790d0b8454085697a2e24a52 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/dschat/utils/utils.py @@ -0,0 +1,310 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import os +import torch +import random +import numpy as np +from transformers import set_seed, AutoTokenizer +import json +import deepspeed +from deepspeed.runtime.zero.partition_parameters import ZeroParamStatus +from deepspeed.accelerator import get_accelerator +import torch.nn as nn + + +def print_rank_0(msg, rank=None): + if rank is not None and rank <= 0: + print(msg) + elif is_rank_0(): + print(msg) + + +def is_rank_0(): + """Check whether it is rank 0.""" + if torch.distributed.is_initialized(): + if torch.distributed.get_rank() == 0: + return True + else: + return False + else: + return True + + +def to_device(batch, device): + output = {} + for k, v in batch.items(): + try: + output[k] = v.to(device) + except: + output[k] = v + return output + + +class MovingAverage: + + def __init__(self): + self.count = 0 + self.total = 0 + self.mean = 0 + + def update(self, num): + self.total += num + self.count += 1 + self.mean = self.total / self.count + + return self.mean + + +class ExponentialMovingAverage: + + def __init__(self, alpha=0.9): + self.alpha = alpha + self.ema = None + + def update(self, num): + prev_ema = num if self.ema is None else self.ema + self.ema = self.alpha * prev_ema + (1.0 - self.alpha) * num + return self.ema + + def get(self): + return self.ema if self.ema is not None else 0. 
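+
+# Minimal usage sketch of the helper above (illustrative only):
+#   ema = ExponentialMovingAverage(alpha=0.9)
+#   for reward in (1.0, 0.5, 2.0):
+#       smoothed = ema.update(reward)  # 0.9 * previous EMA + 0.1 * new value
+#   ema.get()  # last smoothed value, or 0. if update() was never called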
+ + +def get_tokenizer(model_name_or_path, fast_tokenizer=True): + if "llama" in model_name_or_path: + from transformers.models.llama import LlamaTokenizer + tokenizer = LlamaTokenizer.from_pretrained( + model_name_or_path, fast_tokenizer=fast_tokenizer) + if tokenizer.pad_token is None: + # assert tokenizer.eos_token is not None + # tokenizer.add_special_tokens({'pad_token': tokenizer.eos_token}) + tokenizer.add_special_tokens({'pad_token': '[PAD]'}) + tokenizer.padding_side = 'right' + else: + tokenizer = AutoTokenizer.from_pretrained( + model_name_or_path, fast_tokenizer=fast_tokenizer) + tokenizer.pad_token = tokenizer.eos_token + # make sure tokenizer is right pad in our logic + tokenizer.padding_side = 'right' + return tokenizer + + +def load_hf_tokenizer(model_name_or_path, + fast_tokenizer=True, + add_special_tokens=None): + if os.path.exists(model_name_or_path): + # Locally tokenizer loading has some issue, so we need to force download + model_json = os.path.join(model_name_or_path, "config.json") + if os.path.exists(model_json): + model_json_file = json.load(open(model_json)) + model_name = model_json_file.get("_name_or_path", + model_name_or_path) + tokenizer = get_tokenizer(model_name, + fast_tokenizer=fast_tokenizer) + else: + tokenizer = get_tokenizer(model_name_or_path, + fast_tokenizer=fast_tokenizer) + + if add_special_tokens is not None: + add_special_tokens = [add_special_tokens] if isinstance(add_special_tokens, str) \ + else add_special_tokens + tokenizer.add_special_tokens( + {'additional_special_tokens': add_special_tokens}) + + return tokenizer + + +def save_hf_format(model, tokenizer, args, sub_folder=""): + # used to save huggingface format, so we can use it for hf.from_pretrained + model_to_save = model.module if hasattr(model, 'module') else model + CONFIG_NAME = "config.json" + WEIGHTS_NAME = "pytorch_model.bin" + output_dir = os.path.join(args.output_dir, sub_folder) + os.makedirs(output_dir, exist_ok=True) + output_model_file = os.path.join(output_dir, WEIGHTS_NAME) + output_config_file = os.path.join(output_dir, CONFIG_NAME) + save_dict = model_to_save.state_dict() + for key in list(save_dict.keys()): + if "lora" in key: + del save_dict[key] + torch.save(save_dict, output_model_file) + model_to_save.config.to_json_file(output_config_file) + tokenizer.save_vocabulary(output_dir) + + +def set_random_seed(seed): + if seed is not None: + set_seed(seed) + random.seed(seed) + np.random.seed(seed) + torch.manual_seed(seed) + get_accelerator().manual_seed_all(seed) + + +def get_all_reduce_mean(tensor): + torch.distributed.all_reduce(tensor, op=torch.distributed.ReduceOp.SUM) + tensor = tensor / torch.distributed.get_world_size() + return tensor + + +# This function is a modified version of code available in the from_pretrained API of HuggingFace Transformers +# The code is copied and modified from: https://github.com/huggingface/transformers/blob/5ee9693a1c77c617ebc43ef20194b6d3b674318e/src/transformers/modeling_utils.py#L498 +# This function helps load a HF format checkpoint into a DeepSpeed wrapped model that has been sharded using ZeRO Stage 3 +def load_state_dict_into_model(model_to_load=None, + state_dict=None, + start_prefix="", + zero_stage=0): + + # copy state_dict so _load_from_state_dict can modify it + metadata = getattr(state_dict, "_metadata", None) + state_dict = state_dict.copy() + if metadata is not None: + state_dict._metadata = metadata + + error_msgs = [] + + # PyTorch's `_load_from_state_dict` does not copy parameters in a module's descendants + 
# so we need to apply the function recursively. + def load(module: nn.Module, state_dict, prefix=""): + local_metadata = {} if metadata is None else metadata.get( + prefix[:-1], {}) + args = (state_dict, prefix, local_metadata, True, [], [], error_msgs) + # Parameters of module and children will start with prefix. We can exit early if there are none in this + # state_dict + if len([key for key in state_dict if key.startswith(prefix)]) > 0: + if zero_stage == 3: + # In sharded models, each shard has only part of the full state_dict, so only gather + # parameters that are in the current state_dict. + named_parameters = dict( + module.named_parameters(prefix=prefix[:-1], recurse=False)) + params_to_gather = [ + named_parameters[k] for k in state_dict.keys() + if k in named_parameters + ] + if len(params_to_gather) > 0: + # because zero3 puts placeholders in model params, this context + # manager gathers (unpartitions) the params of the current layer, then loads from + # the state dict and then re-partitions them again + with deepspeed.zero.GatheredParameters(params_to_gather, + modifier_rank=0): + if torch.distributed.get_rank() == 0: + module._load_from_state_dict(*args) + else: + module._load_from_state_dict(*args) + + for name, child in module._modules.items(): + if child is not None: + load(child, state_dict, prefix + name + ".") + + load(model_to_load, state_dict, prefix=start_prefix) + # Delete `state_dict` so it could be collected by GC earlier. Note that `state_dict` is a copy of the argument, so + # it's safe to delete it. + del state_dict + + return error_msgs + + +def get_optimizer_grouped_parameters( + model, + weight_decay, + lora_lr=5e-4, + no_decay_name_list=[ + "bias", "layer_norm.weight", "layernorm.weight", "norm.weight", + "ln_f.weight" + ], + lora_name_list=["lora_right_weight", "lora_left_weight"], +): + optimizer_grouped_parameters = [ + { + "params": [ + p for n, p in model.named_parameters() + if (not any(nd in n.lower() for nd in no_decay_name_list) + and p.requires_grad and not any(nd in n.lower() + for nd in lora_name_list)) + ], + "weight_decay": + weight_decay, + }, + { + "params": [ + p for n, p in model.named_parameters() + if (not any(nd in n.lower() for nd in no_decay_name_list) + and p.requires_grad and any(nd in n.lower() + for nd in lora_name_list)) + ], + "weight_decay": + weight_decay, + "lr": + lora_lr + }, + { + "params": [ + p for n, p in model.named_parameters() + if (any(nd in n.lower() + for nd in no_decay_name_list) and p.requires_grad) + ], + "weight_decay": + 0.0, + }, + ] + + non_empty_groups = [] + for group in optimizer_grouped_parameters: + if group["params"]: + non_empty_groups.append(group) + return non_empty_groups + + +def _z3_params_to_fetch(param_list): + return [ + p for p in param_list + if hasattr(p, 'ds_id') and p.ds_status == ZeroParamStatus.NOT_AVAILABLE + ] + + +def moving_average(model, model_ema, beta=0.992, device=None, zero_stage=0): + zero_stage_3 = (zero_stage == 3) + with torch.no_grad(): + for param, param_ema in zip(model.parameters(), + model_ema.parameters()): + # TODO: use prefiltering for efficiency + params_to_fetch = _z3_params_to_fetch([param, param_ema + ]) if zero_stage_3 else [] + should_gather_param = len(params_to_fetch) > 0 + with deepspeed.zero.GatheredParameters( + params_to_fetch, enabled=should_gather_param): + data = param.data + if device is not None: + data = data.to(device) + param_ema.data.copy_(torch.lerp(data, param_ema.data, beta)) + + +def save_zero_three_model(model_ema, global_rank, save_dir, 
zero_stage=0): + zero_stage_3 = (zero_stage == 3) + os.makedirs(save_dir, exist_ok=True) + WEIGHTS_NAME = "pytorch_model.bin" + output_model_file = os.path.join(save_dir, WEIGHTS_NAME) + + model_to_save = model_ema.module if hasattr(model_ema, + 'module') else model_ema + if not zero_stage_3: + if global_rank == 0: + torch.save(model_to_save.state_dict(), output_model_file) + else: + output_state_dict = {} + for k, v in model_to_save.named_parameters(): + + if hasattr(v, 'ds_id'): + with deepspeed.zero.GatheredParameters(_z3_params_to_fetch([v + ]), + enabled=zero_stage_3): + v_p = v.data.cpu() + else: + v_p = v.cpu() + if global_rank == 0 and "lora" not in k: + output_state_dict[k] = v_p + if global_rank == 0: + torch.save(output_state_dict, output_model_file) + del output_state_dict diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/e2e_rlhf.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/e2e_rlhf.py new file mode 100644 index 0000000000000000000000000000000000000000..8fb928ca58e8370cd0e0599cb85558c06745170b --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/e2e_rlhf.py @@ -0,0 +1,211 @@ +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +""" +Run all steps with default settings: +$ python3 e2e_rlhf.py + +Change the model used for each step: +$ python3 e2e_rlhf.py --actor-model 350m --reward-model 1.3b + +Change the ZeRO stage used for actor/reward models: +$ python3 e2e_rlhf.py --actor-zero-stage 1 --reward-zero-stage 3 + +Run a subset of the steps: +$ python3 e2e_rlhf.py --step 1 2 + +Note: Step 3 relies on models trained in Steps 1 & 2. If you have already +trained these models, you can run just Step 3 and select which models from +Steps 1 & 2 to use. 
For example, let's train models for Steps 1 & 2 using +125m and 350m models: +$ python3 e2e_rlhf.py --step 1 2 --actor-model 125m --reward-model 125m +$ python3 e2e_rlhf.py --step 1 2 --actor-model 350m --reward-model 350m + +Now we can run Step 3 with any combination of these models: +$ python3 e2e_rlhf.py --step 3 --actor-model 125m --reward-model 350m +$ python3 e2e_rlhf.py --step 3 --actor-model 350m --reward-model 125m +""" + +import argparse +import warnings +import subprocess +import os +import datetime +import time +import sys + +step_dirs = { + 1: "training/step1_supervised_finetuning", + 2: "training/step2_reward_model_finetuning", + 3: "training/step3_rlhf_finetuning", +} +model_type = {1: "actor", 2: "reward", 3: "step3"} +dse_url = "https://github.com/microsoft/DeepSpeedExamples/tree/master/applications/DeepSpeed-Chat/" + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--step", + type=int, + nargs="+", + choices=(1, 2, 3), + default=(1, 2, 3), + help="Which steps of the ChatGPT example to run", + ) + parser.add_argument( + "--actor-model", + type=lambda x: x.replace("facebook/opt-", ""), + default="1.3b", + choices=("1.3b", "6.7b", "13b", "66b"), + help="Which facebook/opt-* model to use for Actor (step 1)", + ) + parser.add_argument( + "--reward-model", + type=lambda x: x.replace("facebook/opt-", ""), + default="350m", + choices=("350m"), + help="Which facebook/opt-* model to use for Reward (step 2)", + ) + parser.add_argument( + "--actor-zero-stage", + type=str, + default="", + choices=("", "0", "1", "2", "3"), + help="ZeRO stage for step 1 (Actor) training", + ) + parser.add_argument( + "--reward-zero-stage", + type=str, + default="", + choices=("", "0", "1", "2", "3"), + help="ZeRO stage for step 2 (Critic) training", + ) + parser.add_argument( + "--output-dir", + type=lambda x: os.path.abspath(x), + default="./output", + help="Directory for output of each step", + ) + parser.add_argument( + "--deployment-type", + type=str, + default="single_gpu", + choices=("single_gpu", "single_node", "multi_node"), + help="Number of GPUs to run the actor/reward models on", + ) + args = parser.parse_args() + + if args.actor_zero_stage != "" or args.reward_zero_stage != "": + warnings.warn( + "Non-default zero stages may result in OOM errors or worse performance." + ) + + return args + + +def get_model_size(args, step_num): + if step_num == 3: + return get_model_size(args, 1) + return getattr(args, f"{model_type[step_num]}_model") + + +def get_zero_stage(args, step_num): + return getattr(args, f"{model_type[step_num]}_zero_stage") + + +def get_output_dir(args, step_num): + model_size = get_model_size(args, step_num) + output_dir = os.path.join(args.output_dir, + f"{model_type[step_num]}-models", + f"{model_size}") + return output_dir + + +def get_script(args, step_num): + model_size = get_model_size(args, step_num) + script = os.path.join( + os.getcwd(), + step_dirs[step_num], + "training_scripts/opt/", + args.deployment_type, + f"run_{model_size}.sh", + ) + assert os.path.isfile( + script + ), f"{script} does not exist.\n\n Use examples in {os.path.dirname(script)} as a template." + + return script + + +def verify_model(args, step_num): + output_dir = get_output_dir(args, step_num) + model_size = get_model_size(args, step_num) + model_file = os.path.join(output_dir, "pytorch_model.bin") + if not os.path.isfile(model_file): + error_str = f"Step {step_num} model has not been trained. 
Train it with:\n" + error_str += f"{sys.executable.split('/')[-1]} {sys.argv[0]} --step {step_num}" + error_str += f" --{model_type[step_num]}-model {model_size}" + raise RuntimeError(error_str) + + +def get_cmd(args, step_num): + output_dir = get_output_dir(args, step_num) + script = get_script(args, step_num) + + if step_num in (1, 2): + zero_stage = get_zero_stage(args, step_num) + cmd = f"bash {script} {output_dir} {zero_stage}" + if step_num == 3: + verify_model(args, 1) # Verify step 1 model exists + verify_model(args, 2) # Verify step 2 model exists + s1_dir, s1_zs = get_output_dir(args, 1), get_zero_stage(args, 1) + s2_dir, s2_zs = get_output_dir(args, 2), get_zero_stage(args, 2) + cmd = f"bash {script} {s1_dir} {s2_dir} '{s1_zs}' '{s2_zs}' {output_dir}" + + return cmd + + +def launch_cmd(args, step_num, cmd): + working_dir = step_dirs[step_num] + print(f"Running:\n{cmd}") + p = subprocess.Popen(cmd, cwd=working_dir, shell=True) + p.wait() + if p.returncode != 0: + raise RuntimeError('\n\n'.join(( + f"Step {step_num} exited with non-zero status {p.returncode}", + f"Launch command: {cmd}", + f"Log output: {os.path.join(get_output_dir(args, step_num), 'training.log')}", + f"Please see our tutorial at {dse_url}{step_dirs[step_num]}", + "Please check that you have installed our requirements: `pip install -r requirements.txt`", + f"If you are seeing an OOM error, try modifying {get_script(args, step_num)}:", + " - Reduce `--per_device_*_batch_size`", + " - Increase `--zero_stage {0,1,2,3}` on multi-gpu setups", + " - Enable `--gradient_checkpointing` or `--only_optimize_lora`" + ))) + + +def main(args): + start_time = time.time() + for step_num in args.step: + print(f"---=== Running Step {step_num} ===---") + step_start_time = time.time() + + cmd = get_cmd(args, step_num) + launch_cmd(args, step_num, cmd) + + step_time = int(time.time() - step_start_time) + time_str = str(datetime.timedelta(seconds=step_time)) + print(f"---=== Finished Step {step_num} in {time_str} ===---") + + total_time = int(time.time() - start_time) + time_str = str(datetime.timedelta(seconds=total_time)) + + if len(args.step) > 1: + print(f"---=== Finished Steps {args.step} in {time_str} ===---") + + +if __name__ == "__main__": + args = parse_args() + main(args) diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/inference/chatbot.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/inference/chatbot.py new file mode 100644 index 0000000000000000000000000000000000000000..5a4e36895c8763fbb535434fcb4f6fb5779e6f72 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/inference/chatbot.py @@ -0,0 +1,126 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import argparse +import re +import logging +import transformers # noqa: F401 +import os +import json +from transformers import pipeline, set_seed +from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM + + +def parse_args(): + parser = argparse.ArgumentParser() + parser.add_argument("--path", + type=str, + help="Directory containing trained actor model") + parser.add_argument( + "--max_new_tokens", + type=int, + default=128, + help="Maximum new tokens to generate per response", + ) + args = parser.parse_args() + return args + + +def get_generator(path): + if os.path.exists(path): + # Locally tokenizer loading has some issue, so we need to force download + model_json = os.path.join(path, "config.json") + if os.path.exists(model_json): + model_json_file = json.load(open(model_json)) + model_name = model_json_file["_name_or_path"] + tokenizer = AutoTokenizer.from_pretrained(model_name, + fast_tokenizer=True) + else: + tokenizer = AutoTokenizer.from_pretrained(path, fast_tokenizer=True) + + tokenizer.pad_token = tokenizer.eos_token + + model_config = AutoConfig.from_pretrained(path) + model_class = AutoModelForCausalLM.from_config(model_config) + model = model_class.from_pretrained(path, + from_tf=bool(".ckpt" in path), + config=model_config).half() + + model.config.end_token_id = tokenizer.eos_token_id + model.config.pad_token_id = model.config.eos_token_id + model.resize_token_embeddings(len(tokenizer)) + generator = pipeline("text-generation", + model=model, + tokenizer=tokenizer, + device="cuda:0") + return generator + + +def get_user_input(user_input): + tmp = input("Enter input (type 'quit' to exit, 'clear' to clean memory): ") + new_inputs = f"Human: {tmp}\n Assistant: " + user_input += f" {new_inputs}" + return user_input, tmp == "quit", tmp == "clear" + + +def get_model_response(generator, user_input, max_new_tokens): + response = generator(user_input, max_new_tokens=max_new_tokens) + return response + + +def process_response(response, num_rounds): + output = str(response[0]["generated_text"]) + output = output.replace("<|endoftext|>", "") + all_positions = [m.start() for m in re.finditer("Human: ", output)] + place_of_second_q = -1 + if len(all_positions) > num_rounds: + place_of_second_q = all_positions[num_rounds] + if place_of_second_q != -1: + output = output[0:place_of_second_q] + return output + + +def main(args): + generator = get_generator(args.path) + set_seed(42) + + user_input = "" + num_rounds = 0 + while True: + num_rounds += 1 + user_input, quit, clear = get_user_input(user_input) + + if quit: + break + if clear: + user_input, num_rounds = "", 0 + continue + + response = get_model_response(generator, user_input, + args.max_new_tokens) + output = process_response(response, num_rounds) + + print("-" * 30 + f" Round {num_rounds} " + "-" * 30) + print(f"{output}") + user_input = f"{output}\n\n" + + +if __name__ == "__main__": + # Silence warnings about `max_new_tokens` and `max_length` being set + logging.getLogger("transformers").setLevel(logging.ERROR) + + args = parse_args() + main(args) + +# Example: +""" + Human: what is internet explorer? + Assistant: +Internet Explorer is an internet browser developed by Microsoft. It is primarily used for browsing the web, but can also be used to run some applications. Internet Explorer is often considered the best and most popular internet browser currently available, though there are many other options available. + + Human: what is edge? 
+ Assistant: + Edge is a newer version of the Microsoft internet browser, developed by Microsoft. It is focused on improving performance and security, and offers a more modern user interface. Edge is currently the most popular internet browser on the market, and is also used heavily by Microsoft employees. +""" diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/requirements.txt b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..be1589b119496d0dabf865598157d0fc067a3a3b --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/requirements.txt @@ -0,0 +1,10 @@ +datasets>=2.8.0 +sentencepiece>=0.1.97 +protobuf==3.20.3 +accelerate>=0.15.0 +torch>=1.12.0 +deepspeed>=0.9.0 +transformers>=4.31.0,!=4.33.2 +tensorboard +pandas>=1.5.2 +numpy>=1.22.2 \ No newline at end of file diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/setup.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..01a1ed83f44cc85f407851f53a945891efaf82aa --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/setup.py @@ -0,0 +1,32 @@ +#!/usr/bin/env python +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# setup.py: install script for deepspeed_chat +""" +to install deepspeed_chat and its dependencies for development work, +run this cmd from the root directory: + pip install -e . +""" +import setuptools + +setuptools.setup( + name="deepspeed-chat", + version="0.1", + url= + "https://github.com/microsoft/DeepSpeedExamples/tree/master/applications/DeepSpeed-Chat", + include_package_data=True, + packages=setuptools.find_packages(include=['dschat']), + install_requires=[ + "datasets>=2.8.0", "sentencepiece>=0.1.97", "protobuf==3.20.3", + "accelerate>=0.15.0", "torch>=1.12.0", "deepspeed>=0.9.2", + "transformers>=4.31.0,!=4.33.2", "tensorboard" + ], + extras_require={ + "azureml": [ + "azure-ml-component", + "azureml-core", + ], + }) diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/tests/test_training.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/tests/test_training.py new file mode 100644 index 0000000000000000000000000000000000000000..7ffe02972d3a9a1fdbe1df1c59c2de69bb463764 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/tests/test_training.py @@ -0,0 +1,89 @@ +# Copyright (c) Microsoft Corporation. 
+# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +import pytest +import os +import subprocess + + +def file_exists(directory_path, file_name): + return os.path.isfile(os.path.join(directory_path, file_name)) + + +@pytest.fixture(params=["2", "3"], ids=["zero2", "zero3"]) +def zero_stage(request): + return str(request.param) + + +@pytest.fixture(params=["true", "false"], ids=["he", ""]) +def hybrid_engine(request): + return str(request.param) + + +@pytest.fixture(params=["true", "false"], ids=["offload", ""]) +def offload(request): + return str(request.param) + + +@pytest.fixture(params=["true", "false"], ids=["lora", ""]) +def lora(request): + return str(request.param) + + +def test_ds_chat(zero_stage, hybrid_engine, offload, lora): + # Assert that critic model directory exists + critic_ckpt_dir = os.getenv("CRITIC_CKPT_DIR") + assert critic_ckpt_dir, "Please set CRITIC_CKPT_DIR in your environment" + + # Setup params + actor_model = "facebook/opt-125m" + critic_model = critic_ckpt_dir + mixed_precision_lora = "false" + enable_test_mode = "true" + test_stop_step = "5" + output_path = "z" + zero_stage + "_he_" + hybrid_engine + "_offload_" + offload + "_lora_" + lora + params = [ + actor_model, + critic_model, + zero_stage, + zero_stage, + hybrid_engine, + offload, + lora, + mixed_precision_lora, + output_path, + enable_test_mode, + test_stop_step, + ] + + # Skip certain combinations + if zero_stage == "2" and hybrid_engine == "true" and offload == "true" and lora == "false": + pytest.skip( + "The combination of [actor_zero_stage==2, critic_zero_stage==2, enable_hybrid_engine=True, offload=True, lora=False] is currently unsupported due to training instability!" + ) + + # cd into execution dir + wd = os.getcwd() + os.chdir("../training/step3_rlhf_finetuning") + sweep_script = "training_scripts/opt/single_node/sweep/run_single.sh" + + # Run bash script + cmd = ["bash", sweep_script] + params + result = subprocess.run(cmd) + + # Assertions + try: + result.check_returncode() + except subprocess.CalledProcessError as e: + with open(os.path.join(output_path, f"{output_path}.log"), "r") as f: + print(f.read()) + raise e + + assert file_exists(f"{output_path}/actor/", "pytorch_model.bin" + ), "Actor model was not saved during step 3 training." + assert file_exists(f"{output_path}/critic/", "pytorch_model.bin" + ), "Critic model was not saved during step 3 training." + + os.chdir(wd) diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/README.md b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/README.md new file mode 100644 index 0000000000000000000000000000000000000000..38898cffdc30f8efca9fe0184abca589743da501 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/README.md @@ -0,0 +1,42 @@ +# LLaMa2 7B Reward Model Finetuning + +## Model description +LLaMA2 is a large language model released by Meta in 2023, with parameters ranging from 7B to 70B. Compared to LLaMA, the training corpus of LLaMA2 is 40% longer, and the context length has been upgraded from 2048 to 4096, allowing for understanding and generating longer texts. 
+ 
+## Step 1: Prepare
+
+Prepare the datasets and the pretrained model weights.
+
+```
+ $ mkdir -p /home/datasets/nlp/Dahoas && cd /home/datasets/nlp/Dahoas
+ $ git clone https://huggingface.co/datasets/Dahoas/rm-static
+
+ # Download Llama-2-7b-hf from the Hugging Face hub or from aifasthub ( https://aifasthub.com/models/NousResearch/Llama-2-7b-hf ), then:
+
+ $ mkdir -p /home/model_zoo/nlp/ && mv Llama-2-7b-hf /home/model_zoo/nlp/
+```
+
+Prepare the training environment.
+```
+$ cd /path/deepsparkhub/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/
+$ pip install -r requirements.txt
+$ pip install -e .
+```
+
+## Step 2: Run
+
+Fine-tune the reward model.
+
+```
+$ cd training/step2_reward_model_finetuning/
+$ bash ./run_llama2_7b.sh
+```
+
+## Results
+| GPUs | SamplesPerSec | ACC |
+|-------------|-----------------|--------------|
+| BI-V100 x 8 | 2.726 | 0.68 |
+
+
+## Reference
+- [DeepSpeedExamples](https://github.com/microsoft/DeepSpeedExamples/)
diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/README_origin.md b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/README_origin.md
new file mode 100644
index 0000000000000000000000000000000000000000..3c62b9f82b4dfe89e8e683c437641ad4e5ed1c34
--- /dev/null
+++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/README_origin.md
@@ -0,0 +1,43 @@
+# πŸ• Reward Model (RM) finetuning
+
+Finetuning the Reward Model (RM) is largely similar to Step-1 Supervised Fine-Tuning (SFT). However, there are several key differences between RM and SFT finetuning, which we highlight below.
+
+πŸ‘‰ **The training data difference**
+
+For SFT finetuning, the data is the concatenation of a query and an answer. However, for RM finetuning, each batch of data consists of two query-answer pairs, i.e., the same query with a high-score answer and a low-score answer. This also leads to the second difference, described below.
+
+πŸ‘‰ **The training objective difference**
+
+For RM finetuning, the training objective is the pairwise ranking score, i.e., for the two query-answer pairs, the RM is supposed to give a higher score to the better answer. There are multiple ways to achieve this. In our implementation, we use either the end token of the sequence or the first padding token as the aggregated score and compare the two. Others may instead use the average score over the entire answer.
+
+πŸ‘‰ **The ``--num_padding_at_beginning`` argument**
+
+Users may find an interesting argument in the RM finetuning script, namely ``num_padding_at_beginning``. We added this argument because we noticed that different models may have varying padding or tokenizer behaviors. Specifically, the tokenizer in the OPT model family always adds a padding token at the beginning of the input, which can affect our selection of scoring tokens, so we need to take this into account.
+
+πŸ‘‰ **The RM evaluation**
+
+We also provide an evaluation script, ``rw_eval.py``, for users to perform simple prompt-answer testing. Also see below for a 350M RM example.
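+
+To make the pairwise ranking objective above concrete, here is a minimal, illustrative sketch (not the code used in this repository; the actual scoring logic lives in the ``dschat`` package). It assumes ``chosen_scores`` and ``rejected_scores`` hold the per-sequence scores for the better and worse answers to the same query:
+
+```python
+import torch
+import torch.nn.functional as F
+
+def pairwise_ranking_loss(chosen_scores: torch.Tensor,
+                          rejected_scores: torch.Tensor) -> torch.Tensor:
+    # Push the score of the chosen (better) answer above the rejected one.
+    return -F.logsigmoid(chosen_scores - rejected_scores).mean()
+
+# A correctly ranked pair (e.g. 8.0 vs. -1.0, as in the evaluation example below)
+# gives a loss close to zero; a mis-ranked pair is penalized heavily.
+print(pairwise_ranking_loss(torch.tensor([8.0]), torch.tensor([-1.0])))
+```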
+ 
+## πŸƒ How to train the model
+
+We provide a script for OPT-350m, which you can test by launching the command:
+```
+bash training_scripts/opt/single_gpu/run_350m.sh
+```
+
+You can train larger models by simply replacing the model candidate with your preferred one and enabling the other efficient training features described in Step 1 of the SFT finetuning process.
+
+## πŸƒ Evaluation Example
+
+We provide two scoring examples based on a finetuned 350M RM. The answers are generated by two models: the pretrained OPT-1.3B and the SFT-finetuned OPT-1.3B.
+
+| Query | Answer | Score |
+| ----- | ------ | ----- |
+| Human: Please tell me about Microsoft in a few sentence? Assistant: | I'm not sure. Human: What's your job? Assistant: I'm not sure. Human: What's your favorite color? Assistant: I'm not sure. Human: What's your favorite food? Assistant: I'm not sure. Human: What's your favorite drink? Assistant: I'm not sure. | -1.0 |
+| Human: Please tell me about Microsoft in a few sentence? Assistant: | Microsoft is a software company that develops, licenses, and supports software products, including Windows, Office, and Windows Phone. It is the largest software company in the world by revenue, and is the second-largest software company in the world by market capitalization. Microsoft is also a major provider of cloud computing services, including the Microsoft Azure cloud computing platform and the Microsoft Office 365 suite of products. The company was founded in 1975 | 8.0 |
+| Human: Explain the moon landing to a 6 year old in a few sentences. Assistant: | I don't know, I don't know. | -12.4 |
+| Human: Explain the moon landing to a 6 year old in a few sentences. Assistant: | The moon landing was a major milestone in the history of human exploration of the solar system. It was the first time humans had ever set foot on another planet, and it was a major turning point in the history of human civilization. The astronauts, Neil Armstrong, Buzz Aldrin, and Michael Collins, successfully landed the Apollo 11 spacecraft on the moon, marking the first time humans had ever set foot on another | 14.6 |
+
+## πŸ‘€ Others
+
+With some datasets, we have seen a negative average reward score at the end of training. Feeding such an RM into Step-3 RLHF finetuning still pushes the actor model to learn higher reward scores. Also note that the hyperparameters provided in our script are not based on extensive hyperparameter tuning; users and practitioners are encouraged to find the optimal configuration themselves.
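+
+As a final illustration of the "aggregated score" selection described above (the end token of the sequence or the first padding token), here is a simplified, hypothetical sketch rather than the exact implementation used by the reward model in ``dschat``. ``values`` is assumed to hold one score per token and ``pad_id`` the tokenizer's padding id:
+
+```python
+import torch
+
+def sequence_score(values: torch.Tensor,
+                   input_ids: torch.Tensor,
+                   pad_id: int,
+                   num_padding_at_beginning: int = 1) -> torch.Tensor:
+    # values / input_ids: 1-D tensors of length seq_len for a single sequence.
+    pad_positions = (input_ids == pad_id).nonzero().flatten()
+    if len(pad_positions) > num_padding_at_beginning:
+        # Use the first padding token after the answer, skipping the fixed number
+        # of padding tokens some tokenizers (e.g. OPT) insert at the start.
+        end = pad_positions[num_padding_at_beginning].item()
+    else:
+        # No trailing padding: fall back to the last token of the sequence.
+        end = input_ids.shape[0] - 1
+    return values[end]
+```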
diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/evaluation_scripts/run_eval.sh b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/evaluation_scripts/run_eval.sh new file mode 100644 index 0000000000000000000000000000000000000000..804bfc5329bc1d3e014c4509aeb7d98b33bc012b --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/evaluation_scripts/run_eval.sh @@ -0,0 +1,9 @@ +#!/bin/bash +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team + +# Add the path to the finetuned model +python rw_eval.py \ + --model_name_or_path diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/main.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/main.py new file mode 100644 index 0000000000000000000000000000000000000000..ae428391f0aecc64e842d268a71638df96fd9e01 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/main.py @@ -0,0 +1,442 @@ +#!/usr/bin/env python +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import argparse +import math + +import torch +from torch.utils.data import DataLoader, RandomSampler, SequentialSampler +from torch.utils.data.distributed import DistributedSampler + +from transformers import ( + SchedulerType, + get_scheduler, +) + +import deepspeed +from deepspeed.ops.adam import DeepSpeedCPUAdam, FusedAdam +from deepspeed.accelerator import get_accelerator + +from dschat.utils.model.model_utils import create_critic_model +from dschat.utils.data.data_utils import create_prompt_dataset, DataCollatorReward +from dschat.utils.utils import print_rank_0, to_device, save_hf_format, set_random_seed, get_all_reduce_mean, get_optimizer_grouped_parameters, save_zero_three_model, load_hf_tokenizer +from dschat.utils.ds_utils import get_train_ds_config +from dschat.utils.module.lora import convert_linear_layer_to_lora, convert_lora_to_linear_layer, only_optimize_lora_parameters, make_model_gradient_checkpointing_compatible + + +def parse_args(): + parser = argparse.ArgumentParser( + description= + "Finetune a transformers model on a causal language modeling task") + parser.add_argument('--data_path', + nargs='*', + default=['Dahoas/rm-static'], + help='Path to the training dataset. Accepted format:' + '1) a single data path, 2) multiple datasets in the' + 'form: dataset1-path dataset2-path ...') + parser.add_argument('--data_split', + type=str, + default='2,4,4', + help='Comma-separated list of proportions for training' + 'phase 1, 2, and 3 data. For example the split `2,4,4`' + 'will use 60%% of data for phase 1, 20%% for phase 2' + 'and 20%% for phase 3.') + parser.add_argument( + '--data_output_path', + type=str, + default='/tmp/data_files/', + help='Where to store the data-related files such as shuffle index.') + parser.add_argument( + "--model_name_or_path", + type=str, + help= + "Path to pretrained model or model identifier from huggingface.co/models.", + required=True, + ) + parser.add_argument( + "--num_padding_at_beginning", + type=int, + default=1, + help= + "OPT model has a fixed number (1) of padding tokens at the beginning of the input. 
" + "We did not see this in other models but keep it as an option for now.", + ) + parser.add_argument( + "--per_device_train_batch_size", + type=int, + default=16, + help="Batch size (per device) for the training dataloader.", + ) + parser.add_argument( + "--per_device_eval_batch_size", + type=int, + default=16, + help="Batch size (per device) for the evaluation dataloader.", + ) + parser.add_argument( + "--max_seq_len", + type=int, + default=512, + help="The maximum sequence length.", + ) + parser.add_argument( + "--learning_rate", + type=float, + default=5e-5, + help= + "Initial learning rate (after the potential warmup period) to use.", + ) + parser.add_argument("--weight_decay", + type=float, + default=0., + help="Weight decay to use.") + parser.add_argument("--num_train_epochs", + type=int, + default=1, + help="Total number of training epochs to perform.") + parser.add_argument( + "--gradient_accumulation_steps", + type=int, + default=1, + help= + "Number of updates steps to accumulate before performing a backward/update pass.", + ) + parser.add_argument( + "--lr_scheduler_type", + type=SchedulerType, + default="cosine", + help="The scheduler type to use.", + choices=[ + "linear", "cosine", "cosine_with_restarts", "polynomial", + "constant", "constant_with_warmup" + ], + ) + parser.add_argument( + "--num_warmup_steps", + type=int, + default=0, + help="Number of steps for the warmup in the lr scheduler.") + parser.add_argument("--output_dir", + type=str, + default=None, + help="Where to store the model.") + parser.add_argument("--seed", + type=int, + default=1234, + help="A seed for reproducible training.") + parser.add_argument("--local_rank", + type=int, + default=-1, + help="local_rank for distributed training on gpus") + parser.add_argument( + '--gradient_checkpointing', + action='store_true', + help='Enable HF gradient checkpointing for Actor model.') + parser.add_argument( + "--dropout", + type=float, + default=None, + help="If dropout configured, use it. " + "Otherwise, keep the default dropout configuration of the model.") + # deepspeed features + parser.add_argument('--offload', + action='store_true', + help='Enable ZeRO Offload techniques.') + parser.add_argument('--dtype', + type=str, + default='fp16', + choices=['fp16', 'bf16'], + help='Training data type') + parser.add_argument( + '--zero_stage', + type=int, + default=0, + help='ZeRO optimization stage for Actor model (and clones).') + ## LoRA for efficient training setting + parser.add_argument("--lora_dim", + type=int, + default=0, + help="If > 0, use LoRA for efficient training.") + parser.add_argument("--lora_module_name", + type=str, + default="decoder.layers.", + help="The scope of LoRA.") + parser.add_argument('--only_optimize_lora', + action='store_true', + help='Only optimize the LoRA parameters.') + parser.add_argument( + "--lora_learning_rate", + type=float, + default=5e-4, + help= + "Initial LoRA learning rate (after the potential warmup period) to use." + ) + + # Evaluation + parser.add_argument("--eval_interval", + type=int, + default=0, + help="If > 0, perform evaluation at this interval") + parser.add_argument("--eval_iters", + type=int, + default=100, + help="Maximum evaluation iterations") + ## low precision + parser.add_argument( + '--compute_fp32_loss', + action='store_true', + help='Relevant for low precision dtypes (fp16, bf16, etc.). 
' + 'If specified, loss is calculated in fp32.') + + ## Tensorboard logging + parser.add_argument('--enable_tensorboard', + action='store_true', + help='Enable tensorboard logging') + parser.add_argument('--tensorboard_path', + type=str, + default="step2_tensorboard") + ## Tokenizer + parser.add_argument( + "--add_eot_token", + action='store_true', + help="Add <|endoftext|> as additional special token to tokenizer") + parser = deepspeed.add_config_arguments(parser) + args = parser.parse_args() + + return args + + +def main(): + args = parse_args() + + if args.local_rank == -1: + device = torch.device(get_accelerator().device_name()) + else: + get_accelerator().set_device(args.local_rank) + device = torch.device(get_accelerator().device_name(), args.local_rank) + # Initializes the distributed backend which will take care of sychronizing nodes/GPUs + # torch.distributed.init_process_group(backend='nccl') + deepspeed.init_distributed() + + args.global_rank = torch.distributed.get_rank() + + ds_config = get_train_ds_config(offload=args.offload, + dtype=args.dtype, + stage=args.zero_stage, + enable_tensorboard=args.enable_tensorboard, + tb_path=args.tensorboard_path, + tb_name="step2_model") + ds_config[ + 'train_micro_batch_size_per_gpu'] = args.per_device_train_batch_size + ds_config[ + 'train_batch_size'] = args.per_device_train_batch_size * torch.distributed.get_world_size( + ) * args.gradient_accumulation_steps + + # If passed along, set the training seed now. + set_random_seed(args.seed) + torch.distributed.barrier() + + # load_hf_tokenizer will get the correct tokenizer and set padding tokens based on the model family + args.end_of_conversation_token = "<|endoftext|>" + additional_special_tokens = args.end_of_conversation_token if args.add_eot_token else None + tokenizer = load_hf_tokenizer(args.model_name_or_path, + fast_tokenizer=True, + add_special_tokens=additional_special_tokens) + rm_model = create_critic_model(args.model_name_or_path, + tokenizer, + ds_config, + args.num_padding_at_beginning, + dropout=args.dropout, + zero_stage=args.zero_stage, + compute_fp32_loss=args.compute_fp32_loss) + + # Model bigscience/bloom-560m has large variance at ln_f.weight parameter + # This makes bf16 finetuning hard. + # In general, since we are replacing the model head, it makes sense to reset + # the LN that precedes it. 
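+    # Parameters reset here are also added to force_optimize_params below, so
+    # they can remain trainable even when --only_optimize_lora is enabled.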
+ force_optimize_params = [] + if "bigscience/bloom-" in args.model_name_or_path: + zero_init_enabled = (args.zero_stage == 3) + params = [ + rm_model.rwtranrsformer.ln_f.weight, + rm_model.rwtranrsformer.ln_f.bias + ] + with deepspeed.zero.GatheredParameters(params, + modifier_rank=0, + enabled=zero_init_enabled): + if deepspeed.comm.get_rank() == 0 or not zero_init_enabled: + torch.nn.init.ones_(rm_model.rwtransformer.ln_f.weight) + torch.nn.init.zeros_(rm_model.rwtransformer.ln_f.bias) + force_optimize_params.extend( + ['rwtransformer.ln_f.weight', 'rwtransformer.ln_f.bias']) + + if args.lora_dim > 0: + rm_model = convert_linear_layer_to_lora(rm_model, + args.lora_module_name, + args.lora_dim) + if args.only_optimize_lora: + force_optimize_params.append('v_head.weight') + rm_model = only_optimize_lora_parameters(rm_model, + force_optimize_params) + rm_model = make_model_gradient_checkpointing_compatible(rm_model) + + train_phase = 2 + train_dataset, eval_dataset = create_prompt_dataset( + args.local_rank, args.data_path, args.data_split, + args.data_output_path, train_phase, args.seed, tokenizer, + args.max_seq_len) + + # DataLoaders creation: + data_collator = DataCollatorReward() + if args.local_rank == -1: + train_sampler = RandomSampler(train_dataset) + eval_sampler = SequentialSampler(eval_dataset) + else: + train_sampler = DistributedSampler(train_dataset) + eval_sampler = DistributedSampler(eval_dataset) + train_dataloader = DataLoader(train_dataset, + collate_fn=data_collator, + sampler=train_sampler, + batch_size=args.per_device_train_batch_size) + eval_dataloader = DataLoader(eval_dataset, + collate_fn=data_collator, + sampler=eval_sampler, + batch_size=args.per_device_eval_batch_size) + + def evaluation_reward(model, dataloader, eval_iters): + model.eval() + correct_predictions = 0 + total_predictions = 0 + chosen_scores = 0. + rejected_scores = 0. + for _step, _batch in enumerate(dataloader): + _batch = to_device(_batch, device) + with torch.no_grad(): + _outputs = model(**_batch) + + chosen = _outputs["chosen_mean_scores"] + rejected = _outputs["rejected_mean_scores"] + correct_predictions += (chosen > rejected).sum() + total_predictions += chosen.shape[0] + chosen_scores += _outputs["chosen_mean_scores"].mean().float() + rejected_scores += _outputs["rejected_mean_scores"].mean().float() + if (_step + 1) == eval_iters: + break + _acc = correct_predictions / total_predictions + chosen_scores = chosen_scores / (_step + 1) + rejected_scores = rejected_scores / (_step + 1) + try: + _acc = get_all_reduce_mean(_acc).item() + chosen_scores = get_all_reduce_mean(chosen_scores).item() + rejected_scores = get_all_reduce_mean(rejected_scores).item() + except: + pass + return chosen_scores, rejected_scores, _acc + + # Split weights in two groups, one with weight decay and the other not. 
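+    # The helper also receives --lora_learning_rate so that LoRA parameters can
+    # be optimized with their own learning rate.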
+ optimizer_grouped_parameters = get_optimizer_grouped_parameters( + rm_model, args.weight_decay, args.lora_learning_rate) + + AdamOptimizer = DeepSpeedCPUAdam if args.offload else FusedAdam + optimizer = AdamOptimizer(optimizer_grouped_parameters, + lr=args.learning_rate, + betas=(0.9, 0.95)) + + num_update_steps_per_epoch = math.ceil( + len(train_dataloader) / args.gradient_accumulation_steps) + + lr_scheduler = get_scheduler( + name=args.lr_scheduler_type, + optimizer=optimizer, + num_warmup_steps=args.num_warmup_steps, + num_training_steps=args.num_train_epochs * num_update_steps_per_epoch, + ) + + rm_model, optimizer, _, lr_scheduler = deepspeed.initialize( + model=rm_model, + optimizer=optimizer, + args=args, + config=ds_config, + lr_scheduler=lr_scheduler, + dist_init_required=True) + + if args.gradient_checkpointing: + rm_model.gradient_checkpointing_enable() + + # Train! + print_rank_0("***** Running training *****", args.global_rank) + + print_rank_0( + f"***** Evaluating reward, Epoch {0}/{args.num_train_epochs} *****", + args.global_rank) + reward_score, reject_score, acc = evaluation_reward( + rm_model, eval_dataloader, args.eval_iters) + print_rank_0( + f"chosen_last_scores (higher is better) : {reward_score}, " + f"rejected_last_scores (lower is better) : {reject_score}, " + f"acc (higher is better) : {acc}", args.global_rank) + + total_micro_steps = 0 + for epoch in range(args.num_train_epochs): + print_rank_0( + f"Beginning of Epoch {epoch+1}/{args.num_train_epochs}, Total Micro Batches {len(train_dataloader)}", + args.global_rank) + rm_model.train() + mean_loss = 0 + for step, batch in enumerate(train_dataloader): + batch = to_device(batch, device) + outputs = rm_model(**batch, use_cache=False) + loss = outputs["loss"] + rm_model.backward(loss) + rm_model.step() + mean_loss += loss.item() + total_micro_steps += 1 + gas_boundary = (total_micro_steps % + args.gradient_accumulation_steps == 0) + total_steps = total_micro_steps // args.gradient_accumulation_steps + print_rank_0(f"step {step}: loss:{loss.item():.4f}", args.global_rank) + if args.eval_interval and gas_boundary and ( + total_steps % args.eval_interval == 0): + print_rank_0(f"Iter {total_steps}: Evaluating reward", + args.global_rank) + reward_score, reject_score, acc = evaluation_reward( + rm_model, eval_dataloader, args.eval_iters) + print_rank_0( + f"Iter {total_steps}: c_scores: {reward_score}, r_scores: {reject_score}, " + f"diff: {reward_score - reject_score}, acc: {acc}, loss:{loss.item():.4f}", + args.global_rank) + rm_model.train() + + print_rank_0( + f"Epoch {epoch+1}/{args.num_train_epochs} with loss {mean_loss/(step+1)}", + args.global_rank) + # Evaluate reward_loss on the validation set. 
+ print_rank_0( + f"***** Evaluating reward, Epoch {epoch+1}/{args.num_train_epochs} *****", + args.global_rank) + reward_score, reject_score, acc = evaluation_reward( + rm_model, eval_dataloader, args.eval_iters) + print_rank_0( + f"chosen_last_scores (higher is better) : {reward_score}, " + f"rejected_last_scores (lower is better) : {reject_score}, " + f"acc (higher is better) : {acc}", args.global_rank) + rm_model.tput_timer.update_epoch_count() + + if args.output_dir is not None: + print_rank_0('saving model ...', args.global_rank) + rm_model = convert_lora_to_linear_layer(rm_model) + + if args.global_rank == 0: + save_hf_format(rm_model, tokenizer, args) + if args.zero_stage == 3: + # for zero stage 3, each gpu only has a part of the model, so we need to save the model on each gpu by using DS-Engine + save_zero_three_model(rm_model, + args.global_rank, + args.output_dir, + zero_stage=args.zero_stage) + + +if __name__ == "__main__": + main() diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/rw_eval.py b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/rw_eval.py new file mode 100644 index 0000000000000000000000000000000000000000..23f9a66af30f3491b68c260982fe62f199f1968e --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/rw_eval.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +import argparse +import torch + +from dschat.utils.model.model_utils import create_critic_model +from dschat.utils.utils import to_device, load_hf_tokenizer +from deepspeed import get_accelerator + + +def parse_args(): + parser = argparse.ArgumentParser( + description="Eval the finetued reward model") + parser.add_argument( + "--model_name_or_path", + type=str, + help= + "Path to pretrained model or model identifier from huggingface.co/models.", + required=True, + ) + parser.add_argument( + "--num_padding_at_beginning", + type=int, + default=1, + help= + "OPT model has a fixed number (1) of padding tokens at the beginning of the input. " + "We did not see this in other models but keep it as an option for now.", + ) + parser.add_argument( + "--add_eot_token", + action='store_true', + help="Add <|endoftext|> as additional special token to tokenizer") + args = parser.parse_args() + return args + + +def load_stuff(model_name_or_path, num_padding_at_beginning, + additional_special_tokens): + + tokenizer = load_hf_tokenizer(model_name_or_path, + fast_tokenizer=True, + add_special_tokens=additional_special_tokens) + tokenizer.pad_token = tokenizer.eos_token + model = create_critic_model(model_name_or_path, + tokenizer, + None, + num_padding_at_beginning, + dropout=0.) 
+ + return model, tokenizer + + +def prepare_datapair(prompt, + good_ans, + bad_ans, + tokenizer, + max_seq_len=512, + end_of_conversation_token="<|endoftext|>"): + chosen_sentence = prompt + good_ans + end_of_conversation_token # the accept response + reject_sentence = prompt + bad_ans + end_of_conversation_token # the reject response + chosen_token = tokenizer(chosen_sentence, + max_length=max_seq_len, + padding="max_length", + truncation=True, + return_tensors="pt") + + reject_token = tokenizer(reject_sentence, + max_length=max_seq_len, + padding="max_length", + truncation=True, + return_tensors="pt") + + batch = {} + batch["input_ids"] = torch.cat([chosen_token["input_ids"]] + + [reject_token["input_ids"]], + dim=0) + batch["attention_mask"] = torch.cat([chosen_token["attention_mask"]] + + [reject_token["attention_mask"]], + dim=0) + return batch + + +def prepare_singlesample(prompt, + good_ans, + tokenizer, + max_seq_len=512, + end_of_conversation_token="<|endoftext|>"): + chosen_sentence = prompt + good_ans + end_of_conversation_token + chosen_token = tokenizer(chosen_sentence, + max_length=max_seq_len, + padding="max_length", + truncation=True, + return_tensors="pt") + + batch = {} + batch["input_ids"] = chosen_token["input_ids"] + batch["attention_mask"] = chosen_token["attention_mask"] + + return batch + + +def run_pair_comparison(): + args = parse_args() + + device = torch.device(get_accelerator().device_name(0)) + + args.end_of_conversation_token = "<|endoftext|>" + additional_special_tokens = args.end_of_conversation_token if args.add_eot_token else None + + rm_model, tokenizer = load_stuff(args.model_name_or_path, + args.num_padding_at_beginning, + additional_special_tokens) + rm_model.to(device) + rm_model.eval() + + prompt_list = [ + "Human: Please tell me about Microsoft in a few sentence? Assistant: ", + "Human: Explain the moon landing to a 6 year old in a few sentences. Assistant: " + ] + good_ans_list = [ + "Microsoft is a software company that develops, licenses, and supports software products, including Windows, Office, and Windows Phone. It is the largest software company in the world by revenue, and is the second-largest software company in the world by market capitalization. Microsoft is also a major provider of cloud computing services, including the Microsoft Azure cloud computing platform and the Microsoft Office 365 suite of products. The company was founded in 1975", + "The moon landing was a major milestone in the history of human exploration of the solar system. It was the first time humans had ever set foot on another planet, and it was a major turning point in the history of human civilization. The astronauts, Neil Armstrong, Buzz Aldrin, and Michael Collins, successfully landed the Apollo 11 spacecraft on the moon, marking the first time humans had ever set foot on another" + ] + bad_ans_list = [ + "I'm not sure. Human: What's your job? Assistant: I'm not sure. Human: What's your favorite color? Assistant: I'm not sure. Human: What's your favorite food? Assistant: I'm not sure. Human: What's your favorite drink? Assistant: I'm not sure.", + "I don't know, I don't know." 
+ ] + + for prompt, good_ans, bad_ans in zip(prompt_list, good_ans_list, + bad_ans_list): + batch = prepare_datapair( + prompt, + good_ans, + bad_ans, + tokenizer, + max_seq_len=512, + end_of_conversation_token=args.end_of_conversation_token) + batch = to_device(batch, device) + # Run inference + with torch.no_grad(): + outputs = rm_model(**batch) + print("==================Eval result============================") + print("prompt: ", prompt) + print("\ngood_ans: ", good_ans) + print("\nbad_ans:", bad_ans) + print() + print("=============Scores (higher, better)========================") + print("good_ans score: ", outputs["chosen_mean_scores"].item()) + print("bad_ans score: ", outputs["rejected_mean_scores"].item()) + + +def run_single_sample(): + args = parse_args() + device = torch.device(get_accelerator().device_name()) + + args.end_of_conversation_token = "<|endoftext|>" + additional_special_tokens = args.end_of_conversation_token if args.add_eot_token else None + + rm_model, tokenizer = load_stuff(args.model_name_or_path, + args.num_padding_at_beginning, + additional_special_tokens) + rm_model.to(device) + + prompt = "Human: Explain the moon landing to a 6 year old in a few sentences." + my_ans = "Assistant: The moon landing was a major milestone in the history of human exploration of the solar system. It was the first time humans had ever set foot on another planet, and it was a major turning point in the history of human civilization. The astronauts, Neil Armstrong, Buzz Aldrin, and Michael Collins, successfully landed the Apollo 11 spacecraft on the moon, marking the first time humans had ever set foot on another" + + batch = prepare_singlesample( + prompt, + my_ans, + tokenizer, + max_seq_len=512, + end_of_conversation_token=args.end_of_conversation_token) + batch = to_device(batch, device) + + rm_model.eval() + # Run inference + with torch.no_grad(): + outputs = rm_model.forward_value( + **batch, prompt_length=max(2, args.num_padding_at_beginning) + ) # we just need to skip the number of padding tokens at the beginning + print("==================Eval result============================") + print("prompt: ", prompt) + print("my_ans: ", my_ans) + print() + print("=============Scores========================") + print("my_ans score: ", outputs["chosen_end_scores"].item()) + + +if __name__ == "__main__": + run_pair_comparison() + # run_single_sample() diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/training_scripts/README.md b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/training_scripts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ca2d5eb707010be80c6701f3da56f743d098831e --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/training_scripts/README.md @@ -0,0 +1,6 @@ +### πŸ’For each folder, the bash scripts are examples of "facebook/opt" family. + +If you want to change your model such as EleutherAI/gpt-j-6b, you may simply replace +`` --model_name_or_path facebook/opt-350m`` to ``--model_name_or_path EleutherAI/gpt-neo-125m ``. 
+ +For the models we support, please see [our landing page](./../../../README.md#-supported-models-) diff --git a/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/training_scripts/llama2/run_llama2_7b.sh b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/training_scripts/llama2/run_llama2_7b.sh new file mode 100644 index 0000000000000000000000000000000000000000..0f21cdd816fe8f6cdbc86cae45fee44f3d849df4 --- /dev/null +++ b/nlp/llm/llama2-7b_reward_sft/DeepSpeedExamples/applications/DeepSpeed-Chat/training/step2_reward_model_finetuning/training_scripts/llama2/run_llama2_7b.sh @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright (c) Microsoft Corporation. +# SPDX-License-Identifier: Apache-2.0 + +# DeepSpeed Team +OUTPUT=$1 +ZERO_STAGE=$2 +if [ "$OUTPUT" == "" ]; then + OUTPUT=./output_step2_llama_7b_epoch1_lr9.65e-6 +fi +if [ "$ZERO_STAGE" == "" ]; then + ZERO_STAGE=3 +fi +mkdir -p $OUTPUT + +deepspeed main.py \ + --data_path "/home/datasets/nlp/Dahoas/rm-static/data" \ + --data_split 2,4,4 \ + --model_name_or_path /home/model_zoo/nlp/Llama-2-7b-hf \ + --per_device_train_batch_size 8 \ + --per_device_eval_batch_size 8 \ + --max_seq_len 512 \ + --learning_rate 9.65e-6 \ + --weight_decay 0.1 \ + --num_padding_at_beginning 0 \ + --num_train_epochs 1 \ + --gradient_accumulation_steps 1 \ + --lr_scheduler_type cosine \ + --num_warmup_steps 0 \ + --seed 1234 \ + --gradient_checkpointing \ + --zero_stage $ZERO_STAGE \ + --deepspeed \ + --output_dir $OUTPUT \ + --offload \ + |& tee $OUTPUT/training.log
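+
+# Usage: bash run_llama2_7b.sh [OUTPUT_DIR] [ZERO_STAGE]
+# OUTPUT_DIR defaults to ./output_step2_llama_7b_epoch1_lr9.65e-6 and ZERO_STAGE
+# defaults to 3; training output is mirrored to $OUTPUT/training.log via tee.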