Commit e97d67a

Support for Prodigy (a D-Adaptation-style optimizer, for DyLoRA) (kohya-ss#585)

* Update train_util.py for DAdaptLion
* Update train_README-zh.md for DAdaptLion
* Update train_README-ja.md for DAdaptLion
* Add DAdapt V3
* Alignment
* Update train_util.py (experimental)
* Update train_util.py (V3)
* Update train_README-zh.md
* Update train_README-ja.md
* Update train_util.py (fix)
* Update train_util.py
* Support Prodigy
* Add .lower()
1 parent: f0bb3ae

8 files changed: +41 -7 lines

docs/train_README-ja.md (+1)

@@ -622,6 +622,7 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b
 - DAdaptAdanIP : 引数は同上
 - DAdaptLion : 引数は同上
 - DAdaptSGD : 引数は同上
+- Prodigy : https://github.com/konstmish/prodigy
 - AdaFactor : [Transformers AdaFactor](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules)
 - 任意のオプティマイザ

docs/train_README-zh.md (+2 -1)

@@ -555,9 +555,10 @@ masterpiece, best quality, 1boy, in business suit, standing at street, looking b
 - DAdaptAdam : 参数同上
 - DAdaptAdaGrad : 参数同上
 - DAdaptAdan : 参数同上
-- DAdaptAdanIP : 引数は同上
+- DAdaptAdanIP : 参数同上
 - DAdaptLion : 参数同上
 - DAdaptSGD : 参数同上
+- Prodigy : https://github.com/konstmish/prodigy
 - AdaFactor : [Transformers AdaFactor](https://huggingface.co/docs/transformers/main_classes/optimizer_schedules)
 - 任何优化器

fine_tune.py (+1 -1)

@@ -397,7 +397,7 @@ def fn_recursive_set_mem_eff(module: torch.nn.Module):
             current_loss = loss.detach().item()  # 平均なのでbatch sizeは関係ないはず
             if args.logging_dir is not None:
                 logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
-                if args.optimizer_type.lower().startswith("DAdapt".lower()):  # tracking d*lr value
+                if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():  # tracking d*lr value
                     logs["lr/d*lr"] = (
                         lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
                     )
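The same one-line change is repeated below in train_db.py, train_network.py, train_textual_inversion.py, and train_textual_inversion_XTI.py: Prodigy, like the D-Adaptation optimizers, keeps its adaptive step-size estimate `d` in each param group, and `d * lr` is the effective learning rate worth tracking. A minimal sketch of the logging condition in isolation (the helper name is hypothetical, not the scripts' API; the scripts themselves read the optimizer back through `lr_scheduler.optimizers[0]`, or `optimizers[-1]` in train_network.py):

```python
from typing import Dict

from torch.optim import Optimizer


def d_times_lr_logs(optimizer_type: str, optimizer: Optimizer) -> Dict[str, float]:
    """Extra log entries for adaptive optimizers (hypothetical helper).

    Works for any optimizer whose param groups carry a "d" entry,
    as D-Adaptation and Prodigy do.
    """
    logs: Dict[str, float] = {}
    name = optimizer_type.lower()
    if name.startswith("dadapt") or name == "prodigy":
        group = optimizer.param_groups[0]
        logs["lr/d*lr"] = group["d"] * group["lr"]  # effective step size
    return logs
```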

library/train_util.py (+32)

@@ -2808,6 +2808,38 @@ def get_optimizer(args, trainable_params):
 
         optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
 
+    elif optimizer_type == "Prodigy".lower():
+        # Prodigy
+        # check Prodigy is installed
+        try:
+            import prodigyopt
+        except ImportError:
+            raise ImportError("No Prodigy / Prodigy がインストールされていないようです")
+
+        # check lr and lr_count, and print warning
+        actual_lr = lr
+        lr_count = 1
+        if type(trainable_params) == list and type(trainable_params[0]) == dict:
+            lrs = set()
+            actual_lr = trainable_params[0].get("lr", actual_lr)
+            for group in trainable_params:
+                lrs.add(group.get("lr", actual_lr))
+            lr_count = len(lrs)
+
+        if actual_lr <= 0.1:
+            print(
+                f"learning rate is too low. If using Prodigy, set learning rate around 1.0 / 学習率が低すぎるようです。1.0前後の値を指定してください: lr={actual_lr}"
+            )
+            print("recommend option: lr=1.0 / 推奨は1.0です")
+        if lr_count > 1:
+            print(
+                f"when multiple learning rates are specified with Prodigy (e.g. for Text Encoder and U-Net), only the first one will take effect / Prodigyで複数の学習率を指定した場合(Text EncoderとU-Netなど)、最初の学習率のみが有効になります: lr={actual_lr}"
+            )
+
+        print(f"use Prodigy optimizer | {optimizer_kwargs}")
+        optimizer_class = prodigyopt.Prodigy
+        optimizer = optimizer_class(trainable_params, lr=lr, **optimizer_kwargs)
+
     elif optimizer_type == "Adafactor".lower():
         # 引数を確認して適宜補正する
         if "relative_step" not in optimizer_kwargs:
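For reference, a standalone sketch of what this branch ends up doing, assuming the upstream `prodigyopt` package is installed; the toy model and the `weight_decay` value are illustrative, and parameter names other than `lr` come from the upstream project and may differ by version:

```python
import torch
import prodigyopt  # the package the new branch imports

# Stand-in for the trainable parameters (U-Net / Text Encoder in the real scripts).
model = torch.nn.Linear(8, 8)

# Prodigy adapts its own step size, so the nominal lr stays near 1.0,
# in line with the warning printed above for lr <= 0.1.
optimizer = prodigyopt.Prodigy(model.parameters(), lr=1.0, weight_decay=0.01)

for _ in range(3):  # a few dummy steps so the estimate "d" gets updated
    loss = model(torch.randn(4, 8)).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Each param group carries "d"; d * lr is what the training scripts log as "lr/d*lr".
group = optimizer.param_groups[0]
print(group["d"] * group["lr"])
```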

train_db.py (+1 -1)

@@ -384,7 +384,7 @@ def train(args):
             current_loss = loss.detach().item()
             if args.logging_dir is not None:
                 logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
-                if args.optimizer_type.lower().startswith("DAdapt".lower()):  # tracking d*lr value
+                if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():  # tracking d*lr value
                     logs["lr/d*lr"] = (
                         lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
                     )

train_network.py (+2 -2)

@@ -57,7 +57,7 @@ def generate_step_logs(
             logs["lr/textencoder"] = float(lrs[0])
             logs["lr/unet"] = float(lrs[-1])  # may be same to textencoder
 
-        if args.optimizer_type.lower().startswith("DAdapt".lower()):  # tracking d*lr value of unet.
+        if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():  # tracking d*lr value of unet.
             logs["lr/d*lr"] = lr_scheduler.optimizers[-1].param_groups[0]["d"] * lr_scheduler.optimizers[-1].param_groups[0]["lr"]
     else:
         idx = 0
@@ -67,7 +67,7 @@ def generate_step_logs(
 
         for i in range(idx, len(lrs)):
             logs[f"lr/group{i}"] = float(lrs[i])
-            if args.optimizer_type.lower().startswith("DAdapt".lower()):
+            if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():
                 logs[f"lr/d*lr/group{i}"] = (
                     lr_scheduler.optimizers[-1].param_groups[i]["d"] * lr_scheduler.optimizers[-1].param_groups[i]["lr"]
                 )

train_textual_inversion.py (+1 -1)

@@ -476,7 +476,7 @@ def remove_model(old_ckpt_name):
             current_loss = loss.detach().item()
             if args.logging_dir is not None:
                 logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
-                if args.optimizer_type.lower().startswith("DAdapt".lower()):  # tracking d*lr value
+                if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():  # tracking d*lr value
                     logs["lr/d*lr"] = (
                         lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
                     )

train_textual_inversion_XTI.py (+1 -1)

@@ -515,7 +515,7 @@ def remove_model(old_ckpt_name):
             current_loss = loss.detach().item()
             if args.logging_dir is not None:
                 logs = {"loss": current_loss, "lr": float(lr_scheduler.get_last_lr()[0])}
-                if args.optimizer_type.lower().startswith("DAdapt".lower()):  # tracking d*lr value
+                if args.optimizer_type.lower().startswith("DAdapt".lower()) or args.optimizer_type.lower() == "Prodigy".lower():  # tracking d*lr value
                     logs["lr/d*lr"] = (
                         lr_scheduler.optimizers[0].param_groups[0]["d"] * lr_scheduler.optimizers[0].param_groups[0]["lr"]
                     )
