update trainers
This commit is contained in:
@@ -13,7 +13,7 @@ from ...extras.callbacks import FixValueHeadModelCallback
 from ...extras.misc import fix_valuehead_checkpoint
 from ...extras.ploting import plot_loss
 from ...model import load_model, load_tokenizer
-from ..utils import create_custom_optimzer, create_ref_model, create_reward_model
+from ..utils import create_custom_optimzer, create_custom_scheduler, create_ref_model, create_reward_model
 from .trainer import CustomPPOTrainer
@@ -70,7 +70,8 @@ def run_ppo(
     total_train_batch_size = backward_batch_size * finetuning_args.ppo_buffer_size * training_args.world_size
     num_training_steps = training_args.num_train_epochs * math.ceil(len(dataset) / total_train_batch_size)

-    optimizer = create_custom_optimzer(model, training_args, finetuning_args, num_training_steps)
+    optimizer = create_custom_optimzer(model, training_args, finetuning_args)
+    create_custom_scheduler(training_args, num_training_steps, optimizer)
     if optimizer is None:
         optimizer = AdamW(filter(lambda p: p.requires_grad, model.parameters()), lr=training_args.learning_rate)
Reference in New Issue
Block a user