>微調大型語言模型(LLMs),例如 BERT、Llama、BART,以及 Mistral AI 等的模型時,該解決方案使用 Google Drive 存儲中間結果和模型檢查點。這可以確保即使在 Colab 環境重置之後,您的工作依然保留。您需要一個具有足夠 Drive 空間的 Google 帳戶。在 Drive 中創建兩個文件夾:「data」(用於訓練數據集)和「checkpoints」(用於存儲模型檢查點),與下方代碼中的路徑一致。 >在 Colab 中掛載 Google Drive: 首先使用此命令將 Google Drive 掛載到 Colab 筆記本中:
>
解決方案的核心在於創建函數以保存和加載模型檢查點。這些函數將序列化您的模型狀態、優化器、調度程序和其他相關信息。
保存檢查點函數:
>
將這些函數集成到您的訓練循環中。循環在開始訓練之前應檢查是否存在檢查點;如果找到檢查點,將從保存的輪次(epoch)恢復訓練。 >
即使 Colab 會話終止,這種結構也可以無縫恢復訓練。請記住根據您的模型和訓練配置調整路徑與參數。from google.colab import drive
drive.mount('/content/drive')
!ls /content/drive/MyDrive/data
!ls /content/drive/MyDrive/checkpoints
import torch
import os
def save_checkpoint(epoch, model, optimizer, scheduler, loss, model_name, overwrite=True):
checkpoint = {
'epoch': epoch,
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'scheduler_state_dict': scheduler.state_dict(),
'loss': loss
}
direc = get_checkpoint_dir(model_name) #Assumed function to construct directory path
if overwrite:
file_path = os.path.join(direc, 'checkpoint.pth')
else:
file_path = os.path.join(direc, f'epoch_{epoch}_checkpoint.pth')
os.makedirs(direc, exist_ok=True) # Create directory if it doesn't exist
torch.save(checkpoint, file_path)
print(f"Checkpoint saved at epoch {epoch}")
#Example get_checkpoint_dir function (adapt to your needs)
def get_checkpoint_dir(model_name):
return os.path.join("/content/drive/MyDrive/checkpoints", model_name)
import torch
import os
def load_checkpoint(model_name, model, optimizer, scheduler):
direc = get_checkpoint_dir(model_name)
if os.path.exists(direc):
#Find checkpoint with highest epoch (adapt to your naming convention)
checkpoints = [f for f in os.listdir(direc) if f.endswith('.pth')]
if checkpoints:
latest_checkpoint = max(checkpoints, key=lambda x: int(x.split('_')[-2]) if '_' in x else 0)
file_path = os.path.join(direc, latest_checkpoint)
checkpoint = torch.load(file_path, map_location=torch.device('cpu'))
model.load_state_dict(checkpoint['model_state_dict'])
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
epoch = checkpoint['epoch']
loss = checkpoint['loss']
print(f"Checkpoint loaded from epoch {epoch}")
return epoch, loss
else:
print("No checkpoints found in directory.")
return 0, None
else:
print(f"No checkpoint directory found for {model_name}, starting from epoch 1.")
return 0, None
EPOCHS = 10
for exp in experiments: # Assuming 'experiments' is a list of your experiment configurations
model, optimizer, scheduler = initialise_model_components(exp) # Your model initialization function
train_loader, val_loader = generate_data_loaders(exp) # Your data loader function
start_epoch, prev_loss = load_checkpoint(exp, model, optimizer, scheduler)
for epoch in range(start_epoch, EPOCHS):
print(f'Epoch {epoch + 1}/{EPOCHS}')
# YOUR TRAINING CODE HERE... (training loop)
save_checkpoint(epoch + 1, model, optimizer, scheduler, train_loss, exp) #Save after each epoch
以上是在 Google Colab 上訓練語言模型的詳細內容。更多資訊請關注PHP中文網其他相關文章!