import os
import time

import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Subset, random_split
from torchvision import datasets, models, transforms

# ImageNet normalisation statistics - required when using a pre-trained model.
imagenet_mean = [0.485, 0.456, 0.406]
imagenet_std = [0.229, 0.224, 0.225]

SIZE = 224

# Training transform (deterministic; swap in RandomResizedCrop /
# RandomHorizontalFlip / ColorJitter / RandomRotation here to enable
# data augmentation).
T_train = transforms.Compose([
    transforms.Resize(SIZE),
    transforms.CenterCrop(SIZE),
    transforms.ToTensor(),
    transforms.Normalize(mean=imagenet_mean, std=imagenet_std),
])

# Validation transform: same deterministic resize + centre crop.
T_valid = transforms.Compose([
    transforms.Resize(SIZE),
    transforms.CenterCrop(SIZE),
    transforms.ToTensor(),
    transforms.Normalize(mean=imagenet_mean, std=imagenet_std),
])

################################################################


class ResNet18FTransfer(nn.Module):
    """ResNet-18 transfer model: frozen ImageNet backbone + trainable head.

    Only the final fully-connected layer is trained; the backbone stays in
    eval mode even during training so its BatchNorm running statistics are
    not updated.
    """

    def __init__(self, num_classes: int = 37):
        super().__init__()
        weights = models.ResNet18_Weights.DEFAULT
        self.model = models.resnet18(weights=weights)
        # Replace the ImageNet classifier head with one sized for our classes.
        in_features = self.model.fc.in_features
        self.model.fc = nn.Linear(in_features, num_classes)
        self.freeze_backbone()

    def freeze_backbone(self) -> None:
        """Disable gradients everywhere except the classifier head."""
        for param in self.model.parameters():
            param.requires_grad = False
        for param in self.model.fc.parameters():
            param.requires_grad = True

    def train(self, mode: bool = True):
        """Override train/eval switching to keep the backbone frozen.

        In training mode the backbone is forced to eval (freezes BatchNorm
        statistics and dropout); only the head is put in train mode.
        """
        super().train(mode)
        if mode:                        # training
            self.model.eval()           # freeze backbone + head
            self.model.fc.train(True)   # re-enable train mode on the head only
        else:                           # evaluation
            self.model.eval()           # everything in eval during validation
        return self                     # PyTorch convention: return self

    def forward(self, x):
        return self.model(x)


#################################################################


class Scenario:
    """Plain namespace holding everything one training run needs
    (data loaders, model, optimizer, logging paths, hyper-parameters)."""
    pass


def createScenario(lr):
    """Build and describe a Scenario for a single learning rate.

    Relies on the module-level datasets ``train_ds`` / ``valid_ds``.
    Returns the populated Scenario.
    """
    S = Scenario()

    # Logging: make sure the folder exists, otherwise to_csv() fails later.
    S.log_folder = r"C:\log\test1"
    os.makedirs(S.log_folder, exist_ok=True)
    S.CSVname = os.path.join(S.log_folder, "LR_" + format(lr, ".0e") + ".csv")

    # train/val
    S.epochs = 30
    S.batch_size = 32
    S.train_batch = DataLoader(train_ds, batch_size=S.batch_size,
                               num_workers=8, persistent_workers=True,
                               shuffle=True)
    S.valid_batch = DataLoader(valid_ds, batch_size=S.batch_size,
                               shuffle=False)

    # model / optimizer
    S.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    S.model = ResNet18FTransfer()
    S.loss_fn = nn.CrossEntropyLoss()
    S.lr = lr
    # Only the head has requires_grad=True; passing only trainable params
    # to Adam is equivalent (frozen params never receive gradients).
    S.optimizer = torch.optim.Adam(
        (p for p in S.model.parameters() if p.requires_grad), lr=lr)
    S.scheduler = None
    # S.scheduler = torch.optim.lr_scheduler.StepLR(S.optimizer, step_size=10, gamma=0.1)

    print("Device : ", S.device)
    print("Epochs : ", S.epochs)
    print("LR     : ", S.lr)
    print("Optim  : ", S.optimizer.__class__.__name__)
    print("Res    : ", SIZE)
    print("Schedul: ", S.scheduler.__class__.__name__)
    print("Train  : ", T_train.transforms[0])
    for t in T_train.transforms[1:]:
        print("       : ", t)

    return S


#######################################################################


def train(S, name):
    """Run the full train/validation loop described by Scenario ``S``.

    Per epoch: one training pass (loss + accuracy), one no-grad validation
    pass, console logging, optional scheduler step. Metrics for every epoch
    are written to ``S.CSVname``. ``name`` is currently unused (kept for
    interface compatibility with callers).
    """
    info = []
    S.model = S.model.to(S.device)

    for epoch in range(S.epochs):
        t0 = time.time()

        # TRAIN
        train_GlobLoss = train_accuracy = 0
        S.model.train()
        for x, y in S.train_batch:
            x = x.to(S.device)
            y = y.to(S.device)

            S.optimizer.zero_grad()
            logits = S.model(x)
            loss = S.loss_fn(logits, y)
            loss.backward()
            # Weight by batch size so the final mean is per-sample.
            train_GlobLoss += loss.item() * x.size(0)
            S.optimizer.step()

            # accuracy
            predictions = logits.argmax(dim=1)
            train_accuracy += (predictions == y).sum().item()

        # VALIDATION
        valid_GlobLoss = valid_accuracy = 0
        S.model.eval()
        with torch.no_grad():
            for x, y in S.valid_batch:
                x = x.to(S.device)
                y = y.to(S.device)
                logits = S.model(x)
                loss = S.loss_fn(logits, y)
                valid_GlobLoss += loss.item() * x.size(0)

                # accuracy
                predictions = logits.argmax(dim=1)
                valid_accuracy += (predictions == y).sum().item()

        # Epoch-level metrics (per-sample averages).
        nbtrain, nbvalid = len(S.train_batch.dataset), len(S.valid_batch.dataset)
        train_GlobLoss /= nbtrain
        train_accuracy /= nbtrain
        valid_GlobLoss /= nbvalid
        valid_accuracy /= nbvalid

        dT = time.time() - t0
        current_lr = S.optimizer.param_groups[0]['lr']
        print(
            f"{epoch+1:02d}/{S.epochs} - "
            f"tLoss {train_GlobLoss:.3f} - vLoss {valid_GlobLoss:.3f} - "
            f"tAcc {train_accuracy:.3f} - vAcc {valid_accuracy:.3f} dT : {dT:.2f} LR : {current_lr}")

        info.append([epoch + 1, train_accuracy, valid_accuracy,
                     train_GlobLoss, valid_GlobLoss])

        if S.scheduler:
            S.scheduler.step()

    # CSV
    columns = ["epoch", "acc/train", "acc/valid", "loss/train", "loss/valid"]
    df = pd.DataFrame(info, columns=columns)
    df.to_csv(S.CSVname, index=False)


#####################################################
# Oxford-IIIT Pet dataset (kept at module level so DataLoader worker
# processes, which re-import this module, can resolve the globals).
train_ds = datasets.OxfordIIITPet(root="../data", split="trainval",
                                  download=True, transform=T_train)
valid_ds = datasets.OxfordIIITPet(root="../data", split="test",
                                  download=True, transform=T_valid)

# Learning rates to sweep; the single-value list overrides the sweep.
L = [1e-2, 3e-3, 3e-4, 1e-4]
L = [1e-3]


def temps_ecoule(t0):
    """Return the elapsed time since ``t0`` formatted as MM:SS."""
    delta = int(time.time() - t0)
    minutes = delta // 60
    secondes = delta % 60
    return f"{minutes:02d}:{secondes:02d}"


# The __main__ guard is required: with num_workers > 0 on Windows/macOS
# (spawn start method), worker processes re-import this module, and an
# unguarded training loop would be re-executed in every worker.
if __name__ == "__main__":
    for lr in L:
        t0 = time.time()
        S = createScenario(lr=lr)
        train(S, "__" + str(lr))
        print("Temps écoulé : ", temps_ecoule(t0))