"""Train a small CNN on CIFAR-10 across several learning rates.

For each learning rate in the sweep, the script runs a full train/validation
loop and writes the per-epoch metrics (accuracy, loss, mean confidence) to a
CSV file under the configured log folder.
"""
import os

import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader, Dataset
from torchvision import datasets, transforms

# CIFAR-10 per-channel statistics used to normalise the inputs.
mean = (0.4914, 0.4822, 0.4465)
std = (0.2023, 0.1994, 0.2010)

# Train and validation pipelines are currently identical (no augmentation).
T_train = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=std),
])
T_valid = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=mean, std=std),
])

###############################################################


class SubDataset(Dataset):
    """A view of ``base_dataset`` restricted to ``indices``.

    An optional ``transform`` is applied to the sample (not the label) on
    access, which lets train and validation splits of the same underlying
    dataset use different pipelines.
    """

    def __init__(self, base_dataset, indices, transform=None):
        self.base_dataset = base_dataset  # untransformed source dataset
        self.indices = indices            # positions of this split in the source
        self.transform = transform        # applied lazily in __getitem__

    def __len__(self):
        return len(self.indices)

    def __getitem__(self, i):
        x, y = self.base_dataset[self.indices[i]]
        if self.transform:
            x = self.transform(x)
        return x, y


def LoadDS(dataset, transform_train, transform_valid, split=0.8):
    """Split ``dataset`` into reproducible train/validation subsets.

    Args:
        dataset: source dataset (must expose ``__len__``/``__getitem__``).
        transform_train: transform applied to training samples.
        transform_valid: transform applied to validation samples.
        split: fraction of samples assigned to the training split.

    Returns:
        (train_ds, val_ds) pair of :class:`SubDataset` views.
    """
    nb = len(dataset)
    n_train = int(split * nb)
    # Fixed seed so the split is identical across runs / learning rates.
    g = torch.Generator().manual_seed(0)
    idx = torch.randperm(nb, generator=g).tolist()
    train_idx = idx[:n_train]
    val_idx = idx[n_train:]
    train_ds = SubDataset(dataset, train_idx, transform_train)  # e.g. 80% of the images
    val_ds = SubDataset(dataset, val_idx, transform_valid)      # remaining 20%
    print("Dataset LOADED")
    return train_ds, val_ds


#########################################################


class ConvBlock(nn.Sequential):
    """Conv 3x3 (same padding) -> ReLU -> 2x2 max-pool; halves spatial size."""

    def __init__(self, c_in, c_out):
        super().__init__(
            nn.Conv2d(c_in, c_out, 3, padding=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(2),
        )


class CifarCNNBase(nn.Module):
    """Small 3-stage CNN with global average pooling head.

    Spatial resolution: L -> L/2 -> L/4 -> L/8, then adaptive pooling makes
    the classifier independent of the input size.
    """

    def __init__(self, num_classes=10):
        super().__init__()
        self.features = nn.Sequential(
            ConvBlock(3, 64),     # 64  x L/2 x L/2
            ConvBlock(64, 128),   # 128 x L/4 x L/4
            ConvBlock(128, 256),  # 256 x L/8 x L/8
        )
        self.classifier = nn.Sequential(
            nn.AdaptiveAvgPool2d((1, 1)),  # 256 x 1 x 1
            nn.Flatten(),                  # 256
            nn.Linear(256, num_classes),   # num_classes logits
        )

    def forward(self, x):
        x = self.features(x)
        x = self.classifier(x)
        return x


################################################################


class Scenario:
    """Plain attribute bag holding one training configuration (see createScenario)."""
    pass


def createScenario(lr):
    """Build a Scenario (data loaders, model, optimizer) for learning rate ``lr``.

    Relies on the module-level ``train_ds`` / ``val_ds`` created by the script
    body below.  The CSV log path encodes the learning rate, e.g. ``LR_1e-03.csv``.
    """
    S = Scenario()
    S.log_folder = r"C:\log\test1"
    # Ensure the log directory exists so the CSV write at the end of training
    # does not fail on a fresh machine.
    os.makedirs(S.log_folder, exist_ok=True)
    S.CSVname = os.path.join(S.log_folder, "LR_" + format(lr, ".0e") + ".csv")

    # train/val loaders
    S.epochs = 100
    S.batch_size = 32
    S.train_batch = DataLoader(train_ds, batch_size=S.batch_size, shuffle=True)
    S.valid_batch = DataLoader(val_ds, batch_size=S.batch_size, shuffle=False)

    # model / optimizer
    S.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    print(S.device)
    S.model = CifarCNNBase(num_classes=10)
    S.loss_fn = nn.CrossEntropyLoss()
    S.lr = lr
    S.optimizer = torch.optim.Adam(S.model.parameters(), lr=lr)
    return S


#######################################################################


def train(S, name):
    """Run the full train/validation loop described by Scenario ``S``.

    Per-epoch metrics are printed and accumulated, then written to
    ``S.CSVname`` when all epochs are done.

    NOTE(review): ``name`` is currently unused; kept for caller compatibility.
    """
    info = []
    S.model = S.model.to(S.device)
    for epoch in range(S.epochs):
        # ---- TRAIN ----
        train_GlobLoss = train_accuracy = 0
        S.model.train()
        for x, y in S.train_batch:
            x = x.to(S.device)
            y = y.to(S.device)
            S.optimizer.zero_grad()
            logits = S.model(x)
            loss = S.loss_fn(logits, y)
            loss.backward()
            # Weight by batch size so the epoch average is per-sample.
            train_GlobLoss += loss.item() * x.size(0)
            S.optimizer.step()
            # accuracy (count of correct predictions, normalised later)
            predictions = logits.argmax(dim=1)
            train_accuracy += (predictions == y).sum().item()

        # ---- VALIDATION ----
        val_conf = valid_GlobLoss = valid_accuracy = 0
        S.model.eval()
        with torch.no_grad():
            for x, y in S.valid_batch:
                x = x.to(S.device)
                y = y.to(S.device)
                logits = S.model(x)
                loss = S.loss_fn(logits, y)
                valid_GlobLoss += loss.item() * x.size(0)
                # accuracy
                predictions = logits.argmax(dim=1)
                valid_accuracy += (predictions == y).sum().item()
                # mean confidence of the predicted class
                probs = F.softmax(logits, dim=1)
                conf = probs.max(dim=1).values
                val_conf += conf.sum().item()

        # ---- epoch-level metrics ----
        nbtrain, nbvalid = len(S.train_batch.dataset), len(S.valid_batch.dataset)
        train_GlobLoss /= nbtrain
        train_accuracy /= nbtrain
        valid_GlobLoss /= nbvalid
        valid_accuracy /= nbvalid
        val_conf /= nbvalid
        print(
            f"{epoch+1}/{S.epochs} - "
            f"tLoss {train_GlobLoss:.3f} - vLoss {valid_GlobLoss:.3f} - "
            f"tAcc {train_accuracy:.3f} - vAcc {valid_accuracy:.3f} - cAcc {val_conf:.3f}")
        info.append([epoch + 1, train_accuracy, valid_accuracy,
                     train_GlobLoss, valid_GlobLoss, val_conf])

    # ---- CSV dump of the whole run ----
    columns = ["epoch", "acc/train", "acc/valid", "loss/train", "loss/valid", "conf/val"]
    df = pd.DataFrame(info, columns=columns)
    df.to_csv(S.CSVname, index=False)


#####################################################

if __name__ == "__main__":
    # transform=None: normalisation is applied per-split by SubDataset.
    dataset = datasets.CIFAR10(root="./data", train=True, download=True, transform=None)
    train_ds, val_ds = LoadDS(dataset, T_train, T_valid)

    L = [1e-5, 1e-4, 1e-3, 1e-2, 1e-1]
    for lr in L:
        S = createScenario(lr=lr)
        train(S, "__" + str(lr))