画像識別コード(他サイト引用)のエラーをなおし、実行後表示された結果から読み取れる事を知りたい。

python

1#ここから新たにしてコマンドで2#colabと違って先に別個に?、!pip install timmを消して、pip install timm、pip install pandas、seabornでいいっぽい、でそれを消した。3#from futureはコメントより優先っぽい4 5#先にコマンドで以下打っとく6#pip install timm7#pip install pandas8#pip install seaborn9 10#ここから11from __future__ import print_function 12 13import glob 14import os 15 16import random 17 18import matplotlib.pyplot as plt 19import numpy as np 20import pandas as pd 21import torch 22import torch.nn as nn 23import torch.nn.functional as F 24import torch.optim as optim 25from PIL import Image 26from torch.optim.lr_scheduler import StepLR 27from torch.utils.data import DataLoader, Dataset 28from torchvision import datasets, transforms 29from tqdm.notebook import tqdm 30 31from pathlib import Path 32import seaborn as sns 33import timm 34from pprint import pprint 35 36import copy 37from tqdm import tqdm 38 39# Training settings40epochs = 5041lr = 3e-542gamma = 0.743seed = 4244 45def seed_everything(seed):46 random.seed(seed)47 os.environ['PYTHONHASHSEED'] = str(seed)48 np.random.seed(seed)49 torch.manual_seed(seed)50 torch.cuda.manual_seed(seed)51 torch.cuda.manual_seed_all(seed)52 torch.backends.cudnn.deterministic = True53 54seed_everything(seed)55 56device = 'cpu'57train_dataset_dir = Path('./Gender01/train')58val_dataset_dir = Path('./Gender01/validation')59test_dataset_dir = Path('./Gender01/test')60 61files = glob.glob('./Gender01/*/*/*.png')62random_idx = np.random.randint(1, len(files), size=9)63 64fig, axes = plt.subplots(3, 3, figsize=(8, 6))65 66for idx, ax in enumerate(axes.ravel()):67 img = Image.open(files[idx])68 ax.imshow(img)69 70train_transforms = transforms.Compose(71 [72 transforms.Resize((224, 224)),73 transforms.RandomHorizontalFlip(),74 transforms.ToTensor(),75 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])76 ]77)78 79val_transforms = transforms.Compose(80 [81 transforms.Resize((224, 224)),82 transforms.ToTensor(),83 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])84 ]85)86 87test_transforms = transforms.Compose(88 [89 transforms.Resize((224, 224)),90 transforms.ToTensor(),91 transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])92 ]93)94 95train_data = datasets.ImageFolder(train_dataset_dir,train_transforms)96valid_data = datasets.ImageFolder(val_dataset_dir, val_transforms)97test_data = datasets.ImageFolder(test_dataset_dir, test_transforms)98 99train_loader = DataLoader(dataset = train_data, batch_size=16, shuffle=True )100valid_loader = DataLoader(dataset = valid_data, batch_size=16, shuffle=False)101test_loader = DataLoader(dataset = test_data, batch_size=16, shuffle=False)102 103model_names = timm.list_models(pretrained=True)104pprint(model_names)105 106model = timm.create_model('tf_efficientnetv2_s_in21ft1k', pretrained=True, num_classes=2)107model = model.to(device)108 109# loss function110criterion = nn.CrossEntropyLoss()111# optimizer112optimizer = optim.Adam(model.parameters(), lr=lr)113# scheduler114scheduler = StepLR(optimizer, step_size=1, gamma=gamma)115 116best_loss = None117 118# Accuracy計算用の関数119def calculate_accuracy(output, target):120 output = (torch.sigmoid(output) >= 0.5)121 target = (target == 1.0)122 accuracy = torch.true_divide((target == output).sum(dim=0), output.size(0)).item()123 return accuracy 124 125train_acc_list = []126val_acc_list = []127train_loss_list = []128val_loss_list = []129 130for epoch in range(epochs):131 epoch_loss = 0132 epoch_accuracy = 0133 134 for data, label in tqdm(train_loader):135 data = data.to(device)136 label = label.to(device)137 138 output = model(data)139 loss = criterion(output, label)140 141 optimizer.zero_grad()142 loss.backward()143 optimizer.step()144 145 acc = (output.argmax(dim=1) == label).float().mean()146 epoch_accuracy += acc / len(train_loader)147 epoch_loss += loss / len(train_loader) 148 149 with torch.no_grad():150 epoch_val_accuracy = 0151 epoch_val_loss = 0152 for data, label in valid_loader:153 data = data.to(device)154 label = label.to(device)155 156 val_output = model(data)157 val_loss = criterion(val_output, label)158 159 acc = (val_output.argmax(dim=1) == label).float().mean()160 epoch_val_accuracy += acc / len(valid_loader)161 epoch_val_loss += val_loss / len(valid_loader)162 163 print(164 f"Epoch : {epoch+1} - loss : {epoch_loss:.4f} - acc: {epoch_accuracy:.4f} - val_loss : {epoch_val_loss:.4f} - val_acc: {epoch_val_accuracy:.4f}\n"165 )166 167 train_acc_list.append(epoch_accuracy)168 val_acc_list.append(epoch_val_accuracy)169 train_loss_list.append(epoch_loss)170 val_loss_list.append(epoch_val_loss)171 172 if (best_loss is None) or (best_loss > val_loss):173 best_loss = val_loss 174 model_path = './Gender01/save/bestViTmodel.pth'175 torch.save(model.state_dict(), model_path)176 177 print()178 179device2 = torch.device('cpu')180 181train_acc = []182train_loss = []183val_acc = []184val_loss = []185 186for i in range(epochs):187 train_acc2 = train_acc_list[i].to(device2)188 train_acc3 = train_acc2.clone().numpy()189 train_acc.append(train_acc3)190 191 train_loss2 = train_loss_list[i].to(device2)192 train_loss3 = train_loss2.clone().detach().numpy()193 train_loss.append(train_loss3)194 195 val_acc2 = val_acc_list[i].to(device2)196 val_acc3 = val_acc2.clone().numpy()197 val_acc.append(val_acc3)198 199 val_loss2 = val_loss_list[i].to(device2)200 val_loss3 = val_loss2.clone().numpy()201 val_loss.append(val_loss3)202 203#取得したデータをグラフ化する204sns.set()205num_epochs = epochs 206 207fig = plt.subplots(figsize=(12, 4), dpi=80)208 209ax1 = plt.subplot(1,2,1)210ax1.plot(range(num_epochs), train_acc, c='b', label='train acc')211ax1.plot(range(num_epochs), val_acc, c='r', label='val acc')212ax1.set_xlabel('epoch', fontsize='12')213ax1.set_ylabel('accuracy', fontsize='12')214ax1.set_title('training and val acc', fontsize='14')215ax1.legend(fontsize='12')216 217ax2 = plt.subplot(1,2,2)218ax2.plot(range(num_epochs), train_loss, c='b', label='train loss')219ax2.plot(range(num_epochs), val_loss, c='r', label='val loss')220ax2.set_xlabel('epoch', fontsize='12')221ax2.set_ylabel('loss', fontsize='12')222ax2.set_title('training and val loss', fontsize='14')223ax2.legend(fontsize='12')224plt.show()225 226model.eval() # モデルを評価モードにする227 228loss_sum = 0229correct = 0230 231with torch.no_grad():232 for data, labels in test_loader:233 234 # GPUが使えるならGPUにデータを送る235 data = data.to(device)236 labels = labels.to(device)237 238 # ニューラルネットワークの処理を実施239 outputs = model(data)240 241 # テスト画像のそれぞれがどちらに推定されたかではなく、どれくらいの自信をもって推定されたのか、たとえば0.9:0.1で「圧倒的にリンゴ」なのか、あるいは0.6:0.4で「リンゴかな?」なのか、を知る242 print(outputs)243 244 # 損失(出力とラベルとの誤差)の計算245 loss_sum += criterion(outputs, labels)246 247 # 正解の値を取得248 pred = outputs.argmax(1)249 250 # テスト画像のそれぞれがリンゴとオレンジのどちらに推定されたのかを知る251 print(pred)252 253 # 正解数をカウント254 correct += pred.eq(labels.view_as(pred)).sum().item()255 256print(f"Loss: {loss_sum.item() / len(test_loader)}, Accuracy: {100*correct/len(test_data)}% ({correct}/{len(test_data)})")257 258# 検証画像でも同様なことを知りたい場合は、学習終了後の状態で分かればいいのなら。259 260loss_sum = 0261correct = 0262 263with torch.no_grad():264 for data, labels in valid_loader:265 266 # GPUが使えるならGPUにデータを送る267 data = data.to(device)268 labels = labels.to(device)269 270 # ニューラルネットワークの処理を実施271 outputs = model(data)272 273 # テスト画像のそれぞれがどちらに推定されたかではなく、どれくらいの自信をもって推定されたのか、たとえば0.9:0.1で「圧倒的にリンゴ」なのか、あるいは0.6:0.4で「リンゴかな?」なのか、を知る274 print(outputs)275 276 # 損失(出力とラベルとの誤差)の計算277 loss_sum += criterion(outputs, labels)278 279 # 正解の値を取得280 pred = outputs.argmax(1)281 282 # テスト画像のそれぞれがリンゴとオレンジのどちらに推定されたのかを知る283 print(pred)284 285 # 正解数をカウント286 correct += pred.eq(labels.view_as(pred)).sum().item()287 288print(f"Loss: {loss_sum.item() / len(valid_loader)}, Accuracy: {100*correct/len(valid_data)}% ({correct}/{len(valid_data)})")

コメントを投稿

0 コメント