I can't figure out why my neural network's "train" and "validation" accuracy stay constant

Here is the code I am running (braindecode + skorch + PyTorch):

```python
# Imports assumed from the calls below (PyTorch, skorch, braindecode >= 0.5);
# exact module paths may differ slightly between braindecode versions.
import numpy as np
from torch import optim
from torch.nn.functional import nll_loss
from skorch.helper import predefined_split

from braindecode import EEGClassifier
from braindecode.models import Deep4Net
from braindecode.models.util import to_dense_prediction_model
from braindecode.training.losses import CroppedLoss
from braindecode.training.scoring import CroppedTrialEpochScoring
from braindecode.util import set_random_seeds, np_to_th
from braindecode.datasets import create_from_X_y  # braindecode < 0.6: from braindecode.datautil import create_from_X_y


def test_eeg_classifier(X, Y):
    n_classes = 3
    in_chans = X.shape[1]
    device = 'cuda'  # Set if you want to use GPU
    # You can also use torch.cuda.is_available() to determine if cuda is available on your machine.
    cuda = True
    set_random_seeds(seed=20220109, cuda=cuda)

    # This will determine how many crops are processed in parallel
    input_window_samples = X.shape[2]
    n_classes = 3
    in_chans = X.shape[1]

    # final_conv_length determines the size of the receptive field of the ConvNet
    model = Deep4Net(
        in_chans=in_chans,
        n_classes=n_classes,
        input_window_samples=input_window_samples,
        final_conv_length='auto',
    ).cuda()
    to_dense_prediction_model(model)

    # Determine the output size (number of predictions per input window)
    # with a dummy forward pass.
    test_input = np_to_th(
        np.ones((2, in_chans, input_window_samples, 1), dtype=np.float32)
    ).cuda()
    out = model(test_input)
    n_preds_per_input = out.cpu().data.numpy().shape[2]
    # print(out)

    # Trials 0-1139 are used for training, trials 1140-1259 for validation.
    train_set = create_from_X_y(
        X[:1140], Y[:1140],
        drop_last_window=False,
        sfreq=250,
        window_size_samples=input_window_samples,
        window_stride_samples=n_preds_per_input,
    )
    valid_set = create_from_X_y(
        X[1140:1260], Y[1140:1260],
        drop_last_window=False,
        sfreq=250,
        window_size_samples=input_window_samples,
        window_stride_samples=n_preds_per_input,
    )

    # Per-epoch trial-wise accuracy on the training and validation sets.
    cropped_cb_train = CroppedTrialEpochScoring(
        "accuracy",
        name="train_trial_accuracy",
        lower_is_better=False,
        on_train=True,
    )
    cropped_cb_valid = CroppedTrialEpochScoring(
        "accuracy",
        on_train=False,
        name="valid_trial_accuracy",
        lower_is_better=False,
    )

    clf = EEGClassifier(
        model,
        cropped=True,
        criterion=CroppedLoss,
        criterion__loss_function=nll_loss,
        optimizer=optim.AdamW,
        optimizer__lr=0.05,
        optimizer__weight_decay=5 * 0.01,
        train_split=predefined_split(valid_set),
        batch_size=32,
        callbacks=[
            ("train_trial_accuracy", cropped_cb_train),
            ("valid_trial_accuracy", cropped_cb_valid),
        ],
    )
    clf.fit(train_set, y=None, epochs=200)
```
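The accuracies I am describing are the ones logged by the two CroppedTrialEpochScoring callbacks. Below is a minimal sketch of how those per-epoch values can be read back out of the skorch History that EEGClassifier inherits; it assumes `clf` is the fitted classifier from above (e.g. if `test_eeg_classifier` were changed to return it), and the keys are the `name=` values given to the callbacks.

```python
# Minimal sketch: dump the per-epoch metrics from the skorch History.
# Assumes `clf` is the fitted EEGClassifier from test_eeg_classifier above
# (e.g. if the function were changed to `return clf`).
train_acc = clf.history[:, 'train_trial_accuracy']
valid_acc = clf.history[:, 'valid_trial_accuracy']
train_loss = clf.history[:, 'train_loss']

for epoch, (tr, va, loss) in enumerate(zip(train_acc, valid_acc, train_loss), start=1):
    print(f"epoch {epoch:3d}  train_loss={loss:.4f}  "
          f"train_trial_accuracy={tr:.3f}  valid_trial_accuracy={va:.3f}")
```

With this, both train_trial_accuracy and valid_trial_accuracy print the same value at every one of the 200 epochs.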
