Chainerでの機械学習の実装中でしたが、コマンドプロンプトでpython train_imagenet.py -g 0 --train train.txt --val test.txtと打ち込んだ際にエラーが出てしまいました。
初歩的な問題だと思いますがどうかよろしくお願いします。
実現したいこと
ここに実現したいことを箇条書きで書いてください。
発生している問題・エラーメッセージ
エラーメッセージ ```Traceback (most recent call last): File "train_imagenet.py", line 102, in <module> model = nin.NIN() File "C:\Users\owner\実験データ\20201114\nin.py", line 18, in __init__ 3, (96, 96, 96), 11, stride=4, wscale=w), File "C:\Users\owner\AppData\Roaming\Python\Python37\site-packages\chainer\links\connection\mlp_convolution_2d.py", line 77, in __init__ argument.check_unexpected_kwargs(kwargs, wscale=msg) File "C:\Users\owner\AppData\Roaming\Python\Python37\site-packages\chainer\utils\argument.py", line 7, in check_unexpected_kwargs raise ValueError(message) ValueError: wscale is not supported anymore. Use conv_init and bias_init argument to change the scale of initial parameters. ### 該当のソースコード ```ここに言語名を入力 python ソースコード ``` train_imagenet.py from __future__ import print_function import argparse import datetime import json import multiprocessing import os import random import sys import threading import time import numpy as np from PIL import Image import six import six.moves.cPickle as pickle from six.moves import queue from chainer import computational_graph from chainer import cuda from chainer import optimizers from chainer import serializers parser = argparse.ArgumentParser(description='Learning convnet from ILSVRC2012 dataset') parser.add_argument('--train', help=r'D:\numadalab\2020\実験データ\20201114') parser.add_argument('--val', help=r'D:\numadalab\2020\実験データ\20201207_day') parser.add_argument('--mean', '-m', default='mean.npy', help='Path to the mean file (computed by compute_mean.py)') parser.add_argument('--arch', '-a', default='nin', help='Convnet architecture \ (nin, alex, alexbn, googlenet, googlenetbn)') parser.add_argument('--batchsize', '-B', type=int, default=32, help='Learning minibatch size') parser.add_argument('--val_batchsize', '-b', type=int, default=250, help='Validation minibatch size') parser.add_argument('--epoch', '-E', default=50, type=int, help='Number of epochs to learn') parser.add_argument('--gpu', '-g', default=-1, type=int, help='GPU ID (negative value indicates CPU)') parser.add_argument('--loaderjob', '-j', default=20, type=int, help='Number of parallel data loading processes') parser.add_argument('--root', '-r', default='.', help='Root directory path of image files') parser.add_argument('--out', '-o', default='model', help='Path to save model on each validation') parser.add_argument('--outstate', '-s', default='state', help='Path to save optimizer state on each validation') parser.add_argument('--initmodel', default='', help='Initialize the model from given file') parser.add_argument('--resume', default='', help='Resume the optimization from snapshot') parser.add_argument('--test', dest='test', action='store_true') parser.set_defaults(test=False) #args = parser.parse_args(args=[]) args = parser.parse_args() if args.gpu >= 0: cuda.check_cuda_available() xp = cuda.cupy if args.gpu >= 0 else np assert 50000 % args.val_batchsize == 0 if args.test: denominator = 1 else: denominator = 100000 def load_image_list(path, root): tuples = [] for line in open(path): pair = line.strip().split() tuples.append((os.path.join(root, pair[0]), np.int32(pair[1]))) return tuples # Prepare dataset print(args.train) train_list = load_image_list(args.train, args.root) val_list = load_image_list(args.val, args.root) mean_image = np.load(args.mean,allow_pickle=True) **# Prepare model ** if args.arch == 'nin': import nin model = nin.NIN() elif args.arch == 'i2vvgg': import i2vvgg model = i2vvgg.i2vVGG() elif args.arch == 'alex': import alex model = alex.Alex() elif args.arch == 'alexbn': import alexbn model = alexbn.AlexBN() elif args.arch == 'googlenet': import googlenet model = googlenet.GoogLeNet() elif args.arch == 'googlenetbn': import googlenetbn model = googlenetbn.GoogLeNetBN() else: raise ValueError('Invalid architecture name') if args.gpu >= 0: cuda.get_device(args.gpu).use() model.to_gpu() # Setup optimizer optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9) optimizer.setup(model) # Init/Resume if args.initmodel: print('Load model from', args.initmodel) serializers.load_npz(args.initmodel, model) if args.resume: print('Load optimizer state from', args.resume) serializers.load_npz(args.resume, optimizer) ```ここに言語を入力 python コード nin.py import math import chainer import chainer.functions as F import chainer.links as L class NIN(chainer.Chain): """Network-in-Network example model.""" insize = 227 def __init__(self): w = math.sqrt(2) # MSRA scaling super(NIN, self).__init__( mlpconv1=L.MLPConvolution2D( 3, (96, 96, 96), 11, stride=4, wscale=w), mlpconv2=L.MLPConvolution2D( 96, (256, 256, 256), 5, pad=2, wscale=w), mlpconv3=L.MLPConvolution2D( 256, (384, 384, 384), 3, pad=1, wscale=w), mlpconv4=L.MLPConvolution2D( 384, (1024, 1024, 1000), 3, pad=1, wscale=w), ) self.train = True def clear(self): self.loss = None self.accuracy = None def __call__(self, x, t): self.clear() h = F.max_pooling_2d(F.relu(self.mlpconv1(x)), 3, stride=2) h = F.max_pooling_2d(F.relu(self.mlpconv2(h)), 3, stride=2) h = F.max_pooling_2d(F.relu(self.mlpconv3(h)), 3, stride=2) h = self.mlpconv4(F.dropout(h, train=self.train)) h = F.reshape(F.average_pooling_2d(h, 6), (x.data.shape[0], 1000)) self.loss = F.softmax_cross_entropy(h, t) self.accuracy = F.accuracy(h, t) return self.loss def predict(self, x_data, train=False): x = chainer.Variable(x_data, volatile=True) h = F.relu(self.conv1(x)) h = F.relu(self.conv1a(h)) h = F.relu(self.conv1b(h)) h = F.max_pooling_2d(h, 3, stride=2) h = F.relu(self.conv2(h)) h = F.relu(self.conv2a(h)) h = F.relu(self.conv2b(h)) h = F.max_pooling_2d(h, 3, stride=2) h = F.relu(self.conv3(h)) h = F.relu(self.conv3a(h)) h = F.relu(self.conv3b(h)) h = F.max_pooling_2d(h, 3, stride=2) h = F.dropout(h, train=train) h = F.relu(self.conv4(h)) h = F.relu(self.conv4a(h)) h = F.relu(self.conv4b(h)) h = F.reshape(F.average_pooling_2d(h, 6), (x_data.shape[0], 1000)) return F.softmax(h) ### 補足情報(FW/ツールのバージョンなど) ここにより詳細な情報を記載してください。
0 コメント