TypeError: invalid file: None

### Background

The following error message occurred while I was implementing machine learning with Chainer. I think this is probably an elementary problem, but I would appreciate your help.

### Error message

```
  File "train_imagenet.py", line 96, in <module>
    train_list = load_image_list(args.train, args.root)
  File "train_imagenet.py", line 89, in load_image_list
    for line in open(path):
TypeError: invalid file: None
```

### Relevant source code

```python

#!/usr/bin/env python
"""Example code of learning a large scale convnet from ILSVRC2012 dataset.

Prerequisite: To run this example, crop the center of ILSVRC2012 training and
validation images and scale them to 256x256, and make two lists of space-
separated CSV whose first column is full path to image and second column is
zero-origin label (this format is same as that used by Caffe's ImageDataLayer).

"""
from __future__ import print_function
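
# A sketch of the list-file format described in the docstring above
# (file names and labels here are illustrative, not from the original post):
#
#   /path/to/ILSVRC2012/n01440764_10026.JPEG 0
#   /path/to/ILSVRC2012/n01440764_10027.JPEG 0
#   /path/to/ILSVRC2012/n01443537_1219.JPEG 1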
import argparse
import datetime
import json
import multiprocessing
import os
import random
import sys
import threading
import time

import numpy as np
from PIL import Image
import six
import six.moves.cPickle as pickle
from six.moves import queue

import chainer
from chainer import serializers
from chainer import computational_graph
from chainer import cuda
from chainer import optimizers

# added
import alex
import googlenet
import googlenetbn
import nin

parser = argparse.ArgumentParser(
    description='Learning convnet from ILSVRC2012 dataset')
parser.add_argument('--train', help='Path to training image-label list file')
parser.add_argument('--val', help='Path to validation image-label list file')
parser.add_argument('--mean', '-m', default='mean.npy',
                    help='Path to the mean file (computed by compute_mean.py)')
parser.add_argument('--arch', '-a', default='nin',
                    help='Convnet architecture '
                         '(nin, alex, alexbn, googlenet, googlenetbn)')
parser.add_argument('--batchsize', '-B', type=int, default=32,
                    help='Learning minibatch size')
parser.add_argument('--val_batchsize', '-b', type=int, default=250,
                    help='Validation minibatch size')
parser.add_argument('--epoch', '-E', default=50, type=int,
                    help='Number of epochs to learn')
parser.add_argument('--gpu', '-g', default=-1, type=int,
                    help='GPU ID (negative value indicates CPU)')
parser.add_argument('--loaderjob', '-j', default=20, type=int,
                    help='Number of parallel data loading processes')
parser.add_argument('--root', '-r', default='.',
                    help='Root directory path of image files')
parser.add_argument('--out', '-o', default='model',
                    help='Path to save model on each validation')
parser.add_argument('--outstate', '-s', default='state',
                    help='Path to save optimizer state on each validation')
parser.add_argument('--initmodel', default='',
                    help='Initialize the model from given file')
parser.add_argument('--resume', default='',
                    help='Resume the optimization from snapshot')
parser.add_argument('--test', dest='test', action='store_true')
parser.set_defaults(test=False)
args = parser.parse_args(args=[])
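# note: parse_args(args=[]) parses an empty argument list rather than
# sys.argv, so options such as --train and --val keep their defaults (None)
# regardless of what is passed on the command line.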
if args.gpu >= 0:
    cuda.check_cuda_available()
xp = cuda.cupy if args.gpu >= 0 else np

assert 50000 % args.val_batchsize == 0  # ILSVRC2012 validation set has 50000 images

if args.test:
    denominator = 1
else:
    denominator = 100000

def load_image_list(path, root):
    # Parse a list file where each line is '<image path> <integer label>'.
    tuples = []
    for line in open(path):
        pair = line.strip().split()
        tuples.append((os.path.join(root, pair[0]), np.int32(pair[1])))
    return tuples

train_list = load_image_list(args.train, args.root)

val_list = load_image_list(args.val, args.root)

mean_image = np.load(args.mean)

if args.arch == 'nin':
    import nin
    model = nin.NIN()
elif args.arch == 'i2vvgg':
    import i2vvgg
    model = i2vvgg.i2vVGG()
elif args.arch == 'alex':
    import alex
    model = alex.Alex()
elif args.arch == 'alexbn':
    import alexbn
    model = alexbn.AlexBN()
elif args.arch == 'googlenet':
    import googlenet
    model = googlenet.GoogLeNet()
elif args.arch == 'googlenetbn':
    import googlenetbn
    model = googlenetbn.GoogLeNetBN()
else:
    raise ValueError('Invalid architecture name')

if args.gpu >= 0:
    cuda.get_device(args.gpu).use()
    model.to_gpu()

optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
optimizer.setup(model)

if args.initmodel:
    print('Load model from', args.initmodel)
    serializers.load_npz(args.initmodel, model)
if args.resume:
    print('Load optimizer state from', args.resume)
    serializers.load_npz(args.resume, optimizer)

data_q = queue.Queue(maxsize=1)
res_q = queue.Queue()
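
# The two queues above form a producer/consumer hand-off: feed_data() below
# pushes 'train'/'val' markers and minibatches into data_q, and results are
# expected back via res_q (the consuming training loop is not in this excerpt).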

cropwidth = 256 - model.insize  # margin available when cropping 256x256 inputs down to insize

def read_image(path, center=False, flip=False):
    # Data loading routine
    image = np.asarray(Image.open(path)).transpose(2, 0, 1)
    if center:
        top = left = cropwidth // 2
    else:
        top = random.randint(0, cropwidth - 1)
        left = random.randint(0, cropwidth - 1)
    bottom = model.insize + top
    right = model.insize + left

    image = image[:, top:bottom, left:right].astype(np.float32)
    image -= mean_image[:, top:bottom, left:right]
    image /= 255
    if flip and random.randint(0, 1) == 0:
        return image[:, :, ::-1]
    else:
        return image

def feed_data():
    # Data feeder
    i = 0
    count = 0

    x_batch = np.ndarray(
        (args.batchsize, 3, model.insize, model.insize), dtype=np.float32)
    y_batch = np.ndarray((args.batchsize,), dtype=np.float32)
    val_x_batch = np.ndarray(
        (args.val_batchsize, 3, model.insize, model.insize), dtype=np.float32)
    val_y_batch = np.ndarray((args.val_batchsize,), dtype=np.float32)

    batch_pool = [None] * args.batchsize
    val_batch_pool = [None] * args.val_batchsize
    pool = multiprocessing.Pool(args.loaderjob)
    data_q.put('train')
    for epoch in six.moves.range(1, 1 + args.epoch):
        print('epoch', epoch, file=sys.stderr)
        print('learning rate', optimizer.lr, file=sys.stderr)
        perm = np.random.permutation(len(train_list))
        for idx in perm:
            path, label = train_list[idx]
            batch_pool[i] = pool.apply_async(read_image, (path, False, True))
            y_batch[i] = label
            i += 1

            if i == args.batchsize:
                for j, x in enumerate(batch_pool):
                    x_batch[j] = x.get()
                data_q.put((x_batch.copy(), y_batch.copy()))
                i = 0

            count += 1
            if count % denominator == 0:
                data_q.put('val')
                j = 0
                for path, label in val_list:
                    val_batch_pool[j] = pool.apply_async(
                        read_image, (path, True, False))
                    val_y_batch[j] = label
                    j += 1

                    if j == args.val_batchsize:
                        for k, x in enumerate(val_batch_pool):
                            val_x_batch[k] = x.get()
                        data_q.put((val_x_batch.copy(), val_y_batch.copy()))
                        j = 0
                data_q.put('train')

        optimizer.lr *= 0.97
    pool.close()
    pool.join()
    data_q.put('end')
```
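
For reference, a minimal sketch of how this traceback can arise (my own reduction, not the original script; the exact TypeError wording depends on the Python version): since `parse_args(args=[])` parses an empty list instead of `sys.argv`, `--train` keeps its default of `None`, and `open(None)` fails.

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--train', help='Path to training image-label list file')

# an empty argument list is parsed instead of sys.argv,
# so --train stays at its default of None
args = parser.parse_args(args=[])
print(args.train)  # -> None

# open() cannot take None; on the Python version reporting this error
# the message reads "TypeError: invalid file: None"
open(args.train)
```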
