Writing training code for a CNN

Problem description

I am writing training code for the two-stream convolutional neural network TwoStream-IQA. The model predicts the quality score of a patch evaluated through the two streams of the network. For the training below, I used the test dataset provided at the GitHub link above.

The training code is as follows:

import os
import time
import numpy as np
import argparse

import chainer

chainer.global_config.train=True

from chainer import cuda
from chainer import serializers
from chainer import optimizers
from chainer import iterators
from chainer import training 
from chainer.training import extensions
from PIL import Image
from sklearn.feature_extraction.image import extract_patches

from model import Model

parser = argparse.ArgumentParser(description='train.py')
parser.add_argument('--model', '-m', default='', 
                    help='path to the trained model')
parser.add_argument('--gpu', '-g', default=0, type=int, help='GPU ID')

args = parser.parse_args()


model = Model()

cuda.cudnn_enabled = True
cuda.check_cuda_available()
xp = cuda.cupy
model.to_gpu()

## prepare training data 
test_label_path = 'data_list/test.txt'
test_img_path = 'data/live/'
test_Graimg_path = 'data/live_grad/'
save_model_path = '/models/nr_sana_2stream.model'

patches_per_img = 256
patchSize = 32

print('-------------Load data-------------')
final_train_set = []
with open(test_label_path, 'rt') as f:
    for l in f:
        line, la = l.strip().split()  # for debug

        tic = time.time()
        full_path = os.path.join(test_img_path, line)
        Grafull_path = os.path.join(test_Graimg_path, line)

        inputImage = Image.open(full_path)
        Graf = Image.open(Grafull_path)
        img = np.asarray(inputImage, dtype=np.float32)
        Gra = np.asarray(Graf, dtype=np.float32)
        img = img.transpose(2, 0, 1)
        Gra = Gra.transpose(2, 0, 1)

        img1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        img1[0, :, :, :] = img
        Gra1 = np.zeros((1, 3, Gra.shape[1], Gra.shape[2]))
        Gra1[0, :, :, :] = Gra

        patches = extract_patches(img, (3, patchSize, patchSize), patchSize)
        Grapatches = extract_patches(Gra, (3, patchSize, patchSize), patchSize)

        X = patches.reshape((-1, 3, patchSize, patchSize))
        GraX = Grapatches.reshape((-1, 3, patchSize, patchSize))

        temp_slice1 = [X[int(float(index))] for index in range(256)]
        temp_slice2 = [GraX[int(float(index))] for index in range(256)]
        ##############################################  
        for j in range(len(temp_slice1)):
            temp_slice1[j] = xp.array(temp_slice1[j].astype(np.float32))
            temp_slice2[j] = xp.array(temp_slice2[j].astype(np.float32))

            final_train_set.append((
                np.asarray((temp_slice1[j], temp_slice2[j])).astype(np.float32),
                int(la)
                ))      
        ##############################################  
print('--------------Done!----------------')

print('--------------Iterator!----------------')    
train_iter = iterators.SerialIterator(final_train_set, batch_size=4)
optimizer = optimizers.Adam()
optimizer.use_cleargrads()
optimizer.setup(model)

updater = training.StandardUpdater(train_iter, optimizer, device=0)

print('--------------Trainer!----------------') 
trainer = training.Trainer(updater, (50, 'epoch'), out='result')

trainer.extend(extensions.LogReport())

trainer.extend(extensions.PrintReport(['epoch', 'iteration', 'main/loss', 'elapsed_time']))

print('--------------Running trainer!----------------') 
trainer.run()

But the code raises an error at the line trainer.run():

-------------Load data-------------
--------------Done!----------------
--------------Iterator!----------------
--------------Trainer!----------------
--------------Running trainer!----------------
Exception in main training loop: Unsupported dtype object
Traceback (most recent call last):
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 316, in run
    update()
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
    self.update_core()
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 154, in update_core
    in_arrays = self.converter(batch, self.device)
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 149, in concat_examples
    return to_device(device, _concat_arrays(batch, padding))
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 37, in to_device
    return cuda.to_gpu(x, device)
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 285, in to_gpu
    return _array_to_gpu(array, device_, stream)
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 333, in _array_to_gpu
    return cupy.asarray(array)
  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/cupy/creation/from_data.py", line 60, in asarray
    return core.array(a, dtype, False)
  File "cupy/core/core.pyx", line 2049, in cupy.core.core.array
  File "cupy/core/core.pyx", line 2083, in cupy.core.core.array
Will finalize trainer extensions and updater before reraising the exception.
Traceback (most recent call last):

  File "<ipython-input-69-12b84b41c6b9>", line 1, in <module>
    runfile('/mnt/nas/sanaalamgeer/Projects/1/MyOwnChainer/Two-stream_IQA-master/train.py', wdir='/mnt/nas/sanaalamgeer/Projects/1/MyOwnChainer/Two-stream_IQA-master')

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 668, in runfile
    execfile(filename, namespace)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/spyder_kernels/customize/spydercustomize.py", line 108, in execfile
    exec(compile(f.read(), filename, 'exec'), namespace)

  File "/mnt/nas/sanaalamgeer/Projects/1/MyOwnChainer/Two-stream_IQA-master/train.py", line 129, in <module>
    trainer.run()

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 330, in run
    six.reraise(*sys.exc_info())

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/six.py", line 693, in reraise
    raise value

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/trainer.py", line 316, in run
    update()

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 149, in update
    self.update_core()

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/training/updaters/standard_updater.py", line 154, in update_core
    in_arrays = self.converter(batch, self.device)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 149, in concat_examples
    return to_device(device, _concat_arrays(batch, padding))

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/dataset/convert.py", line 37, in to_device
    return cuda.to_gpu(x, device)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 285, in to_gpu
    return _array_to_gpu(array, device_, stream)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/chainer/backends/cuda.py", line 333, in _array_to_gpu
    return cupy.asarray(array)

  File "/home/sanaalamgeer/anaconda3/lib/python3.6/site-packages/cupy/creation/from_data.py", line 60, in asarray
    return core.array(a, dtype, False)

  File "cupy/core/core.pyx", line 2049, in cupy.core.core.array

  File "cupy/core/core.pyx", line 2083, in cupy.core.core.array

ValueError: Unsupported dtype object

Maybe that is because I arranged the training data wrongly, since the model takes the training parameters as:

length = x_data.shape[0]
x1 = Variable(x_data[0:length:2])
x2 = Variable(x_data[1:length:2])

and y_data as:

t = xp.repeat(y_data[0:length:2], 1)

The variable final_train_set prepares the dataset as tuples (Numpy Array, 66), where each Numpy Array has dimensions (2, 3, 32, 32) and contains the two kinds of patches, each of shape (3, 32, 32).
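For reference, Chainer's default converter (chainer.dataset.concat_examples) is what stacks these (array, label) tuples into a batch; below is a toy sketch with zero-filled arrays standing in for the real patches:

import numpy as np
from chainer.dataset import concat_examples

# toy stand-ins for the real (patch pair, label) examples
toy_set = [(np.zeros((2, 3, 32, 32), dtype=np.float32), 66) for _ in range(4)]

# the default converter stacks the arrays along a new batch axis
x_batch, t_batch = concat_examples(toy_set)
print(x_batch.shape, x_batch.dtype)  # (4, 2, 3, 32, 32) float32
print(t_batch)                       # [66 66 66 66]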

I used the dataset from the GitHub link provided above. I am new to Chainer, please help!

Tags: python, conv-neural-network, training-data, chainer

Solution


In short, you are calling numpy.asarray inappropriately: numpy.asarray does not concatenate two cupy.ndarrays; it concatenates two numpy.ndarrays.

A summary of your code:

import numpy, cupy

final_train_set = []

N_PATCH_PER_IMAGE = 8

for i in range(10):
    label = 0

    temp_slice_1 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    temp_slice_2 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]

    for j in range(N_PATCH_PER_IMAGE):
        temp_slice_1[j] = cupy.array(temp_slice_1[j])
        temp_slice_2[j] = cupy.array(temp_slice_2[j])
        final_train_set.append(
            [
                # attempting to concatenate two cupy arrays by numpy.asarray 
                numpy.asarray([temp_slice_1[j], temp_slice_2[j]]),
                label
            ]
        )

The error

import numpy as np
import cupy as cp

print("two numpy arrays")
print(np.asarray([np.zeros(shape=(1,)), np.zeros(shape=(1,))]))
print(np.asarray([np.zeros(shape=(1,)), np.zeros(shape=(1,))]).dtype)

print()

print("two cupy arrays")
print(np.asarray([cp.zeros(shape=(1,)), cp.zeros(shape=(1,))]))
print(np.asarray([cp.zeros(shape=(1,)), cp.zeros(shape=(1,))]).dtype)
Output:

two numpy arrays
[[0.]
 [0.]]
float64

two cupy arrays
[[array(0.)]
 [array(0.)]]
object

Such an object-dtype array cannot be converted by cupy.asarray, which is why the converter fails with ValueError: Unsupported dtype object when it moves the batch to the GPU.

Workaround: comment out the two lines

import numpy  # not import cupy here

for i in range(10):
    label = 0

    temp_slice_1 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]
    temp_slice_2 = [numpy.zeros((3, 3)) for j in range(N_PATCH_PER_IMAGE)]

    for j in range(N_PATCH_PER_IMAGE):
        # temp_slice_1[j] = cupy.array(temp_slice_1[j]) <- comment out!
        # temp_slice_2[j] = cupy.array(temp_slice_2[j]) <- comment out!
        final_train_set.append(
            [
                # concatenate two numpy arrays: usually cupy should not be used in dataset
                numpy.asarray([temp_slice_1[j], temp_slice_2[j]]),
                label
            ]
        )
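
As a side note on why cupy is usually unnecessary on the dataset side: when StandardUpdater is constructed with device=0 (as in the question), its default converter moves each numpy batch to the GPU itself. A minimal sketch reusing the question's model and final_train_set:

from chainer import iterators, optimizers, training

# the dataset holds plain numpy arrays; the converter sends each batch to GPU 0
train_iter = iterators.SerialIterator(final_train_set, batch_size=4)
optimizer = optimizers.Adam()
optimizer.setup(model)
updater = training.StandardUpdater(train_iter, optimizer, device=0)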

Footnotes

  1. In the code you provided, xp is not specified, so nobody can tell what it is. If you cannot isolate the problem, please post the entire content of your code, including the model.

  2. I guess you may be unable to run the training code for another reason. In this code, all the data are loaded into final_train_set up front. But if the number of images is large, main memory will be exhausted and a MemoryError will be raised. (In other words, if the number of images is small and your memory is large enough, the error will not occur.) In that case, the following references (Chainer at a Glance, Dataset Abstraction) will help; a rough sketch of a lazily-loading dataset follows below.
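
If memory does become a problem, one option is to load patches lazily with chainer.dataset.DatasetMixin. The sketch below is only a rough outline under assumptions (an entries list of (image path, gradient path, label) triples built from test.txt, and one patch pair returned per image); it is not the question's actual pipeline:

import numpy as np
import chainer
from PIL import Image
from sklearn.feature_extraction.image import extract_patches

class PatchDataset(chainer.dataset.DatasetMixin):
    # entries: list of (img_path, grad_path, label) triples, assumed to be
    # prepared from test.txt beforehand
    def __init__(self, entries, patch_size=32):
        self.entries = entries
        self.patch_size = patch_size

    def __len__(self):
        return len(self.entries)

    def get_example(self, i):
        # images are opened on demand, so only one pair is in memory at a time
        img_path, grad_path, label = self.entries[i]
        img = np.asarray(Image.open(img_path), dtype=np.float32).transpose(2, 0, 1)
        gra = np.asarray(Image.open(grad_path), dtype=np.float32).transpose(2, 0, 1)
        p = self.patch_size
        x = extract_patches(img, (3, p, p), p).reshape(-1, 3, p, p)
        g = extract_patches(gra, (3, p, p), p).reshape(-1, 3, p, p)
        # only the first patch pair is returned here; adapt to your sampling scheme
        return np.asarray((x[0], g[0]), dtype=np.float32), np.int32(label)

An instance of such a class can be passed to SerialIterator in place of final_train_set.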

