IndexError: index 2992 is out of bounds for axis 0 with size 2992

Problem description

Here is my code in Keras:

from keras.models import Sequential, Model
from keras.layers import (TimeDistributed, Conv2D, MaxPooling2D, Activation,
                          Dropout, Flatten, Dense, LSTM, GlobalAveragePooling1D,
                          average)
from keras.utils import np_utils, plot_model
import numpy as np
import h5py
import os

batch_size = 16
num_classes = 4
epochs = 30
frames = 5 # The number of frames for each sequence
input_shape = [100, 100, 3]

def build_rgb_model2():
    model = Sequential()

    model.add(TimeDistributed(Conv2D(32, (3, 3), padding='same'), input_shape=(frames, 120, 180, 3)))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Conv2D(32, (3, 3))))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Conv2D(32, (3, 3))))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Conv2D(32, (3, 3))))
    model.add(TimeDistributed(Activation('relu')))
    model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2))))
    model.add(TimeDistributed(Dropout(0.25)))

    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(512)))

    model.add(TimeDistributed(Dense(32, name="first_dense_rgb")))

    model.add(LSTM(20, return_sequences=True, name="lstm_layer_rgb"))

    model.add(TimeDistributed(Dense(num_classes), name="time_distr_dense_one_rgb"))
    model.add(GlobalAveragePooling1D(name="global_avg_rgb"))

    return model


def build_rgb_model():
    model = Sequential()
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu'), input_shape=(frames, 120, 180, 3)))
    # model.add(TimeDistributed(Activation('relu')))
    # model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(frames, 120, 180, 3))))
    model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(1024)))

    model.add(TimeDistributed(Dense(32, name="first_dense_rgb")))

    model.add(LSTM(20, return_sequences=True, name="lstm_layer_rgb"))

    model.add(TimeDistributed(Dense(num_classes), name="time_distr_dense_one_rgb"))
    model.add(GlobalAveragePooling1D(name="global_avg_rgb"))

    return model


def build_flow_model():
    model = Sequential()
    model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same', activation='relu'), input_shape=(frames, 120, 180, 2)))
    # model.add(TimeDistributed(Activation('relu')))
    # model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same', input_shape=(frames, 120, 180, 2))))
    model.add(TimeDistributed(Conv2D(64, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(128, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(256, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')))
    model.add(TimeDistributed(Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')))
    model.add(TimeDistributed(MaxPooling2D((2, 2), strides=(2, 2))))

    model.add(TimeDistributed(Dropout(0.25)))
    model.add(TimeDistributed(Flatten()))
    model.add(TimeDistributed(Dense(1024)))

    model.add(TimeDistributed(Dense(32, name="first_dense_flow")))

    model.add(LSTM(20, return_sequences=True, name="lstm_layer_flow"))

    model.add(TimeDistributed(Dense(num_classes), name="time_distr_dense_one_flow"))
    model.add(GlobalAveragePooling1D(name="global_avg_flow"))

    return model


def build_model():
    rgb_model = build_rgb_model()
    flow_model = build_flow_model()



    out = average([rgb_model.output, flow_model.output])
    model = Model([rgb_model.input, flow_model.input], out)

    #model.add(add([rgb_model, flow_model]))


    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    plot_model(model, to_file='model/cnn_lstm.png')

    return model


def batch_iter(split_file):
    split_data = np.genfromtxt(split_file, dtype=None, delimiter=",")
    total_seq_num = len(split_data)

    ADRi = "/UCF3"
    split_data2 = np.genfromtxt("C.txt", dtype=None, delimiter=",")
    num_batches_per_epoch = int(((int(split_data2[4]) - 1) / frames - 1) / batch_size) - 300
    total_frames = int(split_data2[4]) - 1
    listing = sorted(os.listdir(ADRi))
    indices2 = []

    def data_generator():
        p = 0
        while 1:
            indices = np.random.permutation(np.arange(total_seq_num))
            t = 0
            for j in range(total_seq_num):
                for k in range(int(split_data[j][1] / frames)):
                    indices2.append(j * 1000 + k * frames)
                    t = t + 1
            indices3 = np.random.permutation(np.arange(len(indices2)))
            for batch_num in range(num_batches_per_epoch): # for each batch
                start_index = batch_num * batch_size
                end_index = ((batch_num + 1) * batch_size) -1

                RGB = []
                FLOW = []
                Y = []
                for i in range(start_index, end_index): # for each sequence
                    ii = int(indices3[i] / 1000)  # sequence number
                    image_dir = split_data[indices[ii]][0].decode("UTF-8")
                    seq_len = int(split_data[indices[ii]][1])
                    y = int(split_data[indices[ii]][2])

                    # To reduce the computational time, data augmentation is performed for each frame
                    jj = min(int(indices3[i] / 1000), seq_len - frames - 1)
                    augs_rgb = []
                    augs_flow = []
                    for j in range(jj,jj+frames): # for each frame
                        # Get frames at regular interval. start from frame index 1
                        frame = j
                        hf = h5py.File(image_dir+".h5", 'r')
                        # rgb image
                        im = hf.get(str(frame))
                        #rgb_i = load_img("%s/img_%05d.jpg" % (image_dir, frame), target_size=(224, 224))
                        rgb = im[:, :, :, (3, 5, 7)].transpose(0, 3, 1, 2)
                        # img_gen = ImageDataGenerator(horizontal_flip=True)
                        # = img_gen.apply_transform(rgb)
                        # img = Image.fromarray(rgb)
                        # rgb_flip_i = img.transpose(Image.FLIP_LEFT_RIGHT)  # augmentation
                        rgb_flip = np.flip(rgb, 2)
                        # t = np.append(rgb, rgb_flip, axis=0)
                        t = np.concatenate([rgb], axis=0)
                        augs_rgb.append(t)

                        # flow image
                        flow_x=im[:, :, :, 1]
                        flow_y = im[:, :, :, 2]

                        flow_x_flip = - np.flip(flow_x,2) # augmentation
                        flow_y_flip =   np.flip(flow_y,2) # augmentation

                        flow = np.concatenate([flow_x, flow_y], axis=0)
                        flow_flip = np.concatenate([flow_x_flip, flow_y_flip], axis=0)
                        #tt=np.concatenate([flow[None,:,:,:], flow_flip[None,:,:,:]], axis=0)
                        tt = np.concatenate([flow[None, :, :, :]], axis=0)
                        augs_flow.append(tt)

                    augs_rgb = np.array(augs_rgb).transpose((1, 0, 3, 4, 2))
                    augs_flow = np.array(augs_flow).transpose((1, 0, 3, 4, 2))
                    RGB.extend(augs_rgb)
                    FLOW.extend(augs_flow)
                    Y.extend([y])

                RGB1 = np.array(RGB)
                FLOW1 = np.array(FLOW)
                Y1 = np_utils.to_categorical(Y, num_classes)
                p = p + 1

                yield ([RGB1, FLOW1], Y1)

    return num_batches_per_epoch, data_generator()




if __name__ == "__main__":

    # Build model
    model = build_model()
    model.summary()
    print("Built model")

    # Make batches
    train_steps, train_batches = batch_iter(train_split_file)
    valid_steps, valid_batches = batch_iter(test_split_file)

    # Train model
    history = model.fit_generator(train_batches, steps_per_epoch=train_steps,
                epochs=10, verbose=1, validation_data=valid_batches,
                validation_steps=valid_steps)
    plot_history(history)
    print("Trained model")

When I set train_steps to 100, it works. When I use the real number of samples (462), it fails at the end of the run with this error:

461/462 [============================>.] - ETA: 2s - loss: 6.6250 - acc: 0.4111
Traceback (most recent call last):
  File "Two.py", line 332, in <module>
    validation_steps=valid_steps)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training.py", line 1418, in fit_generator
    initial_epoch=initial_epoch)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training_generator.py", line 234, in fit_generator
    workers=0)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/legacy/interfaces.py", line 91, in wrapper
    return func(*args, **kwargs)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training.py", line 1472, in evaluate_generator
    verbose=verbose)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/engine/training_generator.py", line 330, in evaluate_generator
    generator_output = next(output_generator)
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/utils/data_utils.py", line 709, in get
    six.reraise(*sys.exc_info())
  File "/home/PycharmProjects/test2/local/lib/python2.7/site-packages/keras/utils/data_utils.py", line 685, in get
    inputs = self.queue.get(block=True).get()
  File "/usr/lib/python2.7/multiprocessing/pool.py", line 567, in get
    raise self._value
IndexError: index 2992 is out of bounds for axis 0 with size 2992

I don't have anything with a size of 2992!

Tags: python, tensorflow, keras, deep-learning

Solution


Arrays in Python are zero-indexed, so if an array has 2992 elements, the last valid index is 2991; asking for index 2992 is one past the end. Somewhere in your generator a computed index reaches the length of the array it indexes, which is exactly what this IndexError reports.
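A minimal sketch of the idea, assuming the overrun happens in a lookup such as indices3[i] or indices[ii] inside data_generator (the exact spot depends on your data). The safe_lookup helper and the stand-in indices array below are purely illustrative, not part of Keras or of your code; clamping (or wrapping) the computed index keeps it strictly inside the valid range:

import numpy as np

# Hypothetical stand-in for one of the index arrays built in data_generator:
# 2992 elements, so the valid positions are 0 .. 2991.
indices = np.random.permutation(np.arange(2992))

def safe_lookup(arr, idx):
    # Clamp the index so it can never reach len(arr).
    # Use idx % len(arr) instead if wrapping around is what you want.
    return arr[min(idx, len(arr) - 1)]

print(safe_lookup(indices, 2992))  # without the clamp, arr[2992] raises IndexError

In the same spirit, make sure steps_per_epoch (your train_steps) never asks the generator for more sequences than the index arrays actually contain, so the last batch does not step one past the end.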

