[Recurrent Neural Networks Tensorflow]: ValueError: setting an array element with a sequence

Problem Description

I know there are some related questions, but I still cannot solve the problem I am running into. I want to train an LSTM for sentiment analysis, and here is my code:

import numpy as np
from sklearn.model_selection import train_test_split

train_x, test_x, train_y, test_y = train_test_split(dataset['reviewText'], dataset['label'], test_size = 0.1)

train_x = np.array(train_x)
train_y = np.array(train_y)
test_x = np.array(test_x)
test_y = np.array(test_y)

import tensorflow as tf

batch_size = 512
time_step = max_tokens

num_layers = 4
lstm_size = 256
learning_rate = 0.01

n_words = len(vocabulary)  # Add 1 for 0 added to vocab

# Create the graph object
tf.reset_default_graph()

with tf.name_scope('inputs'):
    inputs_ = tf.placeholder(tf.int32, [None, None], name="inputs")
    labels_ = tf.placeholder(tf.int32, [None, None], name="labels")
    keep_prob = tf.placeholder(tf.float64, name="keep_prob")

with tf.name_scope("Embeddings"):
    embed = tf.nn.embedding_lookup(embedding_matrix, inputs_)


def lstm_cell():
    lstm = tf.contrib.rnn.BasicLSTMCell(lstm_size, reuse=tf.get_variable_scope().reuse)
    return tf.contrib.rnn.DropoutWrapper(lstm, output_keep_prob=keep_prob)

with tf.name_scope("RNN_layers"):

    cell = tf.contrib.rnn.MultiRNNCell([lstm_cell() for _ in range(num_layers)])
    initial_state = cell.zero_state(batch_size, tf.float64)

with tf.name_scope("RNN_forward"):
    outputs, final_state = tf.nn.dynamic_rnn(cell, embed, initial_state=initial_state)

with tf.name_scope('predictions'):
    predictions = tf.contrib.layers.fully_connected(outputs[:, -1], 1, activation_fn=tf.sigmoid)
    tf.summary.histogram('predictions', predictions)

with tf.name_scope('cost'):
    cost = tf.losses.mean_squared_error(labels_, predictions)
    tf.summary.scalar('cost', cost)

with tf.name_scope('train'):
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

merged = tf.summary.merge_all()


def get_batches(x, y, batch_size):

    n_batches = len(x)//batch_size
    x, y = x[:n_batches*batch_size], y[:n_batches*batch_size]
    for ii in range(0, len(x), batch_size):
        yield x[ii:ii+batch_size], y[ii:ii+batch_size]

epochs = 10

saver = tf.train.Saver()

with tf.Session() as sess:

    sess.run(tf.global_variables_initializer())
    train_writer = tf.summary.FileWriter('./logs/tb/train', sess.graph)
    test_writer = tf.summary.FileWriter('./logs/tb/test', sess.graph)
    iteration = 1

    for e in range(epochs):

        state = sess.run(initial_state)

        for ii, (x, y) in enumerate(get_batches(train_x, train_y, batch_size), 1):

            feed = {inputs_: x,
                    labels_: y[:, None],
                    keep_prob: 0.5,
                    initial_state: state}
            print(type(merged))
            print(type(cost))
            print(type(final_state))
            summary, loss, state, _ = sess.run([merged, cost, final_state, optimizer], feed_dict=feed)

            train_writer.add_summary(summary, iteration)

            if iteration%5==0:
                print("Epoch: {}/{}".format(e, epochs),
                      "Iteration: {}".format(iteration),
                      "Train loss: {:.3f}".format(loss))

            iteration +=1
            test_writer.add_summary(summary, iteration)
            saver.save(sess, "checkpoints/sentiment_manish.ckpt")

    saver.save(sess, "checkpoints/sentiment_manish.ckpt")

The problem is that whenever I run the code, the line summary, loss, state, _ = sess.run([merged, cost, final_state, optimizer], feed_dict=feed) gives me the error stated in the question title.

Edit:

Here I have printed the type + value of each of those variables for you:

<class 'tensorflow.python.framework.ops.Tensor'>
Tensor("Merge/MergeSummary:0", shape=(), dtype=string)

<class 'tensorflow.python.framework.ops.Tensor'>
Tensor("cost/mean_squared_error/value:0", shape=(), dtype=float32)

<class 'tuple'>
(LSTMStateTuple(c=<tf.Tensor 'RNN_forward/rnn/while/Exit_3:0' shape=(512, 256) dtype=float64>, h=<tf.Tensor 'RNN_forward/rnn/while/Exit_4:0' shape=(512, 256) dtype=float64>), LSTMStateTuple(c=<tf.Tensor 'RNN_forward/rnn/while/Exit_5:0' shape=(512, 256) dtype=float64>, h=<tf.Tensor 'RNN_forward/rnn/while/Exit_6:0' shape=(512, 256) dtype=float64>), LSTMStateTuple(c=<tf.Tensor 'RNN_forward/rnn/while/Exit_7:0' shape=(512, 256) dtype=float64>, h=<tf.Tensor 'RNN_forward/rnn/while/Exit_8:0' shape=(512, 256) dtype=float64>), LSTMStateTuple(c=<tf.Tensor 'RNN_forward/rnn/while/Exit_9:0' shape=(512, 256) dtype=float64>, h=<tf.Tensor 'RNN_forward/rnn/while/Exit_10:0' shape=(512, 256) dtype=float64>))

I am not sure what exactly I am doing wrong; I think the issue might be that final_state is a tuple. If anyone knows what is going on, I would greatly appreciate it.
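
For context, a hedged note: the types printed in the edit belong to the graph tensors being fetched, and those are not what raises this ValueError; it is raised while NumPy/TensorFlow convert one of the fed values into a dense array. A minimal check on the first batch, assuming train_x holds tokenized reviews and reusing the get_batches, train_y and batch_size defined above:

import numpy as np

# Grab the first training batch exactly as it would be fed to inputs_.
x, _ = next(get_batches(train_x, train_y, batch_size))

batch = np.asarray(x)
# A ragged batch shows up as dtype=object with shape (batch_size,) instead of an
# integer dtype with shape (batch_size, time_step); that object array is what
# triggers "setting an array element with a sequence".
print(batch.dtype, batch.shape)
print(set(len(seq) for seq in x))  # more than one length => the reviews are not padded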

Thanks in advance!

Tags: python, tensorflow, neural-network, recurrent-neural-network

Solution
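
Given where the error is raised, the usual culprit is that the value fed to inputs_ is a ragged object array of variable-length token lists rather than a dense (batch_size, time_step) integer matrix. Below is a minimal sketch of one common fix, padding (or truncating) every review to max_tokens before batching; pad_features is an illustrative helper, not from the original post, and it assumes each entry of train_x is a list of integer token ids:

import numpy as np

def pad_features(reviews, seq_len):
    # Left-pad each tokenized review with zeros, truncating anything longer than seq_len.
    features = np.zeros((len(reviews), seq_len), dtype=np.int32)
    for i, review in enumerate(reviews):
        review = review[:seq_len]
        if len(review) > 0:
            features[i, -len(review):] = review
    return features

# Pad right after the train/test split so every batch is a rectangular int32 matrix.
train_x = pad_features(train_x, max_tokens)
test_x = pad_features(test_x, max_tokens)

With every review padded to the same length, the x yielded by get_batches is a (batch_size, max_tokens) int32 array, which feed_dict can convert without raising the ValueError.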

