How do I get Keras to use more than one core?

Problem description

I have read that Keras 2.2.4+ supports multiple cores automatically, but my training job only runs as a single thread.

Here is my code snippet:

import numpy as np
import tensorflow as tf
from tensorflow import keras

epochs_ = 1000
batch_size_ = 150

np.random.seed(42)
tf.random.set_seed(42)

# Lagged_Set, n_ahead, X_train/Y_train and X_valid/Y_valid come from my own
# preprocessing (a lagged-feature DataFrame and the train/validation splits).
data_ = Lagged_Set

# Simple RNN: stacked SimpleRNN layers with a Dense output for every time step.
# input_shape is only needed on the first layer.
model6 = keras.models.Sequential([
    keras.layers.SimpleRNN(32, return_sequences=True, input_shape=[None, len(data_.columns)]),
    keras.layers.SimpleRNN(32, return_sequences=True),
    keras.layers.SimpleRNN(32, return_sequences=True),
    keras.layers.SimpleRNN(32, return_sequences=True),
    keras.layers.SimpleRNN(32, return_sequences=True),
    keras.layers.SimpleRNN(32, return_sequences=True),
    keras.layers.SimpleRNN(32, return_sequences=True),
    keras.layers.TimeDistributed(keras.layers.Dense(n_ahead))
])

model6.compile(loss="MAPE", optimizer="rmsprop", metrics=["MAPE"])
history = model6.fit(X_train, Y_train, epochs=epochs_, batch_size=batch_size_,
                     validation_data=(X_valid, Y_valid))

I tried this, but it made no difference:

# TF1-style session config: cap both thread pools at 8 threads.
session_conf = tf.compat.v1.ConfigProto(intra_op_parallelism_threads=8,
                                        inter_op_parallelism_threads=8)

# tf.compat.v1.set_random_seed(1)
sess = tf.compat.v1.Session(graph=tf.compat.v1.get_default_graph(), config=session_conf)
tf.compat.v1.keras.backend.set_session(sess)

Tags: keras, multiprocessing

Solution
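
Under TensorFlow 2.x eager execution, the tf.compat.v1.Session / ConfigProto snippet in the question is generally ignored by model.fit (it only takes effect in v1 graph mode, e.g. after tf.compat.v1.disable_eager_execution()), which is why it appears to do nothing. The TF2-native way to size the thread pools is tf.config.threading, and it has to run before any other TensorFlow operation, because once the runtime has been initialized the setters raise a RuntimeError. A minimal sketch, assuming TensorFlow 2.x on CPU; the thread count of 8 is just an example, not a tuned value:

import tensorflow as tf

# Configure the thread pools first, before any op runs and before the model is built.
tf.config.threading.set_intra_op_parallelism_threads(8)   # threads used inside a single op (e.g. one matmul)
tf.config.threading.set_inter_op_parallelism_threads(8)   # independent ops that may run concurrently

from tensorflow import keras   # build and fit the model only after the pools are configured

Even with both pools widened, the intra-op pool only helps for ops that are internally parallel (matrix multiplications, convolutions). A SimpleRNN processes its time steps sequentially with fairly small 32-unit matmuls, so CPU usage can still look close to a single core; a larger batch_size_ or wider layers give each step more work to spread across threads.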


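To confirm what the runtime actually picked up, the configured pool sizes can be read back; a value of 0 means TensorFlow chooses a default based on the machine:

import os
import tensorflow as tf

# 0 = "let TensorFlow pick", any other value = the explicitly configured pool size.
print("intra-op threads:", tf.config.threading.get_intra_op_parallelism_threads())
print("inter-op threads:", tf.config.threading.get_inter_op_parallelism_threads())
print("logical cores reported by the OS:", os.cpu_count())

Watching htop or Task Manager while model6.fit(...) runs remains the most direct check that more than one core is actually busy.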