R Keras error: Error in py_call_impl(callable, dots$args, dots$keywords): StopIteration:

Problem description

I am trying to learn how to do time series forecasting with an RNN in Keras. I am following the Chapter 6 example from the book Deep Learning with R by François Chollet and J.J. Allaire. The code and an explanation for this chapter are available in this blog post.

Any help would be much appreciated!

My session info is as follows:

R version 4.0.5 (2021-03-31)
Platform: x86_64-pc-linux-gnu (64-bit)
Running under: Ubuntu 18.04.5 LTS

Matrix products: default
BLAS:   /usr/lib/x86_64-linux-gnu/atlas/libblas.so.3.10.3
LAPACK: /usr/lib/x86_64-linux-gnu/atlas/liblapack.so.3.10.3

locale:
 [1] LC_CTYPE=C.UTF-8       LC_NUMERIC=C           LC_TIME=C.UTF-8        LC_COLLATE=C.UTF-8    
 [5] LC_MONETARY=C.UTF-8    LC_MESSAGES=C.UTF-8    LC_PAPER=C.UTF-8       LC_NAME=C             
 [9] LC_ADDRESS=C           LC_TELEPHONE=C         LC_MEASUREMENT=C.UTF-8 LC_IDENTIFICATION=C   

attached base packages:
[1] stats     graphics  grDevices utils     datasets  methods   base     

loaded via a namespace (and not attached):
 [1] Rcpp_1.0.7           here_1.0.1           lattice_0.20-41      png_0.1-7           
 [5] rprojroot_2.0.2      zeallot_0.1.0        rappdirs_0.3.3       grid_4.0.5          
 [9] R6_2.5.0             jsonlite_1.7.2       magrittr_2.0.1       tfruns_1.5.0        
[13] whisker_0.4          Matrix_1.3-2         reticulate_1.20-9002 generics_0.1.0      
[17] keras_2.4.0          tools_4.0.5          compiler_4.0.5       base64enc_0.1-3     
[21] tensorflow_2.4.0 

The output of reticulate::py_discover_config("keras") is as follows:

python:         /home/rstudio-user/.local/share/r-miniconda/envs/r-reticulate/bin/python
libpython:      /home/rstudio-user/.local/share/r-miniconda/envs/r-reticulate/lib/libpython3.6m.so
pythonhome:     /home/rstudio-user/.local/share/r-miniconda/envs/r-reticulate:/home/rstudio-user/.local/share/r-miniconda/envs/r-reticulate
version:        3.6.13 | packaged by conda-forge | (default, Feb 19 2021, 05:36:01)  [GCC 9.3.0]
numpy:          /home/rstudio-user/.local/share/r-miniconda/envs/r-reticulate/lib/python3.6/site-packages/numpy
numpy_version:  1.19.5
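
For reference, the same information can be checked from within R; py_config(), tf_version(), and is_keras_available() are the usual helpers in reticulate/tensorflow/keras:

reticulate::py_config()        # details of the Python configuration reticulate is bound to
tensorflow::tf_version()       # version of the Python TensorFlow build in use
keras::is_keras_available()    # TRUE if the Python keras module can be loaded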

When I try to fit the model via fit_generator(), I get the following error:

Error in py_call_impl(callable, dots$args, dots$keywords) : 
  StopIteration:

13. stop(structure(list(message = "StopIteration: ", call = py_call_impl(callable,      dots$args, dots$keywords), cppstack = structure(list(file = "",      line = -1L, stack = c("/home/rstudio-user/R/x86_64-pc-linux-gnu-library/4.0/reticulate/libs/reticulate.so(Rcpp::exception::exception(char const*, bool)+0x78) [0x7f27979dcb88]",      "/home/rstudio-user/R/x86_64-pc-linux-gnu-library/4.0/reticulate/libs/reticulate.so(Rcpp::stop(std::__cxx11::basic_string<char, std::char_traits<char>, std::allocator<char> > const&)+0x27) [0x7f27979dcbf7]",  ...
12. _peek_and_restore at data_adapter.py#866
11. __init__ at data_adapter.py#809
10. __init__ at data_adapter.py#1166
9. get_data_handler at data_adapter.py#1364
8. fit at training.py#1147
7. (structure(function (...)  {     dots <- py_resolve_dots(list(...))     result <- py_call_impl(callable, dots$args, dots$keywords) ...
6. do.call(object$fit, args)
5. fit.keras.engine.training.Model(object = structure(function (object,      ...)  {     compose_layer(object, x, ...) ...
4.(function (object, ...) 
   {
    UseMethod("fit")
    })(object = structure(function (object, ...)  ... 
3. do.call(fit, args)
2. fit_generator(., train_gen, steps_per_epoch = 500, epochs = 20,    validation_data = val_gen, validation_steps = val_steps)
1. model %>% fit_generator(train_gen, steps_per_epoch = 500, epochs = 20,    validation_data = val_gen, validation_steps = val_steps)
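
Calling reticulate::py_last_error() immediately after the failure prints the full Python-side traceback, which is usually more informative than the truncated R error above:

reticulate::py_last_error()    # full Python traceback for the last error raised through reticulate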

Here is the code I am trying to run:

library(keras)   # model definition, compile, fit_generator
library(readr)   # read_csv

data <- read_csv("jena_climate_2009_2016.csv")

generator <- function(data, lookback, delay, min_index, max_index,
                      shuffle = FALSE, batch_size = 128, step = 6) {
  if (is.null(max_index)) max_index <- nrow(data) - delay - 1
  i <- min_index + lookback
  function() {
    if (shuffle) {
      rows <- sample(c((min_index+lookback):max_index), size = batch_size)
    } else {
      if (i + batch_size >= max_index)
        i <<- min_index + lookback
      rows <- c(i:min(i+batch_size, max_index))
      i <<- i + length(rows)
    }
    samples <- array(0, dim = c(length(rows),
                                lookback / step,
                                dim(data)[[-1]]))
    targets <- array(0, dim = c(length(rows)))
    for (j in 1:length(rows)) {
      indices <- seq(rows[[j]] - lookback, rows[[j]],
                     length.out = dim(samples)[[2]])
      samples[j,,] <- data[indices,]
      targets[[j]] <- data[rows[[j]] + delay,2]
    }
    list(samples, targets)
  }
}

# Now, let's use the abstract generator function to instantiate three generators: one for training, one for validation, and one for testing. 
# Each will look at different temporal segments of the original data: 
# the training generator looks at the first 200,000 timesteps
# the validation generator looks at the following 100,000
# the test generator looks at the remainder.

lookback <- 1440
step <- 6
delay <- 144
batch_size <- 128

train_gen <- generator(
  data,
  lookback = lookback,
  delay = delay,
  min_index = 1,
  max_index = 200000,
  shuffle = TRUE,
  step = step,
  batch_size = batch_size
)

val_gen <- generator(
  data,
  lookback = lookback,
  delay = delay,
  min_index = 200001,
  max_index = 300000,
  step = step,
  batch_size = batch_size
)

test_gen <- generator(
  data,
  lookback = lookback,
  delay = delay,
  min_index = 300001,
  max_index = NULL,
  step = step,
  batch_size = batch_size
)

val_steps <- (300000 - 200001 - lookback) / batch_size      # How many steps to draw from val_gen in order to see the entire validation set
test_steps <- (nrow(data) - 300001 - lookback) / batch_size # How many steps to draw from test_gen in order to see the entire test set

# Define model and fit generator 

model <- keras_model_sequential() %>%
  layer_gru(units = 32, recurrent_activation = "sigmoid",
            reset_after = TRUE, # to get rid of error: WARNING:tensorflow:Layer gru_3 will not use cuDNN kernels since it doesn't meet the criteria. It will use a generic GPU kernel as fallback when running on GPU.
                                # as described in https://www.tensorflow.org/api_docs/python/tf/keras/layers/GRU#used-in-the-notebooks_1
            input_shape = list(NULL, dim(data)[[-1]])) %>%
  layer_dense(units = 1)

model %>% compile(
  optimizer = optimizer_rmsprop(),
  loss = "mae"
)

history <- model %>% fit_generator(
  train_gen,
  steps_per_epoch = 500,
  epochs = 20,
  validation_data = val_gen,
  validation_steps = val_steps
)

plot(history)
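
To see what the generator actually hands to Keras, a single batch can also be drawn by hand (a quick sanity check; the shapes in the comments are what the generator is supposed to produce, not verified output):

batch <- train_gen()     # call the R generator once
str(batch)               # should be a list of two elements: samples and targets
dim(batch[[1]])          # samples: (batch_size, lookback / step, number of columns in data)
length(batch[[2]])       # targets: one value per sample in the batch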

Tags: r, tensorflow, keras, time-series, recurrent-neural-network

Solution

