Mac M1: Unable to use ImageDataGenerator. SciPy required, but already installed

Problem description

This is my first time posting on this site, so I apologize for any mistakes I may make! I will try to give a short but useful description of the problem.

I am training a neural network with TensorFlow to classify rock-paper-scissors images. In my model I use ImageDataGenerator. The problem appears when I try to fit the model: I get the error "ImportError: Image transformations require SciPy. Install SciPy". I checked my virtual environment and the scipy package is already installed. I tried uninstalling and reinstalling it, but the problem persists. I also created a new virtual environment, but nothing changed.

I am using a MacBook Pro M1 with 16 GB of RAM, running macOS Big Sur 11.6. I installed TensorFlow following the instructions on this page: https://developer.apple.com/metal/tensorflow-plugin/

Python version: 3.8.11, TensorFlow version: 2.5.0.
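As a sanity check, the following sketch (assuming a Jupyter/IPython kernel, as in the traceback below) shows which interpreter the kernel is actually using and whether SciPy imports from it:

import sys
print(sys.executable)     # path of the Python interpreter the notebook kernel is running
import scipy
print(scipy.__version__)  # raises ImportError here if SciPy is not installed for this interpreter

If the import fails, installing SciPy with python -m pip install scipy (or conda install scipy) while that same environment is active should make it visible to keras_preprocessing.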

Here is the code:

import os
import zipfile

local_zip = './rps.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('tmp/rps-train')
zip_ref.close()

local_zip = './rps-test-set.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('tmp/rps-test')
zip_ref.close()

base_dir = 'tmp/rps-train/rps'

rock_dir = os.path.join(base_dir, 'rock')
paper_dir = os.path.join(base_dir, 'paper')
scissors_dir = os.path.join(base_dir, 'scissors')

rock_files = os.listdir(rock_dir)
paper_files = os.listdir(paper_dir)
scissors_files = os.listdir(scissors_dir)

import tensorflow as tf
import keras_preprocessing
from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator

TRAINING_DIR = 'tmp/rps-train/rps'
training_datagen = ImageDataGenerator(rescale=1./255,
                                      rotation_range=40, 
                                      width_shift_range=0.2,
                                      height_shift_range=0.2,
                                      horizontal_flip=True
)

VALIDATION_DIR = 'tmp/rps-test/rps-test-set'
validation_datagen = ImageDataGenerator(rescale=1./255)

train_generator = training_datagen.flow_from_directory(
    TRAINING_DIR,
    target_size=(150,150),
    class_mode='categorical',
    batch_size=126
)

validation_generator = validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    target_size=(150,150),
    class_mode='categorical',
    batch_size=126
)

model = tf.keras.models.Sequential([
    # This is the first convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    # The second convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The third convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The fourth convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # Flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')
])

model.summary()

model.compile(optimizer=tf.optimizers.RMSprop(learning_rate=0.001),
              loss=tf.metrics.categorical_crossentropy,
              metrics=['accuracy'])

The error occurs when I run this line of code:

model.fit_generator(train_generator,
                    epochs=3)

The error is:

    ImportError                               Traceback (most recent call last)
/var/folders/j4/flhsd8lj4z7g689p_y84tfjh0000gn/T/ipykernel_89314/1416594977.py in <module>
      1 import scipy
      2 
----> 3 model.fit_generator(train_generator,
      4                     epochs=3)

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, validation_freq, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1941                   'will be removed in a future version. '
   1942                   'Please use `Model.fit`, which supports generators.')
-> 1943     return self.fit(
   1944         generator,
   1945         steps_per_epoch=steps_per_epoch,

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/tensorflow/python/keras/engine/training.py in fit(self, x, y, batch_size, epochs, verbose, callbacks, validation_split, validation_data, shuffle, class_weight, sample_weight, initial_epoch, steps_per_epoch, validation_steps, validation_batch_size, validation_freq, max_queue_size, workers, use_multiprocessing)
   1131          training_utils.RespectCompiledTrainableState(self):
   1132       # Creates a `tf.data.Dataset` and handles batch and epoch iteration.
-> 1133       data_handler = data_adapter.get_data_handler(
   1134           x=x,
   1135           y=y,

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/tensorflow/python/keras/engine/data_adapter.py in get_data_handler(*args, **kwargs)
   1362   if getattr(kwargs["model"], "_cluster_coordinator", None):
   1363     return _ClusterCoordinatorDataHandler(*args, **kwargs)
-> 1364   return DataHandler(*args, **kwargs)
   1365 
   1366 

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self, x, y, sample_weight, batch_size, steps_per_epoch, initial_epoch, epochs, shuffle, class_weight, max_queue_size, workers, use_multiprocessing, model, steps_per_execution, distribute)
   1152     adapter_cls = select_data_adapter(x, y)
   1153     self._verify_data_adapter_compatibility(adapter_cls)
-> 1154     self._adapter = adapter_cls(
   1155         x,
   1156         y,

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self, x, y, sample_weights, shuffle, workers, use_multiprocessing, max_queue_size, model, **kwargs)
    930     self._keras_sequence = x
    931     self._enqueuer = None
--> 932     super(KerasSequenceAdapter, self).__init__(
    933         x,
    934         shuffle=False,  # Shuffle is handed in the _make_callable override.

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/tensorflow/python/keras/engine/data_adapter.py in __init__(self, x, y, sample_weights, workers, use_multiprocessing, max_queue_size, model, **kwargs)
    807     # Since we have to know the dtype of the python generator when we build the
    808     # dataset, we have to look at a batch to infer the structure.
--> 809     peek, x = self._peek_and_restore(x)
    810     peek = self._standardize_batch(peek)
    811     peek = _process_tensorlike(peek)

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/tensorflow/python/keras/engine/data_adapter.py in _peek_and_restore(x)
    941   @staticmethod
    942   def _peek_and_restore(x):
--> 943     return x[0], x
    944 
    945   def _handle_multiprocessing(self, x, workers, use_multiprocessing,

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/keras_preprocessing/image/iterator.py in __getitem__(self, idx)
     63         index_array = self.index_array[self.batch_size * idx:
     64                                        self.batch_size * (idx + 1)]
---> 65         return self._get_batches_of_transformed_samples(index_array)
     66 
     67     def __len__(self):

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/keras_preprocessing/image/iterator.py in _get_batches_of_transformed_samples(self, index_array)
    236             if self.image_data_generator:
    237                 params = self.image_data_generator.get_random_transform(x.shape)
--> 238                 x = self.image_data_generator.apply_transform(x, params)
    239                 x = self.image_data_generator.standardize(x)
    240             batch_x[i] = x

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/keras_preprocessing/image/image_data_generator.py in apply_transform(self, x, transform_parameters)
    861         img_channel_axis = self.channel_axis - 1
    862 
--> 863         x = apply_affine_transform(x, transform_parameters.get('theta', 0),
    864                                    transform_parameters.get('tx', 0),
    865                                    transform_parameters.get('ty', 0),

~/miniforge3/envs/python3.8/lib/python3.8/site-packages/keras_preprocessing/image/affine_transformations.py in apply_affine_transform(x, theta, tx, ty, shear, zx, zy, row_axis, col_axis, channel_axis, fill_mode, cval, order)
    279     """
    280     if scipy is None:
--> 281         raise ImportError('Image transformations require SciPy. '
    282                           'Install SciPy.')
    283     transform_matrix = None

ImportError: Image transformations require SciPy. Install SciPy.

Tags: python, tensorflow, scipy, apple-m1

Solution


Welcome to the community! The error message is explicit: you need to import scipy. See the SciPy documentation for more details.

So your code should look like this:

import os
import zipfile
import scipy

local_zip = './rps.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('tmp/rps-train')
zip_ref.close()

local_zip = './rps-test-set.zip'
zip_ref = zipfile.ZipFile(local_zip, 'r')
zip_ref.extractall('tmp/rps-test')
zip_ref.close()

base_dir = 'tmp/rps-train/rps'

rock_dir = os.path.join(base_dir, 'rock')
paper_dir = os.path.join(base_dir, 'paper')
scissors_dir = os.path.join(base_dir, 'scissors')

rock_files = os.listdir(rock_dir)
paper_files = os.listdir(paper_dir)
scissors_files = os.listdir(scissors_dir)

import tensorflow as tf
import keras_preprocessing
from keras_preprocessing import image
from keras_preprocessing.image import ImageDataGenerator

TRAINING_DIR = 'tmp/rps-train/rps'
training_datagen = ImageDataGenerator(rescale=1./255,
                                      rotation_range=40, 
                                      width_shift_range=0.2,
                                      height_shift_range=0.2,
                                      horizontal_flip=True
)

VALIDATION_DIR = 'tmp/rps-test/rps-test-set'
validation_datagen = ImageDataGenerator(rescale=1./255)

train_generator = training_datagen.flow_from_directory(
    TRAINING_DIR,
    target_size=(150,150),
    class_mode='categorical',
    batch_size=126
)

validation_generator = validation_datagen.flow_from_directory(
    VALIDATION_DIR,
    target_size=(150,150),
    class_mode='categorical',
    batch_size=126
)

model = tf.keras.models.Sequential([
    # This is the first convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(150, 150, 3)),
    tf.keras.layers.MaxPooling2D(2, 2),
    # The second convolution
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The third convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # The fourth convolution
    tf.keras.layers.Conv2D(128, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    # Flatten the results to feed into a DNN
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dropout(0.5),
    # 512 neuron hidden layer
    tf.keras.layers.Dense(512, activation='relu'),
    tf.keras.layers.Dense(3, activation='softmax')
])

model.summary()

model.compile(optimizer=tf.optimizers.RMSprop(learning_rate=0.001),
              loss=tf.metrics.categorical_crossentropy,
              metrics=['accuracy'])
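
As a side note, the traceback above also warns that Model.fit_generator is deprecated and recommends Model.fit, which accepts generators directly. A minimal sketch of the equivalent call, reusing the validation_generator defined above:

model.fit(train_generator,
          epochs=3,
          validation_data=validation_generator)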
