AttributeError: 'numpy.ndarray' object has no attribute '_in_multi_worker_mode'

Problem Description

I have been trying to build a transfer learning model on top of the Xception model and fine-tune it. However, when I try to train the model in the last part of the code, it raises the following error: AttributeError: 'numpy.ndarray' object has no attribute '_in_multi_worker_mode'. Can someone help me resolve this error, or suggest working code to train the model? My code is below.

   # Install TensorFlow
   try:
       # %tensorflow_version only exists in Colab.
       %tensorflow_version 2.x
   except Exception:
       pass

   import tensorflow as tf
   print(tf.__version__)

   from tensorflow import keras
   tf.random.set_seed(42)

   import numpy as np
   np.random.seed(42)



   # Pandas and Numpy for data structures and util functions
   import numpy as np
   import pandas as pd
   from numpy.random import rand

   pd.options.display.max_colwidth = 600

   # Scikit Imports
   from sklearn.model_selection import train_test_split

   # Matplot Imports
   import matplotlib.pyplot as plt
   params = {'legend.fontsize': 'x-large',
      'figure.figsize': (15, 5),
      'axes.labelsize': 'x-large',
      'axes.titlesize':'x-large',
      'xtick.labelsize':'x-large',
      'ytick.labelsize':'x-large'}
   import glob
   import PIL
   from PIL import Image          

   plt.rcParams.update(params)
   %matplotlib inline

   # pandas display data frames as tables
   from IPython.display import display, HTML

   import warnings
   warnings.filterwarnings('ignore')
   import sys
   import os
   from tensorflow.keras import utils as np_utils
   from tensorflow.keras.utils import multi_gpu_model
   from tensorflow.keras.utils import Sequence
   from tensorflow.keras.models import Model 
   from tensorflow.keras import layers
   from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
   from tensorflow.keras import regularizers
   from tensorflow.keras.optimizers import Adam
   from tensorflow.keras.layers import GlobalAveragePooling2D
   from tensorflow.keras.layers import Dense, Dropout, Activation, BatchNormalization, Flatten
   from tensorflow.keras.models import Sequential,load_model
   from tensorflow.python.keras.utils.data_utils import Sequence
   from tensorflow.keras.preprocessing.image import ImageDataGenerator, img_to_array, load_img
   from tensorflow.keras.preprocessing.image import NumpyArrayIterator
   from keras.applications import Xception
   from tensorflow.keras.preprocessing import image
   from tensorflow.keras import backend as K 

   imgFiles = glob.glob("dataset/*/*.jpg")
   for items in imgFiles[:8]:
       print(items)

   X = []
   y = []

   for fName in imgFiles:
       # load each image, resize it to the Xception input size and scale it to [0, 1]
       X_i = Image.open(fName)
       X_i = X_i.resize((299,299))
       X_i = np.array(X_i) / 255.0
       X.append(X_i)

       # the class label is the name of the image's parent directory
       label = fName.split("/")
       y_i = label[-2]
       y.append(y_i)

   print(set(y))

   from sklearn.preprocessing import LabelEncoder
   lEncoder = LabelEncoder()
   y = lEncoder.fit_transform(y)

   print(set(y))
   print(lEncoder.classes_)

   X = np.array(X)
   y = np.array(y)

   print(X.shape)
   print(y.shape)

   from sklearn.model_selection import train_test_split


   # splitting train and test images to 70% and 30% resp.
   X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3,
                                                        stratify=y,
                                                        random_state=42)


  print("X_train_shape: {}".format(X_train.shape))
  print("X_test_shape: {}".format(X_test.shape))

  mu = X_train.mean()
  std = X_train.std()

  X_train_std = (X_train-mu)/std
  X_test_std = (X_test-mu)/std

  # splitting 15% of train images for validation
  X_train, X_val, y_train, y_val = train_test_split(X_train, y_train,
                                                     test_size=0.15,
                                                     stratify=y_train,
                                                     random_state=42)
  print("X_val_shape: {}".format(X_val.shape))

  # hyper parameters for model
  nb_classes = 6  # number of classes
  based_model_last_block_layer_number = 126  
  img_width, img_height = 299, 299  
  num_channels= 3
  batch_size = 32  
  nb_epoch = 15  # number of epochs the model is trained for
  transformation_ratio = .05  # how aggressive the data augmentation/transformation will be

  #data augmentation
  train_datagen = ImageDataGenerator(
                               rescale=1./255, 
                               rotation_range=30, 
                               width_shift_range=0.2,
                               height_shift_range=0.2, 
                               horizontal_flip='true',
                               vertical_flip='true')
  train_generator = train_datagen.flow(X, y, shuffle=False, batch_size=batch_size, seed=1)
  validation_datagen = ImageDataGenerator(rescale=1. / 255)
  val_generator = train_datagen.flow(X, y, shuffle=False, batch_size=batch_size, seed=1)
 # Pre-Trained CNN Model using imagenet dataset for pre-trained weights
 # Transfer Learning!!
 # Importing Xception pre trained model on ImageNet


  base_model = keras.applications.xception.Xception(include_top=False,
                                                    weights='imagenet',
                                                    input_shape=(img_width, img_height, num_channels))
 # first: train only the top layers (which were randomly initialized)
 # i.e. freeze all layers of the based model that is already pre-trained.
  for layer in base_model.layers:
      layer.trainable = False

 # Top Model Block which is to be stacked over xception model
 out = base_model.output
 out = GlobalAveragePooling2D()(out)
 out = Dense(1024, activation='relu')(out)
 out = Dense(512, activation='relu')(out)
 total_classes = y.shape[0]
 predictions = Dense(total_classes, activation='softmax')(out)

 model = keras.models.Model(inputs=base_model.input, outputs=predictions)
  model.compile(Adam(lr=.0001), loss='categorical_crossentropy', metrics=['accuracy'])
 model.summary()

  callbacks_list = [keras.callbacks.ModelCheckpoint("bestTL.h5", save_best_only=True)]
 # Train the model
 batch_size = batch_size
 train_steps_per_epoch = X_train.shape[0] // batch_size
 val_steps_per_epoch = X_val.shape[0] // batch_size
 #training cnn up to 15 epoch
 history = Model.fit(X_train_std,y_train,
                          steps_per_epoch=train_steps_per_epoch,
                          validation_data=val_generator,
                          validation_steps=val_steps_per_epoch,
                          callbacks=callbacks_list,
                          epochs=15,
                          verbose=1)

Tags: python, machine-learning, conv-neural-network, object-detection, transfer-learning

Solution


How about using only one of tensorflow.keras.xxxx or keras.xxxx at a time, rather than mixing both in the same script?
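For example, a minimal sketch of that suggestion (assuming TensorFlow 2.x, where Xception is also available under tensorflow.keras.applications, and using the img_width, img_height and num_channels variables already defined in the question): drop the lone keras import and pull Xception from tensorflow.keras instead, so every layer and model object comes from the same package:

    # Replace the standalone-Keras import from the question ...
    # from keras.applications import Xception
    # ... with the tf.keras equivalent, matching the other tensorflow.keras imports:
    from tensorflow.keras.applications import Xception

    base_model = Xception(include_top=False,
                          weights='imagenet',
                          input_shape=(img_width, img_height, num_channels))

Mixing the standalone keras package with tensorflow.keras gives you two parallel class hierarchies, so layers and models created by one are not recognised by the other; keeping every import under tensorflow.keras avoids that mismatch.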

