ImageAI error after 2 epochs: ValueError: Ensure you specified correct input image, input type, output type and/or output image path

Problem description

This is the error I am getting, and I don't know why, since the first epoch ran without problems. I have included the notebook file containing the entire code. I am running this in a Kaggle notebook with both Internet and GPU enabled.

I am using TensorFlow 1 and this is an action-recognition task. I built the model from scratch, so you may need to look at the details. Below is my entire code; only one function is missing, but it is not important, and I have added a comment next to it so you know what it does.

Code

'''

!cp -r ../input/imageai/imageai/imageai/ imageai

!python3 -c 'import tensorflow as tf; print(tf.__version__)'  # for Python 3

!python -c 'import keras; print(keras.__version__)'
1.12.0
Using TensorFlow backend.
2.2.4

import numpy as np # linear algebra
import pandas as pd # data processing,CSV file I/O (e.g. pd.read_csv)

import os

from keras.preprocessing.image import ImageDataGenerator
from keras.applications.inception_v3 import preprocess_input
from keras.utils.data_utils import GeneratorEnqueuer
import matplotlib.pyplot as plt
import pandas as pd 
import numpy as np 
import math,os
from imageai.Detection import ObjectDetection
import warnings
from keras.callbacks import ModelCheckpoint
from imageai.Detection import ObjectDetection
import os
import cv2
import numpy as np
import shutil

from keras.models import Sequential
from keras.callbacks import Callback
from keras.layers import Conv2D,MaxPooling2D,Dropout,Dense,Flatten,UpSampling2D
from keras import backend as K

import random
import glob
import subprocess
import os
from PIL import Image
import numpy as np
from matplotlib.pyplot import imshow,figure

from keras.layers import Lambda,Reshape,Permute,Input,add,Conv3D,GaussianNoise,concatenate
from keras.layers import ConvLSTM2D,BatchNormalization,TimeDistributed,Add
from keras.models import Model

warnings.filterwarnings("ignore")
%matplotlib inline


OBJECT_DETECTION_SPEED = "fastest"
PRETRAINED_MODEL_PATH = "/kaggle/input/imageai/resnet50_coco_best_v2.0.1.h5"
FONT = cv2.FONT_HERSHEY_SIMPLEX
IMG_SIZE = 128
FRAME_BATCH_SIZE = 4
THRESHOLD_DIFF_TOLERANCE = 80
batch_size = 8
EPOCHS = 5
Using TensorFlow backend.
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(PRETRAINED_MODEL_PATH)
detector.loadModel(detection_speed = OBJECT_DETECTION_SPEED) #change parameter to adjust accuracy and speed
custom = detector.CustomObjects(person=True)
train_dir = "/kaggle/input/violence-final-2/welp"
val_dir = "/kaggle/input/moviesviolencenonviolence/movies/"

total_number_frames_train = 0
total_number_frames_valid = 0
def get_Boxes(frame):
    _,detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom,input_type="array",input_image= frame,output_type="array"
        )
    return detections
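# (Added note: with output_type="array", detectCustomObjectsFromImage returns
# the annotated image array together with the detection list, which is why
# the call above unpacks `_,detections`.)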

def frame_preprocessing(frame):
    frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    frame = cv2.resize(frame,dsize=(IMG_SIZE,IMG_SIZE),interpolation=cv2.INTER_CUBIC)

    return frame

def get_frame_difference(frame_1,frame_2):
    frame = cv2.absdiff(frame_1,frame_2)
    ret,thresh1 = cv2.threshold(frame,THRESHOLD_DIFF_TOLERANCE,255,cv2.THRESH_BINARY)   #127 is threshold
    return thresh1

def mask_frame(frame,detections,detections_temp):
    mask = np.zeros(frame.shape,dtype=np.uint8)

    for eachObject in detections:
        x1,y1,x2,y2 = eachObject["box_points"]
        mask = cv2.rectangle(mask,(x1,y1),(x2,y2),(255,255),-1) 

    for eachObject in detections_temp:
        x1,y1,x2,y2 = eachObject["box_points"]
        mask = cv2.rectangle(mask,(x1,y1),(x2,y2),(255,255),-1)

    result = cv2.bitwise_and(frame,mask)   # Mask input image with binary mask
    result[mask==0] = 255   # Optional : set background -> Now white/ by default black

    return result
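
# Added sanity check (not in the original notebook): with no detections the
# mask stays all zero, so bitwise_and blanks the frame and the mask==0 branch
# turns every pixel white.
dummy = np.zeros((IMG_SIZE,IMG_SIZE),dtype=np.uint8)
assert (mask_frame(dummy,[],[]) == 255).all()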
            
def my_generater(batch_size,in_dir,videos_per_category):           
    
    total_number_frames_train = 0
    total_number_frames_valid = 0

    list_fight=os.listdir(os.path.join(in_dir,"Violence"))
    list_no_fight=os.listdir(os.path.join(in_dir,"NonViolence"))

    fight_final=random.sample(list_fight,videos_per_category)
    no_fight_final=random.sample(list_no_fight,videos_per_category)

    fight_labels = []
    no_fight_labels = []

    for i in range (videos_per_category):
        fight_labels.append([1,0])
        no_fight_labels.append([0,1])

    final = fight_final + no_fight_final
    labels = fight_labels + no_fight_labels

    c = list(zip(final,labels))
    random.shuffle(c)
    names,labels = zip(*c)
    
    images_batches=[]
    labelss=[]
    counter = 0
    
#     print("check")
    while True:
        for i in range(len(names)): ##no. of videos loop
            
#             print("check1")
            if labels[i]==[1,0]:
                in_file = os.path.join(in_dir,"Violence")
            else:
                in_file = os.path.join(in_dir,"NonViolence")
                
            in_file = os.path.join(in_file,names[i])
            vidcap = cv2.VideoCapture(in_file)
            length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))  #frames in a video
            if in_dir == train_dir:
                total_number_frames_train = total_number_frames_train + length
            else:
                total_number_frames_valid = total_number_frames_valid + length
        
            for j in range(int(length/FRAME_BATCH_SIZE)):
                detections_temp_2=[]
                success,frame_temp = vidcap.read()
                frame_temp = cv2.resize(frame_temp,dsize=(IMG_SIZE,IMG_SIZE),interpolation=cv2.INTER_CUBIC)
                detections_temp = get_Boxes(frame_temp) 
                frame_temp = mask_frame(frame_temp,detections_temp,detections_temp_2)                    
                frame_temp = cv2.cvtColor(frame_temp,cv2.COLOR_BGR2GRAY)
                
                images_frame_batches=[]

                for k in range(FRAME_BATCH_SIZE - 1):
                    
                    success,frame = vidcap.read()
                    frame = cv2.resize(frame,dsize=(IMG_SIZE,IMG_SIZE),interpolation=cv2.INTER_CUBIC)
                    detections = get_Boxes(frame)                   
                    frame = mask_frame(frame,detections,detections_temp)
                    frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)   
                    diff = get_frame_difference(frame,frame_temp)
                    diff = diff/255
                    images_frame_batches.append(diff)
                    
                    frame_temp = frame
                    detections_temp=detections

                if counter < batch_size:
                    images_batches.append(images_frame_batches)
                    counter = counter + 1
                    labelss.append(labels[i])
                    

                else:
                    yield np.array(images_batches).reshape((batch_size,FRAME_BATCH_SIZE-1,IMG_SIZE,IMG_SIZE,1)),np.array(labelss)
                    labelss=[]
                    images_batches=[]
                    counter = 0
        break
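
# Added note: vidcap.read() returns success=False at the end of a video or on
# a corrupt frame, leaving `frame` as None, and the generator above never
# checks `success` before resizing/detecting. A hedged defensive read could
# look like this hypothetical helper (not used by the original code):
def read_frame_safe(vidcap):
    success,frame = vidcap.read()
    if not success or frame is None:
        return None   # caller should skip the frame or move to the next video
    return cv2.resize(frame,dsize=(IMG_SIZE,IMG_SIZE),interpolation=cv2.INTER_CUBIC)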
  
gen = my_generater(2,train_dir,800)
videos,next_frame = next(gen)

print(np.array(videos).shape)
np.array(next_frame).shape
(2, 3, 128, 128, 1)
(2, 2)

#gets total frames combined of all videos
total_number_frames_train = get_total_frames(train_dir,800)
total_number_frames_valid = get_total_frames(val_dir,100)

print(total_number_frames_train)
print(total_number_frames_valid)
224108
18283
steps_per_epoch = total_number_frames_train // (batch_size * FRAME_BATCH_SIZE)
validation_steps = total_number_frames_valid // (batch_size * FRAME_BATCH_SIZE)

print(steps_per_epoch)
print(validation_steps)
7003
571
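
# Added sanity check: the step counts printed above follow from the frame
# totals with batch_size = 8 and FRAME_BATCH_SIZE = 4.
assert 224108 // (8 * 4) == 7003
assert 18283 // (8 * 4) == 571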
inp = Input((FRAME_BATCH_SIZE - 1,IMG_SIZE,IMG_SIZE,1))
permuted = Permute((2,3,4,1))(inp)
noise = GaussianNoise(0.1)(permuted)
c=4
x = Permute((4,1,2,3))(noise)
x = ConvLSTM2D(filters=c,kernel_size=(3,3),padding='same',name='conv_lstm1',return_sequences=True)(x)

c1 = BatchNormalization()(x)
x = Dropout(0.2)(x)
x = TimeDistributed(MaxPooling2D(pool_size=(2,2)))(c1)

x = ConvLSTM2D(filters=2*c,kernel_size=(3,3),padding='same',name='conv_lstm2',return_sequences=True)(x)
c2 = BatchNormalization()(x)
x = Dropout(0.2)(x)

x = TimeDistributed(MaxPooling2D(pool_size=(2,2)))(c2)
x = ConvLSTM2D(filters=4*c,kernel_size=(3,3),padding='same',name='conv_lstm3',return_sequences=True)(x)

x = TimeDistributed(UpSampling2D(size=(2,2)))(x)
x = ConvLSTM2D(filters=4*c,kernel_size=(3,3),padding='same',name='conv_lstm4',return_sequences=True)(x)
x = BatchNormalization()(x)

x = ConvLSTM2D(filters=2*c,kernel_size=(3,3),padding='same',name='conv_lstm5',return_sequences=True)(x)
x = BatchNormalization()(x)
x = Add()([c2,x])
x = Dropout(0.2)(x)

x = TimeDistributed(UpSampling2D(size=(2,2)))(x)
x = ConvLSTM2D(filters=c,kernel_size=(3,3),padding='same',name='conv_lstm6',return_sequences=False)(x)
x = BatchNormalization()(x)

x = Flatten()(x)
x = Dense(units=50,activation='relu')(x)

x = Dense(units=2,activation='relu')(x)

model=Model(inputs=[inp],outputs=[x])

model.summary()  # works, but I have deleted the output due to the character limit

   

Total params: 3,322,408
Trainable params: 3,322,328
Non-trainable params: 80
__________________________________________________________________________________________________
model.compile(optimizer='adam',loss='mse',metrics=['accuracy'])
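# Aside (added, not what was run here): with one-hot labels like [1,0]/[0,1],
# the conventional choice is a softmax output trained with
# categorical_crossentropy; the relu + mse combination above can leave the
# accuracy stuck near 0.5. A hedged alternative would be:
#   x = Dense(units=2,activation='softmax')(x)
#   model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])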
!pip install tensorflow-gpu==1.14.0
import tensorflow as tf
Collecting tensorflow-gpu==1.14.0
  Downloading https://files.pythonhosted.org/packages/76/04/43153bfdfcf6c9a4c38ecdb971ca9a75b9a791bb69a764d652c359aca504/tensorflow_gpu-1.14.0-cp36-cp36m-manylinux1_x86_64.whl (377.0MB)
    100% |████████████████████████████████| 377.0MB 113kB/s 
Requirement already satisfied: six>=1.10.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.12.0)
Collecting google-pasta>=0.1.6 (from tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/a3/de/c648ef6835192e6e2cc03f40b19eeda4382c49b5bafb43d88b931c4c74ac/google_pasta-0.2.0-py3-none-any.whl (57kB)
    100% |████████████████████████████████| 61kB 6.6MB/s 
Collecting tensorboard<1.15.0,>=1.14.0 (from tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/91/2d/2ed263449a078cd9c8a9ba50ebd50123adf1f8cfbea1492f9084169b89d9/tensorboard-1.14.0-py3-none-any.whl (3.1MB)
    100% |████████████████████████████████| 3.2MB 13.3MB/s 
Requirement already satisfied: keras-applications>=1.0.6 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.0.7)
Collecting tensorflow-estimator<1.15.0rc0,>=1.14.0rc0 (from tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/3c/d5/21860a5b11caf0678fbc8319341b0ae21a07156911132e0e71bffed0510d/tensorflow_estimator-1.14.0-py2.py3-none-any.whl (488kB)
    100% |████████████████████████████████| 491kB 24.4MB/s 
Requirement already satisfied: grpcio>=1.8.6 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.18.0)
Requirement already satisfied: absl-py>=0.7.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (0.7.0)
Requirement already satisfied: numpy<2.0,>=1.14.5 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.16.1)
Requirement already satisfied: termcolor>=1.1.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.1.0)
Requirement already satisfied: wheel>=0.26 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (0.31.1)
Requirement already satisfied: gast>=0.2.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (0.2.2)
Requirement already satisfied: keras-preprocessing>=1.0.5 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (1.0.9)
Requirement already satisfied: protobuf>=3.6.1 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (3.6.1)
Requirement already satisfied: astor>=0.6.0 in /opt/conda/lib/python3.6/site-packages (from tensorflow-gpu==1.14.0) (0.7.1)
Collecting wrapt>=1.11.1 (from tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/82/f7/e43cefbe88c5fd371f4cf0cf5eb3feccd07515af9fd6cf7dbf1d1793a797/wrapt-1.12.1.tar.gz
Requirement already satisfied: markdown>=2.6.8 in /opt/conda/lib/python3.6/site-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow-gpu==1.14.0) (3.0.1)
Requirement already satisfied: werkzeug>=0.11.15 in /opt/conda/lib/python3.6/site-packages (from tensorboard<1.15.0,>=1.14.0->tensorflow-gpu==1.14.0) (0.14.1)
Collecting setuptools>=41.0.0 (from tensorboard<1.15.0,>=1.14.0->tensorflow-gpu==1.14.0)
  Downloading https://files.pythonhosted.org/packages/44/a6/7fb6e8b3f4a6051e72e4e2218889351f0ee484b9ee17e995f5ccff780300/setuptools-50.3.0-py3-none-any.whl (785kB)
    100% |████████████████████████████████| 788kB 20.3MB/s 
Requirement already satisfied: h5py in /opt/conda/lib/python3.6/site-packages (from keras-applications>=1.0.6->tensorflow-gpu==1.14.0) (2.9.0)
Building wheels for collected packages: wrapt
  Running setup.py bdist_wheel for wrapt ... - \ | / done
  Stored in directory: /root/.cache/pip/wheels/b1/c2/ed/d62208260edbd3fa7156545c00ef966f45f2063d0a84f8208a
Successfully built wrapt
thinc 6.12.1 has requirement wrapt<1.11.0,>=1.10.0, but you'll have wrapt 1.12.1 which is incompatible.
tensorflow 1.12.0 has requirement tensorboard<1.13.0,>=1.12.0, but you'll have tensorboard 1.14.0 which is incompatible.
pytest-cov 2.6.1 has requirement pytest>=3.6, but you'll have pytest 3.5.1 which is incompatible.
anaconda-client 1.6.14 has requirement python-dateutil>=2.6.1, but you'll have python-dateutil 2.6.0 which is incompatible.
Installing collected packages: google-pasta, setuptools, tensorboard, tensorflow-estimator, wrapt, tensorflow-gpu
  Found existing installation: setuptools 39.1.0
    Uninstalling setuptools-39.1.0:
      Successfully uninstalled setuptools-39.1.0
  Found existing installation: tensorboard 1.12.2
    Uninstalling tensorboard-1.12.2:
      Successfully uninstalled tensorboard-1.12.2
  Found existing installation: wrapt 1.10.11
Cannot uninstall 'wrapt'. It is a distutils installed project and thus we cannot accurately determine which files belong to it which would lead to only a partial uninstall.
You are using pip version 18.1, however version 20.2.3 is available.
You should consider upgrading via the 'pip install --upgrade pip' command.
# device_name = tf.test.gpu_device_name()
if tf.test.gpu_device_name():
    print('Default GPU Device: {}'.format(tf.test.gpu_device_name()))
else:
    print("Please install GPU version of TF")
Default GPU Device: /device:GPU:0
filepath = "/kaggle/working/saved-model-{epoch:02d}-{val_acc:.2f}.hdf5"

checkpoint = ModelCheckpoint(filepath,monitor='val_acc',verbose=1,save_best_only=False,mode='max')
config = tf.ConfigProto(device_count={'GPU': 1, 'CPU': 56})
sess = tf.Session(config=config)
K.set_session(sess)
model.fit_generator(my_generater(batch_size,train_dir,800),steps_per_epoch=steps_per_epoch//4,epochs=EPOCHS,validation_steps=validation_steps//4,validation_data=my_generater(batch_size,val_dir,100),callbacks=[checkpoint])
Epoch 1/5
1750/1750 [==============================] - 4570s 3s/step - loss: 0.5375 - acc: 0.5976 - val_loss: 0.5000 - val_acc: 0.5599

Epoch 00001: saving model to /kaggle/working/saved-model-01-0.56.hdf5
Epoch 2/5
   5/1750 [..............................] - ETA: 3:58 - loss: 0.5000 - acc: 0.3000
---------------------------------------------------------------------------
InvalidArgumentError                      Traceback (most recent call last)
/kaggle/working/imageai/Detection/__init__.py in detectCustomObjectsFromImage(self,custom_objects,input_image,output_image_path,input_type,output_type,extract_detected_objects,minimum_percentage_probability,display_percentage_probability,display_object_name)
    694                     model = self.__model_collection[0]
--> 695                     _,_,detections = model.predict_on_batch(np.expand_dims(image,axis=0))
    696                     predicted_numbers = np.argmax(detections[0,:,4:],axis=1)

/opt/conda/lib/python3.6/site-packages/keras/engine/training.py in predict_on_batch(self,x)
   1273         self._make_predict_function()
-> 1274         outputs = self.predict_function(ins)
   1275         return unpack_singleton(outputs)

/opt/conda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in __call__(self,inputs)
   2714 
-> 2715             return self._call(inputs)
   2716         else:

/opt/conda/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py in _call(self,inputs)
   2674         else:
-> 2675             fetched = self._callable_fn(*array_vals)
   2676         return fetched[:len(self.outputs)]

/opt/conda/lib/python3.6/site-packages/tensorflow/python/client/session.py in __call__(self,*args,**kwargs)
   1438               self._session._session,self._handle,args,status,
-> 1439               run_metadata_ptr)
   1440         if run_metadata:

/opt/conda/lib/python3.6/site-packages/tensorflow/python/framework/errors_impl.py in __exit__(self,type_arg,value_arg,traceback_arg)
    527             compat.as_text(c_api.TF_Message(self.status.status)),
--> 528             c_api.TF_GetCode(self.status.status))
    529     # Delete the underlying status object from memory otherwise it stays alive

InvalidArgumentError: indices[0] = 78 is not in [0,0)
     [[{{node nms/embedding_lookup_52}} = GatherV2[Taxis=DT_INT32,Tindices=DT_INT32,Tparams=DT_INT32,_device="/job:localhost/replica:0/task:0/device:CPU:0"](nms/Cast_17,nms/non_max_suppression_17/NonMaxSuppressionV3,nms/embedding_lookup_52/axis)]]

During handling of the above exception, another exception occurred:

ValueError                                Traceback (most recent call last)
<ipython-input-19-37687fd66644> in <module>()
      4                     validation_steps=validation_steps//4,
      5                     validation_data=my_generater(batch_size,val_dir,100),
----> 6                     callbacks = [checkpoint])

/opt/conda/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args,**kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature,stacklevel=2)
---> 91             return func(*args,**kwargs)
     92         wrapper._original_function = func
     93         return wrapper

/opt/conda/lib/python3.6/site-packages/keras/engine/training.py in fit_generator(self,generator,steps_per_epoch,epochs,verbose,callbacks,validation_data,validation_steps,class_weight,max_queue_size,workers,use_multiprocessing,shuffle,initial_epoch)
   1416             use_multiprocessing=use_multiprocessing,
   1417             shuffle=shuffle,
-> 1418             initial_epoch=initial_epoch)
   1419 
   1420     @interfaces.legacy_generator_methods_support

/opt/conda/lib/python3.6/site-packages/keras/engine/training_generator.py in fit_generator(model,generator,steps_per_epoch,epochs,verbose,callbacks,validation_data,validation_steps,class_weight,max_queue_size,workers,use_multiprocessing,shuffle,initial_epoch)
    179             batch_index = 0
    180             while steps_done < steps_per_epoch:
--> 181                 generator_output = next(output_generator)
    182 
    183                 if not hasattr(generator_output,'__len__'):

/opt/conda/lib/python3.6/site-packages/keras/utils/data_utils.py in get(self)
    707                     "`use_multiprocessing=False,workers > 1`."
    708                     "For more information see issue #1638.")
--> 709             six.reraise(*sys.exc_info())

/opt/conda/lib/python3.6/site-packages/six.py in reraise(tp,value,tb)
    691             if value.__traceback__ is not tb:
    692                 raise value.with_traceback(tb)
--> 693             raise value
    694         finally:
    695             value = None

/opt/conda/lib/python3.6/site-packages/keras/utils/data_utils.py in get(self)
    683         try:
    684             while self.is_running():
--> 685                 inputs = self.queue.get(block=True).get()
    686                 self.queue.task_done()
    687                 if inputs is not None:

/opt/conda/lib/python3.6/multiprocessing/pool.py in get(self,timeout)
    642             return self._value
    643         else:
--> 644             raise self._value
    645 
    646     def _set(self,i,obj):

/opt/conda/lib/python3.6/multiprocessing/pool.py in worker(inqueue,outqueue,initializer,initargs,maxtasks,wrap_exception)
    117         job,i,func,args,kwds = task
    118         try:
--> 119             result = (True,func(*args,**kwds))
    120         except Exception as e:
    121             if wrap_exception and func is not _helper_reraises_exception:

/opt/conda/lib/python3.6/site-packages/keras/utils/data_utils.py in next_sample(uid)
    624         The next value of generator `uid`.
    625     """
--> 626     return six.next(_SHARED_SEQUENCES[uid])
    627 
    628 

<ipython-input-8-1e63801f9903> in my_generater(batch_size,in_dir,videos_per_category)
     65                     success,frame = vidcap.read()
     66                     frame = cv2.resize(frame,dsize=(IMG_SIZE,IMG_SIZE),interpolation=cv2.INTER_CUBIC)
---> 67                     detections = get_Boxes(frame)
     68                     frame = mask_frame(frame,detections,detections_temp)
     69                     frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)

<ipython-input-6-a5006495da23> in get_Boxes(frame)
      4         input_type="array",
      5         input_image= frame,
----> 6         output_type="array"
      7         )
      8     return detections

/kaggle/working/imageai/Detection/__init__.py in detectCustomObjectsFromImage(self,custom_objects,input_image,output_image_path,input_type,output_type,extract_detected_objects,minimum_percentage_probability,display_percentage_probability,display_object_name)
    884             except:
    885                 raise ValueError(
--> 886                     "Ensure you specified correct input image,input type,output type and/or output image path ")
    887 
    888 

ValueError: Ensure you specified correct input image, input type, output type and/or output image path

'''

Solution

No effective solution for this problem has been found yet.
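
One debugging step that may help narrow this down (a minimal sketch, not a confirmed fix): the traceback shows that ImageAI's detectCustomObjectsFromImage wraps whatever actually failed in a bare except and re-raises it as this generic ValueError, while the underlying error is the InvalidArgumentError from predict_on_batch. Running the already-loaded detector frame by frame over one video, outside the generator, isolates exactly which frame triggers it; the video path below is a placeholder.

'''
import cv2

# Hypothetical repro harness (assumes `detector` and `custom` from the code
# above are already loaded); logs the index, shape and dtype of any frame
# that makes the detector raise.
vidcap = cv2.VideoCapture("/path/to/suspect_video.mp4")  # placeholder path
i = 0
while True:
    success,frame = vidcap.read()
    if not success:
        break
    try:
        detector.detectCustomObjectsFromImage(
            custom_objects=custom,input_type="array",
            input_image=frame,output_type="array")
    except Exception as e:
        print(i,frame.shape,frame.dtype,type(e).__name__,e)
    i += 1
'''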
