ImageAI: Error when implementing ObjectDetection as an API in Flask

I am having an issue when trying to implement ObjectDetection as an API with the Flask framework.

My source code is just the simple use of ObjectDetection

@app.route("/")
def hello():
    """Root endpoint: trigger one detection pass, then reply with a greeting."""
    func()
    greeting = "Hello world"
    return greeting
    
def func():
    """Run person detection on ``sampleImage.jpg`` and print the results.

    The detector is created and its weights loaded only once, then cached on
    the function object for reuse by later requests.  Re-running
    ``loadModel()`` on every request builds the Keras layers against a fresh
    TensorFlow graph while the backend session still references the old one,
    which raises on the second request:

        TypeError: Cannot interpret feed_dict key as Tensor: ... is not an
        element of this graph.
    """
    detector = getattr(func, "_detector", None)
    if detector is None:
        # First call: build and load the model exactly once.
        execution_path = os.getcwd()
        detector = ObjectDetection()
        detector.setModelTypeAsTinyYOLOv3()
        detector.setModelPath(os.path.join(execution_path, "yolo-tiny.h5"))
        detector.loadModel(detection_speed="fastest")
        func._detector = detector

    custom_objects = detector.CustomObjects(person=True)
    image_path = "sampleImage.jpg"
    check = os.path.isfile(image_path)
    print(check)
    detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom_objects,
        input_image=image_path,
        minimum_percentage_probability=20,
    )

    for eachObject in detections:
        print(eachObject["name"], " : ", eachObject["percentage_probability"])

if __name__ == '__main__':
    # Start Flask's development server with the interactive debugger enabled.
    app.run(debug=True)

The problem is that the API only succeeds on the first request. From the second request onward, it fails with the following error:

TypeError: Cannot interpret feed_dict key as Tensor: Tensor Tensor("Placeholder:0", shape=(3, 3, 3, 16), dtype=float32) is not an element of this graph.

Full stack trace:

Traceback (most recent call last):
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/app.py", line 2309, in __call__
    return self.wsgi_app(environ, start_response)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/app.py", line 2295, in wsgi_app
    response = self.handle_exception(e)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/app.py", line 1741, in handle_exception
    reraise(exc_type, exc_value, tb)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/_compat.py", line 35, in reraise
    raise value
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/app.py", line 2292, in wsgi_app
    response = self.full_dispatch_request()
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/app.py", line 1815, in full_dispatch_request
    rv = self.handle_user_exception(e)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/app.py", line 1718, in handle_user_exception
    reraise(exc_type, exc_value, tb)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/_compat.py", line 35, in reraise
    raise value
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/app.py", line 1813, in full_dispatch_request
    rv = self.dispatch_request()
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/flask/app.py", line 1799, in dispatch_request
    return self.view_functions[rule.endpoint](**req.view_args)
  File "/Users/ptoan/Desktop/object-detection/app.py", line 15, in hello
    func()
  File "/Users/ptoan/Desktop/object-detection/app.py", line 24, in func
    detector.loadModel(detection_speed="fastest")
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/imageai/Detection/__init__.py", line 213, in loadModel
    model.load_weights(self.modelPath)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/keras/engine/network.py", line 1166, in load_weights
    f, self.layers, reshape=reshape)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/keras/engine/saving.py", line 1058, in load_weights_from_hdf5_group
    K.batch_set_value(weight_value_tuples)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/keras/backend/tensorflow_backend.py", line 2470, in batch_set_value
    get_session().run(assign_ops, feed_dict=feed_dict)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 929, in run
    run_metadata_ptr)
  File "/Users/ptoan/Desktop/object-detection/venv/lib/python3.6/site-packages/tensorflow/python/client/session.py", line 1095, in _run
    'Cannot interpret feed_dict key as Tensor: ' + e.args[0])
TypeError: Cannot interpret feed_dict key as Tensor: Tensor Tensor("Placeholder:0", shape=(3, 3, 3, 16), dtype=float32) is not an element of this graph.

About this issue

  • Original URL
  • State: open
  • Created 5 years ago
  • Comments: 15 (1 by maintainers)

Most upvoted comments

I have another solution to this problem. I recently found a parameter named thread_safe; it makes ImageAI share the model's own Keras session/graph (k.session.graph in the source code) across threads. We need to set it to True, and then we can actually use threading in Flask or anywhere else. My earlier code can be changed as follows:

from flask import Flask, Response, jsonify
app = Flask(__name__)
import os
from imageai.Detection import ObjectDetection
import time
import json

# Load the detection model once at import time so that every request (and
# every thread) reuses the same Keras/TensorFlow graph instead of rebuilding
# it per call — rebuilding is what triggers the "not an element of this
# graph" TypeError.
execution_path = os.getcwd()
st = time.time()
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(os.path.join(execution_path, "model", "resnet50_coco_best_v2.0.1.h5"))
# detector.setModelTypeAsTinyYOLOv3()
# detector.setModelPath(os.path.join(execution_path, "model", "yolo-tiny.h5"))
detector.loadModel()
# detector.loadModel(detection_speed="fastest")
print(f'Init Timer: {time.time()-st}')

@app.route('/detect/<pic_name>')
def boat_detection(pic_name):
    """Detect boats in *pic_name* and return the detections as JSON.

    Each detection dict is serialized to a JSON string keyed by its
    1-based index in the response object.
    """
    st = time.time()
    results = getDetections(pic_name)
    print(f'Sum Timer: {time.time()-st}')

    msg = {}
    # getDetections() returns nothing useful when the image file does not
    # exist; without this guard, enumerate() over it raises a TypeError
    # and Flask responds with HTTP 500.
    if not results:
        return jsonify(msg)
    for i, result in enumerate(results, 1):
        # Cast numpy scalars to plain Python types so json.dumps accepts them.
        result['percentage_probability'] = float(result['percentage_probability'])
        result['box_points'] = tuple(int(coord) for coord in result['box_points'])
        msg[str(i)] = json.dumps(result)
    return jsonify(msg)


def getDetections(file_name):
    """Run custom (boat-only) detection on *file_name*.

    The image is read from the data\\ship2\\ folder and the annotated copy is
    written to data\\output\\.  Returns the list of detection dicts produced
    by ImageAI, or an empty list when the input image does not exist
    (previously this returned None, which crashed the caller's enumerate()).
    """
    start = time.time()

    image_folder = os.path.join(execution_path, 'data\\ship2\\')
    output_folder = os.path.join(execution_path, 'data\\output\\')

    st1 = time.time()
    image_file = os.path.join(image_folder, file_name)
    new_image_file = os.path.join(output_folder, file_name)
    print(image_file, "-->", new_image_file)
    if not os.path.exists(image_file):
        print("not exist.")
        # Empty list keeps the return type consistent for callers.
        return []

    # The module-level detector is shared; thread_safe=True makes ImageAI use
    # the model's own graph/session, so concurrent Flask threads can call it
    # without the "not an element of this graph" TypeError.
    custom_objects = detector.CustomObjects(boat=True)
    detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom_objects,
        input_image=image_file,
        output_image_path=new_image_file,
        minimum_percentage_probability=30,
        thread_safe=True)  # **notice this param**
    print(f'[Info]识别到 boat{len(detections)}艘')  # "[Info] detected N boat(s)"
    for eachObject in detections:
        print(eachObject.items())

    end = time.time()
    print(f'Excute Timer: {end-st1}')
    print ("耗时: ",end-start)  # "elapsed time"
    return detections

if __name__ == '__main__':
    # threaded=True is safe here because detectCustomObjectsFromImage is
    # called with thread_safe=True, which pins work to the model's own graph.
    app.run(threaded=True) # we can use the threading to detect

The most important point is setting thread_safe=True. If you do that, you will find that the model can be loaded just once and used from many threads.

I had the same problem a week ago, and I solved it by making Flask run in just one thread. The following is my example:

from flask import Flask, Response, jsonify
app = Flask(__name__)
import os
from imageai.Detection import ObjectDetection
import time
import json

# Load the detection model once at import time so that every request reuses
# the same Keras/TensorFlow graph; reloading it per request is what causes
# the "not an element of this graph" TypeError.
execution_path = os.getcwd()
st = time.time()
detector = ObjectDetection()
detector.setModelTypeAsRetinaNet()
detector.setModelPath(os.path.join(execution_path, "model", "resnet50_coco_best_v2.0.1.h5"))
# detector.setModelTypeAsTinyYOLOv3()
# detector.setModelPath(os.path.join(execution_path, "model", "yolo-tiny.h5"))
detector.loadModel()
# detector.loadModel(detection_speed="fastest")
print(f'Init Timer: {time.time()-st}')

@app.route('/detect/<pic_name>')
def boat_detection(pic_name):
    """Detect boats in *pic_name* and return the detections as JSON.

    Each detection dict is serialized to a JSON string keyed by its
    1-based index in the response object.
    """
    st = time.time()
    results = getDetections(pic_name)
    print(f'Sum Timer: {time.time()-st}')

    msg = {}
    # getDetections() returns nothing useful when the image file does not
    # exist; without this guard, enumerate() over it raises a TypeError
    # and Flask responds with HTTP 500.
    if not results:
        return jsonify(msg)
    for i, result in enumerate(results, 1):
        # Cast numpy scalars to plain Python types so json.dumps accepts them.
        result['percentage_probability'] = float(result['percentage_probability'])
        result['box_points'] = tuple(int(coord) for coord in result['box_points'])
        msg[str(i)] = json.dumps(result)
    return jsonify(msg)


def getDetections(file_name):
    """Run custom (boat-only) detection on *file_name*.

    The image is read from the data\\ship2\\ folder and the annotated copy is
    written to data\\output\\.  Returns the list of detection dicts produced
    by ImageAI, or an empty list when the input image does not exist
    (previously this returned None, which crashed the caller's enumerate()).
    """
    start = time.time()

    image_folder = os.path.join(execution_path, 'data\\ship2\\')
    output_folder = os.path.join(execution_path, 'data\\output\\')

    st1 = time.time()
    image_file = os.path.join(image_folder, file_name)
    new_image_file = os.path.join(output_folder, file_name)
    print(image_file, "-->", new_image_file)
    if not os.path.exists(image_file):
        print("not exist.")
        # Empty list keeps the return type consistent for callers.
        return []

    # The module-level detector is shared; this only works reliably because
    # the app is run with threaded=False, serializing all requests onto the
    # single thread that owns the TensorFlow graph/session.
    custom_objects = detector.CustomObjects(boat=True)
    detections = detector.detectCustomObjectsFromImage(
        custom_objects=custom_objects,
        input_image=image_file,
        output_image_path=new_image_file,
        minimum_percentage_probability=30)
    print(f'[Info]识别到 boat{len(detections)}艘')  # "[Info] detected N boat(s)"
    for eachObject in detections:
        print(eachObject.items())

    end = time.time()
    print(f'Excute Timer: {end-st1}')
    print ("耗时: ",end-start)  # "elapsed time"
    return detections

if __name__ == '__main__':
    # threaded=False serializes all requests onto one thread, so the single
    # TensorFlow graph/session loaded at import time is never accessed from
    # another thread — avoiding the "not an element of this graph" error.
    app.run(threaded=False) # ban the Threaded mode

The most important point is setting app.run(threaded=False). My package versions: Flask==1.1.1 h5py==2.9.0 Keras==2.2.4 opencv-python==4.1.0.25 tensorflow==1.14.0 imageai==2.1.0