Demo entry 6784923

Submitted by anonymous on Mar 11, 2019 at 14:08
Language: Python 3. Code size: 7.7 kB.

import glob
import json
import os
import time

import cv2
import numpy as np
import tensorflow as tf

from object_detection.utils.label_map_util import create_categories_from_labelmap
from tftrt.examples.object_detection import optimize_model
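
# Wrap a TensorFlow Object Detection API model in a TensorRT-optimized frozen graph
# and expose helpers for single-image inference, visualization, and latency benchmarking.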
class TensorRT_OP(object):
    def __init__(self, config_path, checkpoint_path, precision_mode="FP32"):
        self.config_path = config_path
        self.checkpoint_path = checkpoint_path
        self.precision_mode = precision_mode
        # Build the TensorRT-optimized graph from the pipeline config and checkpoint.
        self.network = optimize_model(config_path=self.config_path,
                                      checkpoint_path=self.checkpoint_path,
                                      use_trt=True,
                                      output_path='opt.pb',
                                      precision_mode=self.precision_mode)
        # Tensor names exposed by Object Detection API frozen graphs.
        self.INPUT_NAME = 'image_tensor'
        self.BOXES_NAME = 'detection_boxes'
        self.CLASSES_NAME = 'detection_classes'
        self.SCORES_NAME = 'detection_scores'
        self.NUM_DETECTIONS_NAME = 'num_detections'
        self.FROZEN_GRAPH_NAME = 'frozen_inference_graph.pb'
        self.PIPELINE_CONFIG_NAME = 'pipeline.config'
        self.CHECKPOINT_PREFIX = 'model.ckpt'
        self.tf_config = self.set_gpu()

    def read_image(self, image_path, image_shape):
        # Load an image as RGB and optionally resize it to (width, height).
        image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
        if image_shape is not None:
            image = cv2.resize(image, image_shape)
        return np.array(image)

    def set_gpu(self):
        # Let TensorFlow grow GPU memory on demand instead of grabbing it all upfront.
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        return tf_config

    def vis_image(self, image, label_list, thr=0.3, save=None):
        # Run detection, draw boxes and scores above the threshold, then save or display the result.
        output_dict = self.predict(image)
        bbox = output_dict['boxes']
        classes = output_dict['classes']
        scores = output_dict['scores']
        detect_num = output_dict['nums']
        score_data = 0
        for i in range(int(detect_num)):
            if scores[0][i] > thr:
                box = bbox[i][:]  # [x, y, width, height] in pixels
                index = int(classes[0][i])
                score_data = scores[0][i]
                # Label map categories are assumed to use contiguous ids starting at 1.
                score_info = "{:.2f}:{}".format(score_data, label_list[index - 1]["name"])
                cv2.rectangle(image, (box[0], box[1]), (box[0] + box[2], box[1] + box[3]), (0, 255, 0), 1)
                cv2.putText(image, score_info, (box[0] - 5, box[1] - 4), cv2.FONT_HERSHEY_SIMPLEX, 0.4,
                            (0, 255, 0), 1)
        if save is not None:
            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
            cv2.imwrite(save, image)
        else:
            cv2.imshow("TRT", image)
            cv2.waitKey()
        return score_data

    def predict(self, image):
        # Run a single forward pass and return boxes (pixel COCO format), classes, scores and count.
        output_dict = {}
        with tf.Graph().as_default() as tf_graph:
            with tf.Session(config=self.tf_config) as tf_sess:
                tf.import_graph_def(self.network, name='')
                tf_input = tf_graph.get_tensor_by_name(self.INPUT_NAME + ':0')
                tf_boxes = tf_graph.get_tensor_by_name(self.BOXES_NAME + ':0')
                tf_classes = tf_graph.get_tensor_by_name(self.CLASSES_NAME + ':0')
                tf_scores = tf_graph.get_tensor_by_name(self.SCORES_NAME + ':0')
                tf_num_detections = tf_graph.get_tensor_by_name(
                    self.NUM_DETECTIONS_NAME + ':0')
                boxes, classes, scores, num_detections = tf_sess.run(
                    [tf_boxes, tf_classes, tf_scores, tf_num_detections],
                    feed_dict={tf_input: [image]})
                image_height, image_width = image.shape[:2]
                bbox_list = []
                for j in range(int(num_detections)):
                    # Boxes come back as normalized [ymin, xmin, ymax, xmax];
                    # convert to COCO-style [x, y, width, height] in pixels.
                    bbox = boxes[0][j]
                    bbox_coco_fmt = [
                        int(bbox[1] * image_width),   # x
                        int(bbox[0] * image_height),  # y
                        int((bbox[3] - bbox[1]) * image_width),   # width
                        int((bbox[2] - bbox[0]) * image_height),  # height
                    ]
                    bbox_list.append(bbox_coco_fmt)
                output_dict.update({"boxes": np.array(bbox_list), "classes": classes,
                                    "scores": scores, "nums": num_detections})
        return output_dict

    def benchmark(self, image, save='result.json', single=False):
        # Time repeated inference on one image and report mean latency and FPS.
        statistics = {}
        runtimes = []
        with tf.Graph().as_default() as tf_graph:
            with tf.Session(config=self.tf_config) as tf_sess:
                tf.import_graph_def(self.network, name='')
                tf_input = tf_graph.get_tensor_by_name(self.INPUT_NAME + ':0')
                tf_boxes = tf_graph.get_tensor_by_name(self.BOXES_NAME + ':0')
                tf_classes = tf_graph.get_tensor_by_name(self.CLASSES_NAME + ':0')
                tf_scores = tf_graph.get_tensor_by_name(self.SCORES_NAME + ':0')
                tf_num_detections = tf_graph.get_tensor_by_name(
                    self.NUM_DETECTIONS_NAME + ':0')
                if single:
                    for _ in range(25):
                        t0 = time.time()
                        boxes, classes, scores, num_detections = tf_sess.run(
                            [tf_boxes, tf_classes, tf_scores, tf_num_detections],
                            feed_dict={tf_input: [image]})
                        t1 = time.time()
                        runtimes.append(float(t1 - t0))
                    # The first run includes graph/engine warm-up, so it is skipped.
                    statistics = {
                        'inference': 1000.0 * np.mean(runtimes[1:]),
                        'fps': len(runtimes[1:]) / np.sum(runtimes[1:]),
                        'runtimes_ms': [1000.0 * r for r in runtimes[1:]]
                    }
        if save is not None:
            with open(save, 'w') as f:
                json.dump(statistics, f)
        return statistics
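
# Note: benchmark() is not called in main() below; an illustrative (untested) call
# on a single RGB image, discarding the first warm-up run, would be:
#   stats = t.benchmark(image, save='result.json', single=True)
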
def main():
    MODEL_PATH = '/mnt/train_chess/chess_mult/ssd_mobilenet_v1_coco_2018_01_28_trainoutput/export_models'
    pbtxt_file = '/mnt/chess_all/Origin_data/tf/mult_label_map.pbtxt'
    image_dir = '/mnt/train_chess/chess_mult/ssd_inception_v2_coco_2018_01_28_trainoutput/test_image'
    config_path = os.path.join(MODEL_PATH, 'pipeline.config')
    checkpoint_path = os.path.join(MODEL_PATH, 'model.ckpt')
    label_list = create_categories_from_labelmap(pbtxt_file)

    t = TensorRT_OP(config_path, checkpoint_path)

    for image_path in glob.glob(image_dir + "/test*"):
        image = cv2.cvtColor(cv2.imread(image_path), cv2.COLOR_BGR2RGB)
        filename = os.path.splitext(os.path.basename(image_path))[0]

        # vis_image() runs predict() internally and writes the annotated image to disk.
        save_image_path = os.path.join(image_dir, filename + "TensorRT_Predict_FP32.png")
        t.vis_image(image, label_list, save=save_image_path)


if __name__ == "__main__":
    main()
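
The constructor also takes a precision_mode argument (default "FP32") that is forwarded to optimize_model. A minimal sketch of requesting a reduced-precision engine, reusing the config_path, checkpoint_path and image variables from main() and assuming the tftrt example and the GPU support "FP16":

    # Hypothetical FP16 variant; reuses the paths/variables shown in main() above.
    t_fp16 = TensorRT_OP(config_path, checkpoint_path, precision_mode="FP16")
    stats = t_fp16.benchmark(image, save='result_fp16.json', single=True)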
