并行运行线程比在 python 中顺序执行需要更多时间

问题描述

我有两个 ONNX 深度学习模型。 我想并行运行这两个模型。 我正在使用 python 中的线程。但令人惊讶的是,与顺序运行这两个模型相比,它需要更多时间。

要完成的任务。

  1. 创建一个模型类(Model class)
  2. 在该类的 init 中加载这两个模型。
  3. 并行运行两个模型以对给定输入进行推理。

这是正常行为吗?请建议解决此问题的方法。

class ModelImp:

def __init__(self):
    print('loading model...')
    # Load your model here
    curr_dir = os.getcwd()
    model_path = os.path.join(curr_dir,"model","hatev5.onnx")
    self.hate_sess = onnxruntime.InferenceSession(model_path)
    self.hate_input_name = self.hate_sess.get_inputs()[0].name
    self.hate_seq_len=15
    self.corona_seq_len=16
    print('********************************Hate model loaded.**********************************************************')
    model_path = os.path.join(curr_dir,"corona.onnx")
    self.corona_sess = onnxruntime.InferenceSession(model_path)
    self.corona_input_name = self.corona_sess.get_inputs()[0].name
    # self.model = keras.models.load_model(model_path,custom_objects={"gelu": gelu})
    # print(self.model.summary())
    print('********************************Corona model loaded.**********************************************************')
    print("_________________________************MODEL.py : loading tokenizer ************___________________________")
    curr_dir = os.getcwd()
    vocab_path = os.path.join(curr_dir,"vocab.txt")
    self.wordpiece_tokenizer = tokenization.FullTokenizer(vocab_path,do_lower_case=True)
    tokenizer_path = os.path.join(curr_dir,"hate_tokenizer.json")
    with open(tokenizer_path) as f:
        data = json.load(f)
        self.hate_tokenizer = tokenizer_from_json(data)
    print("_________________________************ HATE MODEL.py : tokenizer loaded************___________________________")
    tokenizer_path = os.path.join(curr_dir,"corona_tokenizer.json")
    with open(tokenizer_path) as f:
        data = json.load(f)
        self.corona_tokenizer = tokenizer_from_json(data)
    print("_________________________************ CORONA MODEL.py : tokenizer loaded************___________________________")
    curr_dir = os.getcwd()
# string version of Eval
# data is a string
def thread_eval(self,data,q):
    # print("--------------------------------------corona started----------------------------------------------------------")
    corona_lines = []
    corona_line = ' '.join(trim(self.wordpiece_tokenizer.tokenize(data.strip()),self.corona_seq_len))
    corona_lines.append(corona_line)
    # print(texts)
    corona_line_1 = self.corona_tokenizer.texts_to_sequences(corona_lines)
    corona_line_2 = sequence.pad_sequences(corona_line_1,padding='post',maxlen=self.corona_seq_len)
    corona_pred = self.corona_sess.run(None,{self.corona_input_name: corona_line_2})
    corona_prob = corona_pred[0][0][1]
    q.put(corona_prob)
    # print("---------------------------------------corona ended------------------------------------------------------------")
def Eval(self,data):

    try:
        
        
        # pre_start = time.time()
        # mp = ModelImp()
        # with tf.Graph().as_default() as graph: #tf.device(config['gpu_device'] )

        # print(data)
        d = json.loads(data)
        out_json = {}

        if (not (("query" in d) or ("Query" in d))):
            # print("Query: ",data)
            score = -2 * 10000  # new_change
            output = {"Output": [[score]]}  # {"score" :score,"Succ" : False }
            output_str = json.dumps(output)
            return output_str
        if ("query" in d):
            query = d["query"][0]  # new_change
            # print("Query 1: ",query)
        elif ("Query" in d):
            query = d["Query"][0]  # new_change
            # print("Query 2: ",query)
        if (len(query.strip()) == 0):
            query = "good"
            # print("Query 3: ",query)
        ## HATE MODEL input preprocess
        que = queue.Queue()
        x = threading.Thread(target=self.thread_eval,args=(query,que),daemon=True)
        x.start()
        hate_lines = []
        hate_line = ' '.join(trim(self.wordpiece_tokenizer.tokenize(query.strip()),self.hate_seq_len))
        hate_lines.append(hate_line)
        # print(texts)
        hate_line_1 = self.hate_tokenizer.texts_to_sequences(hate_lines)
        hate_line_2 = sequence.pad_sequences(hate_line_1,maxlen=self.hate_seq_len)
        ## CORONA MODEL input preprocess
        
        # print(line_2)
        
        # print("----------------------------------------hate started----------------------------------------")
        hate_pred = self.hate_sess.run(None,{self.hate_input_name: hate_line_2})
        # print("----------------------------------------hate ended----------------------------------------")
        # print("pred: ",pred[0])
        # prob = math.exp(pred[0][0][1])/(math.exp(pred[0][0][0]) + math.exp(pred[0][0][1]))
        hate_prob = hate_pred[0][0][1]
        # print("hate_prob: ",hate_prob)
        # hate_score = int(hate_prob * 10000)  # new_change  
        x.join()          
        corona_prob=que.get()
        # print("pred: ",pred[0])
        # prob = math.exp(pred[0][0][1])/(math.exp(pred[0][0][0]) + math.exp(pred[0][0][1]))
        
        # print("corona_prob: ",corona_prob)
        output_prob = max(corona_prob,hate_prob)
        # corona_score = int(corona_prob * 10000)  # new_change
        output_score = int(output_prob * 10000)

        output = {"Output": [[output_score]]}  # {"score" :score,"Succ" : True }
        output_str = json.dumps(output)

        return output_str

    except Exception as e:
        print("Exception: ",data)
        score = -3 * 10000  # new_change
        output = {"Output": [[score]]}  # {"score" :score,"Succ" : False }
        output_str = json.dumps(output)
        print(e)
        return output_str

解决方法

暂未找到可以解决该程序问题的有效方法,小编正在努力寻找整理中!

如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。

小编邮箱:dio#foxmail.com (将#修改为@)

相关问答

Selenium Web驱动程序和Java。元素在(x,y)点处不可单击。其...
Python-如何使用点“。” 访问字典成员?
Java 字符串是不可变的。到底是什么意思?
Java中的“ final”关键字如何工作?(我仍然可以修改对象。...
“loop:”在Java代码中。这是什么,为什么要编译?
java.lang.ClassNotFoundException:sun.jdbc.odbc.JdbcOdbc...