Key error in sentence encoder

Problem description

corpus is a list of strings (paragraphs). The error comes from the line sentence_embeddings = model.encode(dist_repr, is_pretokenized=True). I first tried is_pretokenized=False, which is the default, and it raised a KeyError for unknown words. So I added a vectorizer first and passed the transformed matrix to the encoder instead, which produces the error below. Can anyone see whether there is a known fix for this?

from sentence_transformers import SentenceTransformer, LoggingHandler
from sklearn.feature_extraction.text import CountVectorizer


vectorizer = CountVectorizer(stop_words='english')
vectorizer.fixed_vocabulary_ = True
dist_repr = vectorizer.fit_transform(corpus)
dist_repr = dist_repr.todense()
model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
sentence_embeddings = model.encode(dist_repr, is_pretokenized=True)
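
For context, is_pretokenized=True in the sentence-transformers 0.3.x API expects token-id lists produced by the model's own tokenizer, not a document-term matrix. Below is a minimal sketch of that usage, assuming model.tokenize is available in the installed version and corpus is the same list of paragraph strings (the example strings are placeholders):

from sentence_transformers import SentenceTransformer

model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')

corpus = ["The first paragraph.", "Another paragraph."]  # placeholder strings
# Convert each string to the token-id list the collate function expects
tokenized = [model.tokenize(text) for text in corpus]
sentence_embeddings = model.encode(tokenized, is_pretokenized=True)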


272 dist_repr = dist_repr.todense()
273 model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
--> 274 sentence_embeddings = model.encode(dist_repr, is_pretokenized=True)
275 dist_repr = sentence_embeddings
276

~/opt/anaconda3/lib/python3.7/site-packages/sentence_transformers/SentenceTransformer.py in encode(self, sentences, batch_size, show_progress_bar, output_value, convert_to_numpy, convert_to_tensor, is_pretokenized, device, num_workers)
173 iterator = tqdm(inp_dataloader, desc="Batches")
174
--> 175 for features in iterator:
176 for feature_name in features:
177 features[feature_name] = features[feature_name].to(device)

~/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/DataLoader.py in __next__(self)
343
344 def __next__(self):
--> 345 data = self._next_data()
346 self._num_yielded += 1
347 if self._dataset_kind == _DatasetKind.Iterable and \

~/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/DataLoader.py in _next_data(self)
383 def _next_data(self):
384 index = self._next_index()  # may raise StopIteration
--> 385 data = self._dataset_fetcher.fetch(index)  # may raise StopIteration
386 if self._pin_memory:
387 data = _utils.pin_memory.pin_memory(data)

~/opt/anaconda3/lib/python3.7/site-packages/torch/utils/data/_utils/fetch.py in fetch(self, possibly_batched_index)
45 else:
46 data = self.dataset[possibly_batched_index]
---> 47 return self.collate_fn(data)

~/opt/anaconda3/lib/python3.7/site-packages/sentence_transformers/SentenceTransformer.py in smart_batching_collate_text_only(self, batch)
421
422 for text in batch:
--> 423 sentence_features = self.get_sentence_features(text, max_seq_len)
424 for feature_name in sentence_features:
425 if feature_name not in feature_lists:

~/opt/anaconda3/lib/python3.7/site-packages/sentence_transformers/SentenceTransformer.py in get_sentence_features(self, *features)
320
321 def get_sentence_features(self, *features):
--> 322 return self._first_module().get_sentence_features(*features)
323
324 def get_sentence_embedding_dimension(self):

~/opt/anaconda3/lib/python3.7/site-packages/sentence_transformers/models/Transformer.py in get_sentence_features(self, tokens, pad_seq_length)
77 return self.tokenizer.prepare_for_model(tokens, max_length=pad_seq_length, padding='max_length', return_tensors='pt', truncation=True, prepend_batch_axis=True)
78 else:
---> 79 return self.tokenizer.prepare_for_model(tokens[0], tokens[1], truncation='longest_first', prepend_batch_axis=True)
80
81 def get_config_dict(self):

~/opt/anaconda3/lib/python3.7/site-packages/numpy/matrixlib/defmatrix.py in __getitem__(self, index)
191
192 try:
--> 193 out = N.ndarray.__getitem__(self, index)
194 finally:
195 self._getitem = False

IndexError: index 1 is out of bounds for axis 0 with size 1
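
Reading the last two frames: smart_batching_collate_text_only hands each batch element to get_sentence_features, and Transformer.get_sentence_features then indexes tokens[0] and tokens[1], i.e. it treats the element as a pair of token sequences. Here each element is one row of the dense CountVectorizer output, a 1 x vocabulary numpy matrix, so tokens[1] steps off axis 0 and raises the IndexError. A minimal reproduction of just that failing indexing (the numbers are made up):

import numpy as np

row = np.matrix([[3, 0, 1]])  # one row of a dense document-term matrix
print(row[0])                 # fine: returns the whole 1 x 3 row
print(row[1])                 # IndexError: index 1 is out of bounds for axis 0 with size 1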

Solution

No confirmed fix for this error has been collected yet; a plausible workaround, based on the traceback above, is sketched below.

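The likely root cause is that encode should be given the raw strings themselves: the model applies its own subword tokenizer internally, while the CountVectorizer document-term matrix is built over a completely different vocabulary and cannot be fed to the transformer. A sketch of the simpler call, assuming corpus is the original list of paragraph strings (the example strings are placeholders):

from sentence_transformers import SentenceTransformer

corpus = ["First paragraph ...", "Second paragraph ..."]  # placeholder strings

model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
# The subword tokenizer splits unknown words into known pieces,
# so raw strings should not raise a KeyError here.
sentence_embeddings = model.encode(corpus)
print(sentence_embeddings.shape)  # (len(corpus), 768) for this model

If a KeyError still appears on raw strings, it is worth checking that the installed sentence-transformers and transformers versions are compatible, since the encode signature in the traceback belongs to the 0.3.x line.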