使用 SemTorch 库和 Fastai V2 进行图像分割预测

问题描述

我正在尝试训练一个用于图像语义分割的深度学习模型,我正在使用下面的代码来训练我的模型。但是,在保存最后一个训练模型后,我无法从测试集中获取预测图像掩码 - 我收到错误消息。

有什么建议吗?

from fastai.basics import *
from fastai.vision import models
from fastai.vision.all import *
from fastai.metrics import *
from fastai.data.all import *
from fastai.callback import *

# SemTorch
from semtorch import get_segmentation_learner

from pathlib import Path
import random
%matplotlib inline
import numpy as np
import os 
import cv2

# Reproducibility: fix every RNG (python's random, plus numpy/torch via
# fastai's set_seed) and force deterministic cuDNN kernels so repeated
# training runs produce identical results.
number_of_the_seed = 2020
random.seed(number_of_the_seed)
set_seed(number_of_the_seed)
torch.backends.cudnn.deterministic = True
# benchmark=False avoids cuDNN autotuning, which can pick different
# (non-deterministic) kernels between runs.
torch.backends.cudnn.benchmark = False

获取与图像相关的标签,在分割的情况下是一个掩码

def get_y_fn(x):
    """Return the label (mask) path for an image path.

    Masks live in a "Labels" directory that mirrors the "Images"
    directory, with the same .png filename.

    FIX: the original returned ``replace("Images", ".png")``, which maps
    e.g. ``data/Images/a.png`` to ``data/.png/a.png`` — a path that does
    not exist.  Per the commented-out lambda above the function, the
    intended mapping is the directory swap ``Images`` -> ``Labels``
    (the ``.png`` -> ``.png`` replace was a no-op and is dropped).
    """
    return Path(str(x).replace("Images", "Labels"))

def ParentSplitter(x):
    """Return True when the file lives in the test split directory.

    A file belongs to the validation/test split iff its immediate parent
    directory is named ``test_name`` (a module-level global defined by
    the caller).
    """
    parent_dir = Path(x).parent.name
    return parent_dir == test_name

加载不同类的代码。所有数据集具有相同的代码

# Class codes shared by every dataset: one class name per line in codes.txt.
codes = np.loadtxt('/content/drive/MyDrive/Skin Cancer Project/codes.txt',dtype=str)

# Map each class name to its integer id (its line index in codes.txt).
name2id = {name: idx for idx, name in enumerate(codes)}
print(name2id)

# Background pixels are treated as "void" and excluded by no_bg_acc below.
void_code = name2id['Background']

def no_bg_acc(input, target):
    """Pixel accuracy computed over non-background pixels only.

    ``input`` is a (N, C, H, W) logits tensor, ``target`` a (N, 1, H, W)
    label map.  Pixels equal to the module-level ``void_code``
    (the 'Background' class id) are excluded from the average.
    """
    labels = target.squeeze(1)
    keep = labels != void_code
    predictions = input.argmax(dim=1)
    return (predictions[keep] == labels[keep]).float().mean()

def segment_acc(input, target):
    """Plain pixel accuracy over all pixels.

    ``input`` is a (N, C, H, W) logits tensor; ``target`` a (N, 1, H, W)
    label map.  Returns the mean fraction of pixels whose argmax class
    matches the target.
    """
    labels = target.squeeze(1)
    predictions = input.argmax(dim=1)
    return (predictions == labels).float().mean()

提前停止参数

# Early-stopping / checkpointing configuration.
# During training: watch validation loss; "better" means lower (np.less).
monitor_training="valid_loss"
comp_training=np.less

# During evaluation: watch the Dice metric; "better" means higher (np.greater).
monitor_evaluating="dice"
comp_evaluating=np.greater

# Number of epochs without improvement before stopping.
patience=2


# FIX: albumentations exports GridDistortion and OpticalDistortion (CamelCase);
# the original lowercase spellings `Griddistortion` / `Opticaldistortion`
# raise ImportError.
from albumentations import (
    Compose, OneOf, ElasticTransform, GridDistortion, OpticalDistortion,
    Flip, Rotate, Transpose, CLAHE, ShiftScaleRotate,
)


class SegmentationAlbumentationsTransform(ItemTransform):
    """Apply an albumentations pipeline jointly to an (image, mask) pair."""

    # split_idx = 0: augmentation runs on the training split only.
    split_idx = 0

    def __init__(self, aug):
        self.aug = aug

    def encodes(self, x):
        image, mask = x
        augmented = self.aug(image=np.array(image), mask=np.array(mask))
        new_image = PILImage.create(augmented["image"])
        new_mask = PILMask.create(augmented["mask"])
        return new_image, new_mask


class TargetMaskConvertTransform(ItemTransform):
    """Convert mask pixel values from {0, 255} to class ids {0, 1}.

    FIX: the pasted original was syntactically broken
    (``def encodes(self,mask = x``) and had a stray prose line inside the
    class body; reconstructed from the traceback in this document.
    More importantly, ``encodes`` unconditionally unpacked ``x`` into
    ``(img, mask)``.  That works during training, where each item is an
    (image, mask) pair, but ``learn.predict`` feeds a single image at
    inference time, producing
    ``ValueError: not enough values to unpack (expected 2, got 1)``.
    The unpack is now guarded so lone images pass through unchanged.
    """

    def __init__(self):
        pass

    def encodes(self, x):
        # At predict time x is just the image (no mask): pass it through.
        if not (isinstance(x, tuple) and len(x) == 2):
            return x
        img, mask = x

        # Convert to array so pixel values can be edited in place
        mask = np.array(mask)

        # Change 255 for 1 (foreground becomes class id 1)
        mask[mask == 255] = 1

        # Back to PILMask
        mask = PILMask.create(mask)
        return img, mask
    
# Training-time augmentations: random flip/transpose plus a small (<=10 deg)
# rotation, always applied as a group (p=1).
transformPipeline=Compose([
                        Flip(p=0.5),Transpose(p=0.5),Rotate(p=0.40,limit=10)
                    ],p=1)

# Wrap the albumentations pipeline so fastai applies it to (image, mask)
# pairs via the ItemTransform defined above.
transformPipeline=SegmentationAlbumentationsTransform(transformPipeline)



# Build a SemTorch semantic-segmentation learner: U-Net with a pretrained
# ResNet-34 backbone, 2 classes, weight decay 1e-2, input normalization,
# and mixed-precision training (to_fp16).  Metrics: overall pixel accuracy,
# non-background accuracy, Dice and Jaccard.
# NOTE(review): `dls` (the DataLoaders) is not defined in this snippet —
# presumably built elsewhere from the transforms above; confirm.
learn = get_segmentation_learner(dls=dls,number_classes=2,segmentation_type="Semantic Segmentation",architecture_name="unet",backbone_name="resnet34",metrics=[segment_acc,no_bg_acc,Dice(),JaccardCoeff()],wd=1e-2,pretrained=True,normalize=True).to_fp16()


learn.lr_find() # find learning rate
# NOTE(review): bare `learn.recorder` is just an attribute access and plots
# nothing; in fastai v2 lr_find already displays the curve — confirm intent.
learn.recorder # plot learning rate graph 


# Checkpoint name for the frozen-backbone training phase.
fname="unet-no-data-augmentation-before-unfreeze-WD-2-best"

# Callbacks: live loss plot, early stopping on validation loss
# (patience epochs), and saving the best model under `fname`.
callbacksFitBeforeUnfreeze = [
    ShowGraphCallback(),EarlyStoppingCallback(monitor=monitor_training,comp=comp_training,patience=patience),SaveModelCallback(monitor=monitor_training,every_epoch=False,fname=fname)  
]
lr = 1e-03 # pick a lr
# One-cycle schedule, 10 epochs, 30% of the cycle spent warming up.
learn.fit_one_cycle(10,slice(lr),cbs=callbacksFitBeforeUnfreeze,pct_start=0.3)


# Reload the best frozen-phase checkpoint, then unfreeze the backbone
# so all layers are trainable for fine-tuning.
learn.load("unet-no-data-augmentation-before-unfreeze-WD-2-best")
learn.unfreeze()
learn.lr_find() # find learning rate
# NOTE(review): bare `learn.recorder` plots nothing by itself — confirm intent.
learn.recorder # plot learning rate graph


# Checkpoint name for the unfrozen fine-tuning phase.
fname="unet-no-data-augmentation-after-unfreeze-WD-2-best"

# FIX: the original callback list was truncated mid-paste
# (`ShowGraphCallback(),fname=fname)`), which is a SyntaxError.
# Rebuilt to mirror the pre-unfreeze callback list: live loss plot,
# early stopping on the monitored training metric, and checkpointing
# the best model under `fname`.
callbacksFitAfterUnfreeze = [
    ShowGraphCallback(),
    EarlyStoppingCallback(monitor=monitor_training, comp=comp_training, patience=patience),
    SaveModelCallback(monitor=monitor_training, every_epoch=False, fname=fname),
]

# Fine-tune the unfrozen model for one epoch at a very small learning rate.
learn.fit_one_cycle(1,slice(1e-7,1e-7),cbs=callbacksFitAfterUnfreeze)


# NOTE(review): Learner.save stores weights/optimizer state and appends
# ".pth", so the ".pkl" suffix here is cosmetic; Learner.export below is
# what produces the pickle consumed by load_learner — confirm.
learn.save('Trained_resnet34_Model.pkl') # save model
learn.load('Trained_resnet34_Model.pkl');

# Export the full Learner (model + data pipelines) for inference.
learn.export('Trained_resnet34_Model.pkl')

#*********this is the final trained model exported***********
!cp '/content/Trained_resnet34_Model.pkl' 'drive/My Drive/Skin Cancer Project/Trained_resnet34_Model.pkl'


# Directory holding the held-out test images.
path_img = 'dataset/test_set'
path_img


#*********this is the final trained model exported***********
# Copy the exported learner from Drive into the local working tree
# (IPython shell magic; only valid inside a notebook).
!cp '/content/drive/MyDrive/Skin Cancer Project/Trained_resnet34_Model.pkl' 'samples_png/model/Trained_resnet34_Model.pkl'

加载训练好的模型

# Rebuild the Learner (model + transform pipelines) from the exported pickle.
# NOTE: unpickling re-instantiates the custom ItemTransform classes defined
# above, so they must be defined in this session before this call.
learn = load_learner('samples_png/model/Trained_resnet34_Model.pkl')

创建一个文件夹来保存测试掩码

# Create the output folder for predicted masks (IPython shell magic).
!mkdir generated_samples_masks 
out_pred_samples_mask = 'generated_samples_masks'

然后,我尝试了以下代码来循环测试图像:

 for file in os.listdir(dst_folder):
  img = cv2.imread(dst_folder+file)
  mask = learn.predict(img)
  segmentaion = mask[2][0].numpy()
  segmentaion = 255-segmentaion*255
  cv2.imwrite(out_pred_samples_mask+'/'+file,segmentaion)

但是,我遇到了无法修复的错误。请问有什么办法可以解决吗?

ValueError                                Traceback (most recent call last)
<ipython-input-57-74c5d900a73a> in <module>()
      1 for file in os.listdir(dst_folder):
      2  img = cv2.imread(dst_folder+file)
----> 3  mask = learn.predict(img)
      4  segmentaion = mask[2][0].numpy()
      5  segmentaion = 255-segmentaion*255

20 frames
<ipython-input-34-9fcb09a919b6> in encodes(self,x)
      3         pass
      4     def encodes(self,x):
----> 5         img,mask = x
      6 
      7         #Convert to array

ValueError: not enough values to unpack (expected 2,got 1)

解决方法

该错误的根源在 TargetMaskConvertTransform.encodes 中的 img,mask = x:训练时每个样本是(图像, 掩码)二元组,解包正常;但 learn.predict 在推理时只传入单张图像,解包失败,于是抛出 ValueError: not enough values to unpack (expected 2, got 1)。解决方法是在 encodes 中先判断 x 是否为二元组——若不是(即推理阶段只有图像)则直接原样返回 x,只有拿到 (img, mask) 对时才执行掩码转换。修改后重新导出模型(learn.export)再用 load_learner 加载即可正常预测。

如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。

小编邮箱:dio#foxmail.com (将#修改为@)