问题描述
将 PyTorch 模型转换为 Core ML 后,预测结果差很多。你怎么看,可能有什么问题?在转换过程中,我收到两条警告(原文如下):`WARNING:root:Tuple detected at graph output. This will be flattened in the converted model.` 和 `WARNING:root:Output var reduce_argmax_1 of type i32 in function main is cast to type fp32.`
我的模型:
class Net(nn.Module):
    # PSP-style semantic-segmentation network: DenseNet encoder + pyramid
    # pooling + skip-connection decoder.
    # NOTE(review): this snippet appears to be a mangled copy/paste — several
    # layer constructors below are missing arguments and/or closing
    # parentheses, and `nn.Batchnorm2d` is a typo for `nn.BatchNorm2d`, so
    # the block does not parse as-is. Comments flag each spot; the code
    # itself is left exactly as pasted.
    def __init__(self,NumClasses,PreTrainedModelPath="",UpdateEncoderBatchnormStatistics=True):
        # NumClasses: number of segmentation classes.
        # PreTrainedModelPath: optional path to pre-trained encoder weights.
        # UpdateEncoderBatchnormStatistics: when False, the encoder is put in
        # eval mode so its batchnorm running statistics are frozen.
        super(Net,self).__init__()
        self.Encoder = densenet_cosine_264_k32.densenet_cosine_264_k32
        if not PreTrainedModelPath=="":
            self.Encoder.load_state_dict(torch.load(PreTrainedModelPath))
            print ("Dense net encoder weights loaded")
        if not UpdateEncoderBatchnormStatistics:
            self.Encoder.eval()
        # Encoder layer indices whose feature maps feed the skip connections.
        self.SkipConnectionLayers=[2,12,28,96]#,147]
        # Pyramid-pooling scales: full, 1/2, 1/4 and 1/8 resolution branches.
        self.PSPScales = [1,1 / 2,1 / 4,1 / 8]
        self.PSPLayers = nn.ModuleList()
        for Ps in self.PSPScales:
            self.PSPLayers.append(nn.Sequential(
                nn.Conv2d(2688,1024,stride=1,kernel_size=3,padding=1,bias=True)))
        # NOTE(review): `nn.Batchnorm2d` should be `nn.BatchNorm2d` (here and
        # below) — as written this raises AttributeError.
        self.PSPSqueeze = nn.Sequential(
            nn.Conv2d(4096,512,kernel_size=1,padding=0,bias=False),nn.Batchnorm2d(512),nn.ReLU()
        )
        self.SkipConnections = nn.ModuleList()
        # NOTE(review): the two Conv2d calls below lost their out_channels /
        # kernel_size arguments (and a closing paren) in the paste — compare
        # against the original repository before relying on this code.
        self.SkipConnections.append(nn.Sequential(
            nn.Conv2d(1152,nn.ReLU()))
        self.SkipConnections.append(nn.Sequential(
            nn.Conv2d(256,256,nn.Batchnorm2d(256),nn.ReLU()))
        self.SqueezeUpsample = nn.ModuleList()
        # NOTE(review): same truncation here — Conv2d arguments are missing.
        self.SqueezeUpsample.append(nn.Sequential(
            nn.Conv2d(1024,nn.ReLU()
        ))
        self.SqueezeUpsample.append(nn.Sequential(
            nn.Conv2d(256+512,nn.ReLU()
        ))
        # NOTE(review): missing out_channels/kernel_size arguments; the name
        # is also a typo for "FinalPrediction".
        self.FinalPrdiction=nn.Conv2d(256,bias=False)
        # nn.Module.cuda() moves parameters in place and returns self, so
        # this works, but rebinding the local `self` is unnecessary; prefer
        # calling `.cuda()` / `.to(device)` from the caller.
        self=self.cuda()
    def forward(self,Images,EvalMode=False):
        # Images: input batch; normalized in-place below.
        # Returns (Prob, Labels): per-pixel class probabilities and argmax
        # labels.
        # Per-channel normalization constants (ImageNet-style mean, std 65).
        RGBMean = [123.68,116.779,103.939]
        RGBStd = [65,65,65]
        #InpImages = torch.autograd.Variable(torch.from_numpy(Images.astype(float)),requires_grad=False).transpose(2,3).transpose(1,2).type(torch.FloatTensor)
        #InpImages = torch.autograd.Variable(torch.from_numpy(Images.astype(float)),2).type(torch.HalfTensor)
        InpImages = torch.autograd.Variable(Images)
        InpImages=InpImages.cuda()
        # NOTE(review): normalization happens inside the model; if the Core ML
        # conversion also applies scale/bias preprocessing, inputs get
        # normalized twice — a likely cause of prediction drift. Also the RHS
        # `InpImages[:,:]` selects the WHOLE tensor, not channel i —
        # presumably `InpImages[:,i,:,:]` was intended; verify shapes.
        for i in range(len(RGBMean)): InpImages[:,i,:,:]=(InpImages[:,:]-RGBMean[i])/RGBStd[i]
        x=InpImages
        SkipConFeatures=[]
        # Run the encoder layer by layer, capturing skip-connection features
        # at the indices listed in self.SkipConnectionLayers.
        for i in range(147):
            x=self.Encoder[i](x)
            if i in self.SkipConnectionLayers:
                SkipConFeatures.append(x)
        # Pyramid pooling: process the encoder output at each scale, then
        # resize every branch back to the encoder's spatial size and concat.
        PSPSize=(x.shape[2],x.shape[3])
        PSPFeatures=[]
        for i,Layer in enumerate(self.PSPLayers):
            # NOTE(review): np.int is removed in NumPy >= 1.24; use int.
            NewSize=np.ceil(np.array(PSPSize)*self.PSPScales[i]).astype(np.int)
            y = nn.functional.interpolate(x,tuple(NewSize),mode='bilinear')
            y = Layer(y)
            y = nn.functional.interpolate(y,PSPSize,mode='bilinear')
            PSPFeatures.append(y)
        x=torch.cat(PSPFeatures,dim=1)
        x=self.PSPSqueeze(x)
        # Decoder: upsample to each skip feature's size (deepest skip first),
        # concatenate, and squeeze.
        for i in range(len(self.SkipConnections)):
            sp=(SkipConFeatures[-1-i].shape[2],SkipConFeatures[-1-i].shape[3])
            x=nn.functional.interpolate(x,size=sp,mode='bilinear')
            x = torch.cat((self.SkipConnections[i](SkipConFeatures[-1-i]),x),dim=1)
            x = self.SqueezeUpsample[i](x)
        # Per-pixel class logits, upsampled back to the input resolution.
        x = self.FinalPrdiction(x)
        x = nn.functional.interpolate(x,size=InpImages.shape[2:4],mode='bilinear')
        Prob=F.softmax(x,dim=1)
        tt,Labels=x.max(1)
        # Returning a (tensor, argmax) tuple is what triggers the converter
        # warnings quoted in the question: tuple outputs are flattened, and
        # the i32 argmax output is cast to fp32 — both are expected behavior,
        # not the cause of the accuracy drop.
        return Prob,Labels
# Build the network and load the trained weights.
# NOTE(review): the original `Net = Net(NumClasses=3)` rebound — and thereby
# destroyed — the class object itself; use a lowercase instance name.
net = Net(NumClasses=3)
net.load_state_dict(torch.load(Trained_model_path))
net.eval()  # freeze batchnorm/dropout BEFORE tracing, or stats keep updating

import coremltools

# `traced_model` was never defined in the original snippet: the unified
# converter needs a TorchScript module, so trace the eval-mode network on a
# representative input first.
example_input = torch.rand(1, 3, 255, 255)
traced_model = torch.jit.trace(net, example_input)

# Use ImageType instead of TensorType so the (x - mean) / std preprocessing
# is baked into the Core ML model. Core ML applies y = scale * x + bias per
# channel, hence scale = 1/std and bias = -mean/std.
# NOTE(review): forward() also normalizes internally — remove one of the two
# normalizations, otherwise the input is normalized twice and predictions
# diverge from PyTorch.
mlmodel = coremltools.convert(
    traced_model,
    inputs=[coremltools.ImageType(
        shape=(1, 3, 255, 255),
        scale=1.0 / 65.0,
        bias=[-123.68 / 65.0, -116.779 / 65.0, -103.939 / 65.0],
    )],
)
我期待任何建议
解决方法
转换模型时还需要把模型使用的归一化参数(均值与标准差)一并告诉转换器:
RGBMean = [123.68, 116.779, 103.939]
RGBStd = [65, 65, 65]
具体做法是在 `coremltools.convert` 的 `inputs` 参数里用 `ImageType`(设置 `scale = 1/65`、逐通道 `bias = [-mean/std]`,即 Core ML 按 `y = scale*x + bias` 做预处理)代替 `TensorType`。这样输入会被当作图像而不是普通张量处理,归一化内置到 Core ML 模型中;同时要确保 forward() 内部不再重复做同样的归一化,否则会归一化两次。有关实际语法,请参阅 coremltools 文档。