发生python scipy.optimize.fmin_l_bfgs_b grad错误

问题描述

我正在尝试使用l_bfgs重复一份对手示例论文(FeatureAdversary)。当我使用作者的g = src.diff[0].flatten().astype(float)时,会发生错误。 np.diff将形状更改为149856。

F:\anaconda3\lib\site-packages\scipy\optimize\lbfgsb.py in fmin_l_bfgs_b(func,x0,fprime,args,approx_grad,bounds,m,factr,pgtol,epsilon,iprint,maxfun,maxiter,disp,callback,maxls)
    197 
    198     res = _minimize_lbfgsb(fun,args=args,jac=jac,bounds=bounds,--> 199                            **opts)
    200     d = {'grad': res['jac'],201          'task': res['message'],F:\anaconda3\lib\site-packages\scipy\optimize\lbfgsb.py in _minimize_lbfgsb(fun,jac,maxcor,ftol,gtol,eps,maxls,**unkNown_options)
    326         _lbfgsb.setulb(m,x,low_bnd,upper_bnd,nbd,f,g,327                        pgtol,wa,iwa,task,csave,lsave,--> 328                        isave,dsave,maxls)
    329         task_str = task.tostring()
    330         if task_str.startswith(b'FG'):

ValueError: 0-th dimension must be fixed to 150528 but got 149856

这是原始代码

def calc_gstep(cs_x,net,g_feat,end,objective,verbose=True):
    src = net.blobs['data']  # input image is stored in Net's 'data' blob
    dst = net.blobs[end]     # Guide image
    src.data[0][...] = cs_x.reshape(src.data[0].shape)
    
    next_layer = get_next_layer(end,net)
    net.forward(end=next_layer)
    rfeat = dst.data[0].copy()
    net.forward(end=end)
    # specify the optimization objective
    obj = objective(dst,rfeat,verbose)
    net.backward(start=end)

    g = src.diff[0]

    return obj,g.flatten().astype(float)

我的损失函数

class Evaluator(object):
    def __init__(self):
        self.loss_value = None
        self.grads_values = None

    def loss(self,x):        
        #x shape is (150528,)
        #src shape is (1,3,224,224)
        src=Is_input.clone().numpy()
        src[0][...] = x.reshape(src[0].shape)

        x=torch.from_numpy(x.reshape(Ig_input.shape)).cuda().float()
        _,feat_x,_,_ = model(x)
        # minimize \\ q(I)-q(Ig)\\,feat_k_guide is q(Ig)
        loss = torch.dist(feat_x,feat_k_guide,p=2)
        self.loss_value = loss

        #here,change to np.diff(src)
        g = np.diff(src)
        self.grad_values = g.flatten().astype(float)

        print(self.loss_value)
        return self.loss_value

    def grads(self,x):
        assert self.loss_value is not None
        grad_values = np.copy(self.grad_values)
        self.loss_value = None
        self.grad_values = None
        return grad_values

x,min_val,info = fmin_l_bfgs_b(evaluator.loss,Is_input.numpy().flatten(),fprime=evaluator.grads
                                )

使用np.diff函数后,形状变为[1,223]并引起问题。我应该采取什么研究生职能?有人可以看看吗?谢谢!

解决方法

暂无找到可以解决该程序问题的有效方法,小编努力寻找整理中!

如果你已经找到好的解决方法,欢迎将解决方案带上本链接一起发送给小编。

小编邮箱:dio#foxmail.com (将#修改为@)