Finding the camera position in a 3D model

Problem description

I am new to computer vision and I have run into a problem I find hard to solve. I am trying to obtain the camera position within a 3D model from an image. I have a 3D model in a ply file, an orthophoto as a tiff, camera calibration data as an xml file, and 70 images from the camera. I need to find the position of the camera, relative to the model, for each image I have. These are aerial images and the camera is approaching the ground. I used solvePnP for this purpose, but my results are incorrect. How can I determine this position as the distance to the ground (the Z coordinate) decreases?
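
For reference, the pose-recovery step can be checked in isolation with synthetic data: project made-up model points with a known pose, run solvePnP on them, and recover the camera centre as C = -R^T * t. The intrinsics, points and pose below are illustrative values only, not the asker's data; a minimal sketch:

    import cv2
    import numpy as np

    # hypothetical intrinsics (fx = fy = 800, principal point at 320,240), no distortion
    K = np.array([[800., 0., 320.],
                  [0., 800., 240.],
                  [0., 0., 1.]])
    true_pos = np.array([1., 2., 10.])      # assumed camera centre, 10 units above the ground
    rvec_true = np.array([np.pi, 0., 0.])   # looking straight down (180 deg about X)
    R_true, _ = cv2.Rodrigues(rvec_true)
    tvec_true = -R_true @ true_pos          # t = -R*C

    # made-up model points (world coordinates) and their exact projections
    objPoints = np.array([[0., 0., 0.], [5., 0., 0.], [0., 5., 0.], [5., 5., 0.],
                          [2., 3., 1.], [4., 1., 0.5], [1., 4., 2.], [3., 2., 1.5]],
                         dtype=np.float32)
    imgPoints, _ = cv2.projectPoints(objPoints, rvec_true, tvec_true, K, None)
    imgPoints = imgPoints.reshape(-1, 2).astype(np.float32)

    # estimate the pose and invert it: camera centre C = -R^T * t
    _, rVec, tVec = cv2.solvePnP(objPoints, imgPoints, K, None)
    R, _ = cv2.Rodrigues(rVec)
    cam_pos = -R.T @ tVec
    print(cam_pos.ravel())                  # should be close to [1. 2. 10.]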

Here is my code

    import cv2
    import numpy as np        
    import glob
    import matplotlib.pyplot as plt
    from PIL import Image
    from plyfile import PlyData
    from osgeo import gdal,osr,ogr
    import matplotlib.animation as animation
    from mpl_toolkits.mplot3d import Axes3D
    import pandas as pd
    import yaml

    def ORB(image_path):
        image = cv2.imread(image_path,cv2.IMREAD_UNCHANGED)
        image = cv2.cvtColor(image,cv2.COLOR_BGR2RGB)

        orb = cv2.ORB_create()
        kp,des = orb.detectAndCompute(image,None)

        return kp,des

    def getCalibrationData():
        cv_file = cv2.FileStorage('path-to-myCalibrationFile.xml',cv2.FILE_STORAGE_READ)
        mtx = cv_file.getNode("Camera_Matrix").mat()
        dist = cv_file.getNode("distortion_Coefficients").mat()
        cv_file.release()

        return mtx,dist

    def plot_image(image,label):
        plt.xticks([])
        plt.yticks([])
        plt.imshow(image,cmap='gray')
        plt.title(label)
        plt.show()

    def read_ply_xyz(filename):
        with open(filename,'rb') as f:
            plydata = PlyData.read(f)
            num_verts = plydata['vertex'].count
            vertices = np.zeros(shape=[num_verts,3],dtype=np.float32)
            vertices[:,0] = plydata['vertex'].data['x']
            vertices[:,1] = plydata['vertex'].data['y']
            vertices[:,2] = plydata['vertex'].data['z']
        return vertices

    def rover_mars_position(fileName,mtx,dist,vertices):
        objPoints = []
        imgPoints = []

        frame = cv2.imread(fileName)

        kp_img,des_img = ORB(fileName) # keypoints & descriptors for this image
        orb = cv2.ORB_create()

        ds = gdal.Open('file-to-my-tiffFile.tif') # read the orthophoto tiff
        channel = np.array(ds.GetRasterBand(1).ReadAsArray())
        img = Image.fromarray(np.uint8(channel * 255),'L')
        open_cv_image = np.array(img) # create an array image from the tiff

        bf = cv2.BFMatcher(cv2.NORM_HAMMING,crossCheck=True)
        kp_orto,des_orto = orb.detectAndCompute(open_cv_image,None) # ORB on raster.tif

        matches = bf.match(des_orto,des_img)
        matches = sorted(matches,key = lambda x:x.distance)

        image3 = cv2.drawMatches(open_cv_image,kp_orto,frame,kp_img,matches[:10],None,flags=cv2.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
        plot_image(image3,'img')

        # Delete points outside the image range
        for point in matches:
            if((kp_img[point.trainIdx].pt[0] > frame.shape[0] or kp_img[point.trainIdx].pt[1] > frame.shape[1]) or (kp_orto[point.trainIdx].pt[0] > channel.shape[0] or kp_orto[point.trainIdx].pt[1] > channel.shape[0])):
                matches.remove(point)

        # Create imgPoints and objPoints
        for g in matches[:10]:
            imgPoints.append([kp_img[g.trainIdx].pt[0],kp_img[g.trainIdx].pt[1]])

            x = kp_orto[g.trainIdx].pt[0]
            y = kp_orto[g.trainIdx].pt[1]

            z = channel[int(x)][int(y)]

            objPoints.append([x,y,z])

        _,rVec,tVec = cv2.solvePnP(np.array(objPoints,dtype=np.float32),np.array(imgPoints,dtype=np.float32),mtx,dist)
        Rt = cv2.Rodrigues(rVec)
        R = Rt[0].T

        # camera/rover position in model coordinates: C = -R^T * t
        pos = (-np.matrix(R)) * np.matrix(tVec)

        print("ROVER=(%4.2f,%4.2f,%4.2f)"%(pos[0],pos[1],pos[2]))
        data.append([int(pos[0]),int(pos[1]),int(pos[2])])

    data = []

    def main():

        images = glob.glob('mars-LVS-fotos/*.png') # folder with the images
        mtx,dist = getCalibrationData() # camera calibration
        vertices = read_ply_xyz('path-to-my-ply-file.ply')

        for fileName in images:
            rover_mars_position(fileName,mtx,dist,vertices) # get the rover position
            np.savetxt('data.csv',data,delimiter=',')

    main()
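
Since the object points above are taken straight from orthophoto pixel indices, it may also be worth noting that a GeoTIFF carries an affine geotransform that maps pixel column/row indices to georeferenced X/Y; a minimal sketch with gdal (the file name is the asker's placeholder, and the 3D model is assumed to share the orthophoto's coordinate system):

    from osgeo import gdal

    ds = gdal.Open('file-to-my-tiffFile.tif')
    gt = ds.GetGeoTransform()  # (originX, pixelWidth, rowRotation, originY, colRotation, pixelHeight)

    def pixel_to_world(col, row):
        # affine mapping from raster pixel indices to georeferenced X/Y
        X = gt[0] + col * gt[1] + row * gt[2]
        Y = gt[3] + col * gt[4] + row * gt[5]
        return X, Y

    print(pixel_to_world(0, 0))  # map coordinates of the top-left pixel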

