How to train Faster R-CNN on a dataset containing negative samples in PyTorch

Problem description

I am trying to train a torchvision Faster R-CNN model for object detection on my custom data. I used the code from the torchvision object detection fine-tuning tutorial, but I get this error:

Expected target boxes to be a tensor of shape [N, 4], got torch.Size([0])
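For reference, the mismatch is easy to reproduce outside the model: converting an empty box list gives a 1-D tensor of shape [0], whereas the model expects [N, 4], which is [0, 4] when there are no objects:

import torch

boxes = []  # an image with no objects produces an empty box list
print(torch.as_tensor(boxes, dtype=torch.float32).shape)  # torch.Size([0])
print(torch.zeros((0, 4), dtype=torch.float32).shape)     # torch.Size([0, 4])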

This is caused by the negative samples (empty training images with no bounding boxes) in my custom dataset. How can we change the Dataset class below to enable training Faster R-CNN on a dataset that includes negative samples?

import os

import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset

class MyCustomDataset(Dataset):

    def __init__(self, root, transforms):
        self.root = root
        self.transforms = transforms
        # load all image files, sorting them to
        # ensure that they are aligned
        self.imgs = list(sorted(os.listdir(os.path.join(root, "PNGImages"))))
        self.masks = list(sorted(os.listdir(os.path.join(root, "PedMasks"))))

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, idx):
        # load images and masks
        img_path = os.path.join(self.root, "PNGImages", self.imgs[idx])
        mask_path = os.path.join(self.root, "PedMasks", self.masks[idx])
        img = Image.open(img_path).convert("RGB")
        # note that we haven't converted the mask to RGB,
        # because each color corresponds to a different instance
        # with 0 being background
        mask = Image.open(mask_path)
        # convert the PIL Image into a numpy array
        mask = np.array(mask)
        # instances are encoded as different colors
        obj_ids = np.unique(mask)
        # first id is the background, so remove it
        obj_ids = obj_ids[1:]

        # split the color-encoded mask into a set of binary masks
        masks = mask == obj_ids[:, None, None]

        # get bounding box coordinates for each mask
        num_objs = len(obj_ids)

        boxes = []
        for i in range(num_objs):
            pos = np.where(masks[i])
            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])
            boxes.append([xmin, ymin, xmax, ymax])

        # convert everything into a torch.Tensor
        boxes = torch.as_tensor(boxes, dtype=torch.float32)
        # there is only one class
        labels = torch.ones((num_objs,), dtype=torch.int64)
        image_id = torch.tensor([idx])
        area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
        # suppose all instances are not crowd
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)

        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target

Solution

We need to make two changes to the dataset class:

1- For images with no boxes, feed an empty boxes tensor:

if num_objs == 0:
    boxes = torch.zeros((0, 4), dtype=torch.float32)
else:
    boxes = torch.as_tensor(boxes, dtype=torch.float32)

2- Assign area = 0 to empty bounding boxes: change the code that computes the area, and make it a torch tensor:

area = 0
for i in range(num_objs):
    pos = np.where(masks[i])
    xmin = np.min(pos[1])
    xmax = np.max(pos[1])
    ymin = np.min(pos[0])
    ymax = np.max(pos[0])
    area += (xmax - xmin) * (ymax - ymin)
area = torch.as_tensor(area, dtype=torch.float32)
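As a side note, once step 1 guarantees that boxes has shape [N, 4] (including N == 0), the tutorial's original vectorized area line would also work unchanged, yielding an empty (0,)-shaped tensor for negative samples; a minimal sketch of that alternative:

import torch

boxes = torch.zeros((0, 4), dtype=torch.float32)  # a negative sample
# slicing an empty [0, 4] tensor gives empty [0] tensors, so this is safe
area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])
print(area.shape)  # torch.Size([0])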

We fold step 2 into the existing for loop.

So the modified dataset class looks like this:

class MyCustomDataset(Dataset):

    def __init__(self, root, transforms):
        self.root = root
        self.transforms = transforms
        # load all image files, sorting them to
        # ensure that they are aligned
        self.imgs = list(sorted(os.listdir(os.path.join(root, "PNGImages"))))
        self.masks = list(sorted(os.listdir(os.path.join(root, "PedMasks"))))

    def __len__(self):
        return len(self.imgs)

    def __getitem__(self, idx):
        # load images and masks
        img_path = os.path.join(self.root, "PNGImages", self.imgs[idx])
        mask_path = os.path.join(self.root, "PedMasks", self.masks[idx])
        img = Image.open(img_path).convert("RGB")
        # note that we haven't converted the mask to RGB,
        # because each color corresponds to a different instance
        # with 0 being background
        mask = Image.open(mask_path)
        # convert the PIL Image into a numpy array
        mask = np.array(mask)
        # instances are encoded as different colors
        obj_ids = np.unique(mask)
        # first id is the background, so remove it
        obj_ids = obj_ids[1:]

        # split the color-encoded mask into a set of binary masks
        masks = mask == obj_ids[:, None, None]

        # get bounding box coordinates for each mask
        num_objs = len(obj_ids)

        boxes = []
        area = 0
        for i in range(num_objs):
            pos = np.where(masks[i])
            xmin = np.min(pos[1])
            xmax = np.max(pos[1])
            ymin = np.min(pos[0])
            ymax = np.max(pos[0])
            boxes.append([xmin, ymin, xmax, ymax])
            area += (xmax - xmin) * (ymax - ymin)
        area = torch.as_tensor(area, dtype=torch.float32)

        # Handle empty bounding boxes
        if num_objs == 0:
            boxes = torch.zeros((0, 4), dtype=torch.float32)
        else:
            boxes = torch.as_tensor(boxes, dtype=torch.float32)

        # there is only one class
        labels = torch.ones((num_objs,), dtype=torch.int64)
        image_id = torch.tensor([idx])

        # area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])

        # suppose all instances are not crowd
        iscrowd = torch.zeros((num_objs,), dtype=torch.int64)

        target = {}
        target["boxes"] = boxes
        target["labels"] = labels
        target["image_id"] = image_id
        target["area"] = area
        target["iscrowd"] = iscrowd

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target
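To sanity-check the change, here is a minimal smoke test (illustrative, not from the original answer; it assumes a recent torchvision with the weights= API, and the image size is an arbitrary placeholder) that runs one train-mode forward pass on a purely negative sample:

import torch
import torchvision

# two classes: background + pedestrian
model = torchvision.models.detection.fasterrcnn_resnet50_fpn(weights=None, num_classes=2)
model.train()

images = [torch.rand(3, 224, 224)]  # one dummy image
targets = [{
    # a negative sample: zero boxes in the expected [N, 4] layout
    "boxes": torch.zeros((0, 4), dtype=torch.float32),
    "labels": torch.zeros((0,), dtype=torch.int64),
    "image_id": torch.tensor([0]),
    "area": torch.as_tensor(0, dtype=torch.float32),
    "iscrowd": torch.zeros((0,), dtype=torch.int64),
}]

loss_dict = model(images, targets)  # train mode returns a dict of losses
print({k: v.item() for k, v in loss_dict.items()})

When wiring the dataset into a DataLoader, the detection tutorial's collate_fn (which simply does return tuple(zip(*batch))) keeps the variable-length targets from being stacked into a single tensor.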