I am working on Image Segmentation using UNet model.I am facing the following error in the dataloader part.
RuntimeError: permute(sparse_coo): number of dimensions in the tensor input does not match the length of the desired ordering of dimensions i.e. input.dim() = 4 is not equal to len(dims) = 3
I guess the error says that some of the input images have channels = 4. However I have tried to check the shape of input images right after reading the images. All the input images have 3 channels.
This is my dataloader class:
class MyDataloader(torch.utils.data.Dataset): def init( self, df, dir_path ): self.df = df self.dir_path = dir_pathdef len(self): return len(self.df)def getitem(self, idx):cur_index = self.df.iloc[idx]img_path = os.path.join( self.dir_path, cur_index[0] ) ## image pathgt_path = os.path.join( self.dir_path, cur_index[1] ) ## gt pathimg = Image.open(img_path)gt = Image.open(gt_path)# Convert the image and gt to a NumPy arrayimg = np.array(img)gt = np.array(gt).astype(np.float32)# define transformtransform = A.Compose([ A.Resize(height=400, width=400), A.RandomRotate90(), A.Transpose(), A.ShiftScaleRotate(shift_limit=0.0625, scale_limit=0.50, rotate_limit=45, p=.75), A.Blur(blur_limit=3), A.OpticalDistortion(), A.GridDistortion(), A.HueSaturationValue() # A.Normalize( mean = (0.485, 0.456, 0.406), std = (0.229, 0.224, 0.225) ) ])# apply transformaugmented = transform( image = img, mask = gt )augmented_image = augmented['image']augmented_gt = augmented['mask']return torch.tensor(augmented_image).permute(2, 0, 1).float(), torch.tensor(augmented_gt).unsqueeze(1).permute(1, 0, 2)batch_size = 20# create train dataloader --train_dl = torch.utils.data.DataLoader( train_ds, batch_size = batch_size, drop_last = True, shuffle = True, num_workers = 4