Quantcast
Channel: Active questions tagged python - Stack Overflow
Viewing all articles
Browse latest Browse all 13861

PyTorch segmentation model (.pt) not converting to CoreML

$
0
0

According to the Apple article (link), we need to wrap the model to allow tracing, which I have followed as shown below.

class WrappedDeeplabv3Resnet1011(nn.Module):
    """Wrapper that makes the saved detection model traceable for CoreML export.

    NOTE(review): despite the class name, the checkpoint actually holds a
    torchvision FasterRCNN (see the printed eval() repr), not a DeepLabV3
    segmentation model.  FasterRCNN in eval mode returns a *list* with one
    dict per input image (keys: 'boxes', 'labels', 'scores'), not a dict
    with an 'out' key -- indexing the list with a string is what raised
    ``TypeError: list indices must be integers or slices, not str``.
    """

    def __init__(self):
        super().__init__()
        # Load the trained model onto CPU and switch to inference mode.
        self.model = torch.load(
            '/content/aircraft_best_model.pt', map_location='cpu'
        ).eval()

    def forward(self, x):
        """Run the wrapped model and unpack its output into plain tensors.

        Returns a (boxes, labels, scores) tuple of tensors for the first
        image in the batch, which torch.jit.trace (and therefore
        coremltools) can handle, unlike a list of dicts.
        """
        res = self.model(x)
        # Detection models return one dict per image; take the first.
        if isinstance(res, (list, tuple)):
            res = res[0]
        return res['boxes'], res['labels'], res['scores']

I see that my model's output doesn't have the key "out", but it does have other keys, as shown below:

[{'boxes': tensor([[ 510.2429,  229.1375, 1011.1587,  399.5730],        [ 550.1007,  202.8524, 1047.5089,  376.9215],        [ 457.9409,  196.4182,  947.7454,  412.4210],        [ 333.6804,  204.8605, 1073.0546,  442.6238]],       grad_fn=<StackBackward0>), 'labels': tensor([1, 2, 3, 1]), 'scores': tensor([0.0870, 0.0631, 0.0587, 0.0531], grad_fn=<IndexBackward0>)}]

when I apply any of these keys as output it throws the error as shown below

TypeError: list indices must be integers or slices, not str

my model eval() shown like below

FasterRCNN(  (transform): GeneralizedRCNNTransform(      Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])      Resize(min_size=(800,), max_size=1333, mode='bilinear')  )  (backbone): BackboneWithFPN(    (body): IntermediateLayerGetter(      (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)      (bn1): FrozenBatchNorm2d(64, eps=0.0)      (relu): ReLU(inplace=True)      (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)      (layer1): Sequential(        (0): Bottleneck(          (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(64, eps=0.0)          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(64, eps=0.0)          (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(256, eps=0.0)          (relu): ReLU(inplace=True)          (downsample): Sequential(            (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)            (1): FrozenBatchNorm2d(256, eps=0.0)          )        )        (1): Bottleneck(          (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(64, eps=0.0)          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(64, eps=0.0)          (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(256, eps=0.0)          (relu): ReLU(inplace=True)        )        (2): Bottleneck(          (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(64, eps=0.0)          (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(64, eps=0.0)          (conv3): Conv2d(64, 256, kernel_size=(1, 1), 
stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(256, eps=0.0)          (relu): ReLU(inplace=True)        )      )      (layer2): Sequential(        (0): Bottleneck(          (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(128, eps=0.0)          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(128, eps=0.0)          (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(512, eps=0.0)          (relu): ReLU(inplace=True)          (downsample): Sequential(            (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)            (1): FrozenBatchNorm2d(512, eps=0.0)          )        )        (1): Bottleneck(          (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(128, eps=0.0)          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(128, eps=0.0)          (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(512, eps=0.0)          (relu): ReLU(inplace=True)        )        (2): Bottleneck(          (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(128, eps=0.0)          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(128, eps=0.0)          (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(512, eps=0.0)          (relu): ReLU(inplace=True)        )        (3): Bottleneck(          (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(128, eps=0.0)          (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), 
bias=False)          (bn2): FrozenBatchNorm2d(128, eps=0.0)          (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(512, eps=0.0)          (relu): ReLU(inplace=True)        )      )      (layer3): Sequential(        (0): Bottleneck(          (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(256, eps=0.0)          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(256, eps=0.0)          (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(1024, eps=0.0)          (relu): ReLU(inplace=True)          (downsample): Sequential(            (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)            (1): FrozenBatchNorm2d(1024, eps=0.0)          )        )        (1): Bottleneck(          (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(256, eps=0.0)          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(256, eps=0.0)          (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(1024, eps=0.0)          (relu): ReLU(inplace=True)        )        (2): Bottleneck(          (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(256, eps=0.0)          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(256, eps=0.0)          (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(1024, eps=0.0)          (relu): ReLU(inplace=True)        )        (3): Bottleneck(          (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)          
(bn1): FrozenBatchNorm2d(256, eps=0.0)          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(256, eps=0.0)          (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(1024, eps=0.0)          (relu): ReLU(inplace=True)        )        (4): Bottleneck(          (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(256, eps=0.0)          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(256, eps=0.0)          (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(1024, eps=0.0)          (relu): ReLU(inplace=True)        )        (5): Bottleneck(          (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(256, eps=0.0)          (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(256, eps=0.0)          (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(1024, eps=0.0)          (relu): ReLU(inplace=True)        )      )      (layer4): Sequential(        (0): Bottleneck(          (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(512, eps=0.0)          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(512, eps=0.0)          (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(2048, eps=0.0)          (relu): ReLU(inplace=True)          (downsample): Sequential(            (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)            (1): FrozenBatchNorm2d(2048, eps=0.0)          )   
     )        (1): Bottleneck(          (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(512, eps=0.0)          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(512, eps=0.0)          (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(2048, eps=0.0)          (relu): ReLU(inplace=True)        )        (2): Bottleneck(          (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn1): FrozenBatchNorm2d(512, eps=0.0)          (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)          (bn2): FrozenBatchNorm2d(512, eps=0.0)          (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)          (bn3): FrozenBatchNorm2d(2048, eps=0.0)          (relu): ReLU(inplace=True)        )      )    )    (fpn): FeaturePyramidNetwork(      (inner_blocks): ModuleList(        (0): Conv2dNormActivation(          (0): Conv2d(256, 256, kernel_size=(1, 1), stride=(1, 1))        )        (1): Conv2dNormActivation(          (0): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1))        )        (2): Conv2dNormActivation(          (0): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1))        )        (3): Conv2dNormActivation(          (0): Conv2d(2048, 256, kernel_size=(1, 1), stride=(1, 1))        )      )      (layer_blocks): ModuleList(        (0-3): 4 x Conv2dNormActivation(          (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))        )      )      (extra_blocks): LastLevelMaxPool()    )  )  (rpn): RegionProposalNetwork(    (anchor_generator): AnchorGenerator()    (head): RPNHead(      (conv): Sequential(        (0): Conv2dNormActivation(          (0): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))          (1): ReLU(inplace=True)        )      )      
(cls_logits): Conv2d(256, 3, kernel_size=(1, 1), stride=(1, 1))      (bbox_pred): Conv2d(256, 12, kernel_size=(1, 1), stride=(1, 1))    )  )  (roi_heads): RoIHeads(    (box_roi_pool): MultiScaleRoIAlign(featmap_names=['0', '1', '2', '3'], output_size=(7, 7), sampling_ratio=2)    (box_head): TwoMLPHead(      (fc6): Linear(in_features=12544, out_features=1024, bias=True)      (fc7): Linear(in_features=1024, out_features=1024, bias=True)    )    (box_predictor): FastRCNNPredictor(      (cls_score): Linear(in_features=1024, out_features=6, bias=True)      (bbox_pred): Linear(in_features=1024, out_features=24, bias=True)    )  ))

How do I convert the PyTorch model to CoreML?


Viewing all articles
Browse latest Browse all 13861

Trending Articles



<script src="https://jsc.adskeeper.com/r/s/rssing.com.1596347.js" async> </script>