Quantcast
Channel: Active questions tagged python - Stack Overflow
Viewing all articles
Browse latest Browse all 13891

3D RGB-D Panorama to 3D mesh

$
0
0

I am projecting a panorama image back into 3D; however, I am struggling with the projection.

enter image description here

Panorama

import argparse
import os

import cv2
import numpy as np
import open3d


class PointCloudReader:
    """Back-project an equirectangular RGB-D panorama into a 3D point cloud.

    Each pixel (row, col) of the depth map is treated as a sample on the unit
    sphere — row maps to latitude (phi, -90°..90°) and column maps to
    longitude (theta, 0°..360°) — and is scaled by its depth value to obtain
    a Cartesian point.
    """

    def __init__(self, resolution="full", random_level=0, generate_color=False,
                 generate_normal=False,
                 depth_path="DPT/output_monodepth/basel_stapfelberg_panorama.png",
                 rgb_path="DPT/input/basel_stapfelberg_panorama.png"):
        """Load the panorama pair and build the point cloud immediately.

        Args:
            resolution: Stored for future use; currently unused by the builder.
            random_level: Scale of uniform noise added to every depth sample.
            generate_color: If True, per-point RGB colors are attached.
            generate_normal: Accepted for API symmetry; normals are not yet
                computed (TODO).
            depth_path: Path to the single-channel depth panorama.
            rgb_path: Path to the matching equirectangular RGB panorama.
        """
        self.random_level = random_level
        self.resolution = resolution
        self.generate_color = generate_color
        # Previously accepted but silently dropped; keep it on the instance
        # so callers can at least inspect the requested setting.
        self.generate_normal = generate_normal
        self.depth_path = depth_path
        self.rgb_path = rgb_path
        self.point_cloud = self.generate_point_cloud(self.random_level,
                                                     color=self.generate_color)

    def generate_point_cloud(self, random_level=0, color=False, normal=False):
        """Convert the depth panorama to 3D coordinates (and optional colors).

        Args:
            random_level: Scale of uniform per-pixel depth noise.
            color: If True, include a flattened (N, 3) RGB array in [0, 1].
            normal: Unused placeholder (normals not implemented).

        Returns:
            dict with key 'coords' -> (N, 3) float array, and, when *color*
            is True, 'colors' -> (N, 3) float array in [0, 1].

        Raises:
            FileNotFoundError: If either image cannot be read.
        """
        # Load the depth map at its native bit depth, then halve resolution
        # to keep the point count manageable.
        depth_img = cv2.imread(self.depth_path, cv2.IMREAD_ANYDEPTH)
        if depth_img is None:
            raise FileNotFoundError(f"Cannot read depth image: {self.depth_path}")
        depth_img = cv2.resize(depth_img,
                               (depth_img.shape[1] // 2, depth_img.shape[0] // 2))

        # Load the RGB panorama and resize it to match the depth map exactly
        # (cv2.resize takes (width, height)).
        rgb_img = cv2.imread(self.rgb_path)
        if rgb_img is None:
            raise FileNotFoundError(f"Cannot read RGB image: {self.rgb_path}")
        rgb_img = cv2.resize(rgb_img, (depth_img.shape[1], depth_img.shape[0]))

        # Spherical grid: longitude theta covers the full 360° across image
        # columns, latitude phi covers -90°..90° down the rows. meshgrid
        # yields (H, W) arrays matching depth_img.
        theta_ticks = np.deg2rad(np.arange(0, 360, 360 / depth_img.shape[1]))
        phi_ticks = np.deg2rad(np.arange(-90, 90, 180 / depth_img.shape[0]))
        theta, phi = np.meshgrid(theta_ticks, phi_ticks)

        # Optional jitter to break up quantization banding in the depth map.
        depth = depth_img + np.random.random(depth_img.shape) * random_level

        # Spherical -> Cartesian.
        x_sphere = depth * np.cos(phi) * np.sin(theta)
        y_sphere = depth * np.sin(phi)
        z_sphere = depth * np.cos(phi) * np.cos(theta)

        # Flip Y so the camera frame is y-down -> y-up for visualization.
        coords = np.stack((x_sphere.flatten(),
                           -y_sphere.flatten(),
                           z_sphere.flatten()), axis=-1)

        points = {"coords": coords}
        if color:
            # cv2.imread returns BGR; Open3D expects RGB in [0, 1], so
            # reverse the channel axis before normalizing.
            points["colors"] = rgb_img[..., ::-1].reshape(-1, 3) / 255.0
        return points

    def visualize(self):
        """Display the generated point cloud in an Open3D viewer window."""
        pcd = open3d.geometry.PointCloud()
        pcd.points = open3d.utility.Vector3dVector(self.point_cloud["coords"])
        if self.generate_color:
            pcd.colors = open3d.utility.Vector3dVector(self.point_cloud["colors"])
        open3d.visualization.draw_geometries([pcd])


def main(args):
    """Build a colored point cloud with mild depth jitter and show it."""
    reader = PointCloudReader(random_level=10, generate_color=True,
                              generate_normal=False)
    reader.visualize()


if __name__ == "__main__":
    main(None)

How can I get a valid 3D representation from the panorama? As can be seen in my reprojection, the equirectangular distortion of the panorama carries over into my 3D scene.


Viewing all articles
Browse latest Browse all 13891

Trending Articles



<script src="https://jsc.adskeeper.com/r/s/rssing.com.1596347.js" async> </script>