CARS logo

Dense dsm step by step from sensor images¶

This notebook runs the dense DSM pipeline step by step, from sensor image inputs to a terrain DSM, for one pair.

Imports¶

In [1]:
# Notebook local imports

import os
import math
###
# Silent OTB info logs
os.environ['OTB_LOGGER_LEVEL']='WARNING'
import warnings
# Filter warnings
warnings.filterwarnings("ignore",category=UserWarning)
# import pretty print
import pprint 
pp = pprint.PrettyPrinter(indent=4)

# import external function for notebook
from notebook_helpers import get_full_data, show_data, save_data, get_dir_path, set_up_demo_inputs
In [2]:
# CARS imports

# Applications
from cars.applications.application import Application
from cars.applications.grid_generation import grid_correction
from cars.applications.sparse_matching import sparse_matching_tools

# Pipelines
import cars.pipelines.sensor_to_dense_dsm.sensor_dense_dsm_constants as sens_cst
from cars.pipelines.sensor_to_dense_dsm import sensors_inputs
from cars.pipelines.sensor_to_dense_dsm import dsm_output

# Conf, core, orchestrator
from cars.core import cars_logging
from cars.core import inputs, preprocessing
from cars.core.utils import safe_makedirs
from cars.orchestrator import orchestrator
from cars.core.utils import make_relative_path_absolute
In [3]:
# Show CARS version
from cars import __version__
print("CARS version used : {}".format(__version__))
CARS version used : 0.7.0

Inputs/Outputs/Applications Init¶

Define outputs¶

In [4]:
# Modify with your own output path if needed.
# All notebook products (orchestrator out_dir, saved DSM) are written here.
output_dir = os.path.join(get_dir_path(), "output_notebook")
print(output_dir)
/home/vboxuser/Development/discover-cnes-3d-tools/cars/output_notebook

Define inputs¶

In [5]:
# By default, the notebook uses data_gizeh_small.tar.bz2; data_gizeh is also
# available (performance dependent).
# For your own data: modify input_dir_path and all image, geometric model and
# color file names below.
input_dir_path = set_up_demo_inputs("data_gizeh_small")

# Raw input configuration for one sensor pair:
# - each sensor declares its image, its geometric model (RPC .geom) and a no-data value
# - "left" additionally provides a color image used for the ortho texture
# - "pairing" lists which sensors form a stereo pair
# - "initial_elevation" points to the SRTM tile used as a priori elevation
inputs_conf = {
    "sensors": {
        "left": {
            "image": os.path.join(input_dir_path, "img1.tif"),
            "geomodel": os.path.join(input_dir_path, "img1.geom"),
            "color": os.path.join(input_dir_path, "color1.tif"),
            "no_data": 0,


        },
        "right": {
            "image": os.path.join(input_dir_path, "img2.tif"),
            "geomodel": os.path.join(input_dir_path, "img2.geom"),
            "no_data": 0,
        },   
    },
    "pairing": [["left", "right"]],
    "initial_elevation": os.path.join(input_dir_path, "srtm_dir", "N29E031_KHEOPS.tif")
}

# Validate and complete the configuration (fills defaults: geoid, geomodel_type, ...).
# NOTE(review): this name shadows the `cars.core.inputs` module imported above;
# harmless here because the module is not used afterwards, but consider renaming
# (e.g. `checked_inputs`) to avoid confusion.
inputs = sensors_inputs.sensors_check_inputs(inputs_conf)
# pp.pprint(inputs)
{   'check_inputs': False,
    'default_alt': 0,
    'epipolar_a_priori': {},
    'epsg': None,
    'geoid': '/home/vboxuser/Development/cars/cars/pipelines/sensor_to_dense_dsm/../../conf/geoid/egm96.grd',
    'initial_elevation': '/home/vboxuser/Development/discover-cnes-3d-tools/cars/data_gizeh_small/srtm_dir/N29E031_KHEOPS.tif',
    'pairing': [['left', 'right']],
    'roi': None,
    'sensors': {   'left': {   'classification': None,
                               'color': '/home/vboxuser/Development/discover-cnes-3d-tools/cars/data_gizeh_small/color1.tif',
                               'geomodel': '/home/vboxuser/Development/discover-cnes-3d-tools/cars/data_gizeh_small/img1.geom',
                               'geomodel_filters': None,
                               'geomodel_type': 'RPC',
                               'image': '/home/vboxuser/Development/discover-cnes-3d-tools/cars/data_gizeh_small/img1.tif',
                               'mask': None,
                               'no_data': 0},
                   'right': {   'classification': None,
                                'color': '/home/vboxuser/Development/discover-cnes-3d-tools/cars/data_gizeh_small/img2.tif',
                                'geomodel': '/home/vboxuser/Development/discover-cnes-3d-tools/cars/data_gizeh_small/img2.geom',
                                'geomodel_filters': None,
                                'geomodel_type': 'RPC',
                                'image': '/home/vboxuser/Development/discover-cnes-3d-tools/cars/data_gizeh_small/img2.tif',
                                'mask': None,
                                'no_data': 0}},
    'use_epipolar_a_priori': False}

Applications Init¶

GridGeneration¶

This application generates epipolar grids corresponding to sensor pair

In [6]:
epipolar_grid_generation_application = Application("grid_generation")

Resampling¶

This application generates epipolar images from epipolar grids

In [7]:
resampling_application = Application("resampling")

SparseMatching¶

This application generates sparse matches of stereo image pairs

In [8]:
sparse_matching_application = Application("sparse_matching")

DenseMatching¶

This application generates dense matches of stereo image pairs

In [9]:
dense_matching_application = Application("dense_matching")

Show used application configuration¶

In [10]:
# Example with dense matching application
# dense_matching_application.print_config()
{   'epipolar_tile_margin_in_percent': 60,
    'generate_performance_map': False,
    'loader': 'pandora',
    'loader_conf': OrderedDict([   (   'input',
                                       {   'nodata_left': -9999,
                                           'nodata_right': -9999}),
                                   (   'pipeline',
                                       {   'cost_volume_confidence': {   'confidence_method': 'ambiguity',
                                                                         'eta_max': 0.7,
                                                                         'eta_step': 0.01,
                                                                         'indicator': ''},
                                           'disparity': {   'disparity_method': 'wta',
                                                            'invalid_disparity': nan},
                                           'filter': {   'filter_method': 'median',
                                                         'filter_size': 3},
                                           'matching_cost': {   'matching_cost_method': 'census',
                                                                'subpix': 1,
                                                                'window_size': 5},
                                           'optimization': {   'min_cost_paths': False,
                                                               'optimization_method': 'sgm',
                                                               'overcounting': False,
                                                               'penalty': {   'P1': 8,
                                                                              'P2': 32,
                                                                              'p2_method': 'constant',
                                                                              'penalty_method': 'sgm_penalty'},
                                                               'sgm_version': 'c++',
                                                               'use_confidence': False},
                                           'refinement': {   'refinement_method': 'vfit'},
                                           'right_disp_map': {   'method': 'accurate'},
                                           'validation': {   'cross_checking_threshold': 1.0,
                                                             'validation_method': 'cross_checking'}})]),
    'max_elevation_offset': None,
    'max_epi_tile_size': 1500,
    'method': 'census_sgm',
    'min_elevation_offset': None,
    'min_epi_tile_size': 300,
    'perf_ambiguity_threshold': 0.6,
    'perf_eta_max_ambiguity': 0.99,
    'perf_eta_max_risk': 0.25,
    'perf_eta_step': 0.04,
    'save_disparity_map': False}

Triangulation¶

This application triangulates matches, in order to get each (X, Y, Z) point position

In [11]:
triangulation_application = Application("triangulation")

PointCloudFusion¶

This application performs the fusion of epipolar points from pairs to a terrain point cloud

In [12]:
pc_fusion_application = Application("point_cloud_fusion")

PointCloudOutliersRemoving : small components¶

This application removes outlier points. The method used is "small components removing"

In [13]:
# Outlier filtering, "small components" method (explicitly activated here).
conf_outlier_removing_small_components = {"method": "small_components", "activated": True}
pc_outlier_removing_small_comp_application = Application("point_cloud_outliers_removing", cfg=conf_outlier_removing_small_components)

PointCloudOutliersRemoving : statistical¶

This application removes outlier points. The method used is "statistical removing"

In [14]:
# Outlier filtering, "statistical" method (explicitly activated here).
# Fix: the config variable was previously named
# `conf_outlier_removing_small_statistical`, a copy-paste leftover from the
# "small components" cell — "small" does not apply to the statistical method.
conf_outlier_removing_statistical = {"method": "statistical", "activated": True}
pc_outlier_removing_stats_application = Application("point_cloud_outliers_removing", cfg=conf_outlier_removing_statistical)

PointCloudRasterization¶

This application performs the rasterization of a terrain point cloud.

In [15]:
# Rasterization configuration: gaussian-weighted interpolation of the point
# cloud onto the DSM grid, with a 3-cell neighborhood radius and sigma 0.3.
conf_rasterization = { 
    "method": "simple_gaussian",
    "dsm_radius": 3,
    "sigma": 0.3
}
rasterization_application = Application("point_cloud_rasterization", cfg=conf_rasterization)

Create orchestrator¶

In [16]:
# Use sequential mode in the notebook: tasks run one by one in this process,
# which keeps execution deterministic and debuggable (no cluster needed).
orchestrator_conf = {"mode": "sequential"}
cars_orchestrator = orchestrator.Orchestrator(orchestrator_conf=orchestrator_conf, out_dir=output_dir)

Run pipeline step by step from sensors to DSM¶

Sensors images generation¶

From input configuration "inputs" seen before

In [17]:
_, sensor_image_left, sensor_image_right = sensors_inputs.generate_inputs(inputs)[0]

Grid Generation : epipolar grid generation¶

In [18]:
# Generate the left/right epipolar grids for the pair, using the SRTM initial
# elevation, default altitude and geoid from the checked inputs configuration.
grid_left, grid_right = epipolar_grid_generation_application.run(
    sensor_image_left,
    sensor_image_right,
    orchestrator=cars_orchestrator,
    srtm_dir=inputs[sens_cst.INITIAL_ELEVATION],
    default_alt=inputs[sens_cst.DEFAULT_ALT],
    geoid_path=inputs[sens_cst.GEOID],
)

Resampling : epipolar images generation¶

In [19]:
# First resampling pass: epipolar images with the margins required by the
# sparse matching application (a second pass is done later with corrected grids).
epipolar_image_left, epipolar_image_right = resampling_application.run(
    sensor_image_left,
    sensor_image_right,
    grid_left,
    grid_right,
    orchestrator=cars_orchestrator,
    margins=sparse_matching_application.get_margins()
)

Show epipolar image¶

In [20]:
# Assemble the full (untiled) left epipolar image and display it.
data_image_left = get_full_data(epipolar_image_left, "im")
show_data(data_image_left, mode="image")

Sparse matching: compute sifts¶

In [21]:
# Compute SIFT matches on the epipolar pair; only the left-referenced matches
# are kept (the second return value is discarded).
epipolar_matches_left, _ = sparse_matching_application.run(
    epipolar_image_left,
    epipolar_image_right,
    grid_left.attributes["disp_to_alt_ratio"],
    orchestrator=cars_orchestrator
)

Grid correction: correct epipolar grids from sparse matches¶

Find correction to apply, and generate new right epipolar grid

Filter matches¶

In [22]:
matches_array = sparse_matching_application.filter_matches(epipolar_matches_left, orchestrator=cars_orchestrator)

Estimate grid correction¶

In [23]:
grid_correction_coef, corrected_matches_array, _, _, _ = grid_correction.estimate_right_grid_correction(matches_array, grid_right)

Correct right grid¶

In [24]:
corrected_grid_right = grid_correction.correct_grid(grid_right, grid_correction_coef)

Estimate disp min and disp max from sparse matches¶

In [25]:
# Estimate the disparity interval [dmin, dmax] from the corrected sparse
# matches, padded by the sparse-matching disparity margin; this interval
# drives the dense matching margins and tile sizing below.
dmin, dmax = sparse_matching_tools.compute_disp_min_disp_max(
                    sensor_image_left,
                    sensor_image_right,
                    grid_left,
                    corrected_grid_right,
                    grid_right,
                    corrected_matches_array,
                    orchestrator=cars_orchestrator,
                    disp_margin=(
                        sparse_matching_application.get_disparity_margin()
                    ),
                    disp_to_alt_ratio=grid_left.attributes["disp_to_alt_ratio"],
                    geometry_loader=triangulation_application.get_geometry_loader(),
                    srtm_dir=inputs[sens_cst.INITIAL_ELEVATION],
                    default_alt=inputs[sens_cst.DEFAULT_ALT],
)

Compute margins used in dense matching, with corresponding disparity min and max¶

In [26]:
# Derive the dense-matching margins and the (possibly adjusted) disparity
# bounds from the estimated [dmin, dmax] interval.
dense_matching_margins, disp_min, disp_max = dense_matching_application.get_margins(
    grid_left, disp_min=dmin, disp_max=dmax)

Resampling: generate epipolar images with corrected grids and new margins¶

In [27]:
# Second resampling pass: regenerate the epipolar images with the corrected
# right grid, the dense-matching margins, and a tile size optimized for the
# disparity range and the cluster's per-worker RAM budget. add_color=True
# also resamples the color image for later ortho rendering.
new_epipolar_image_left, new_epipolar_image_right = resampling_application.run(
    sensor_image_left,
    sensor_image_right,
    grid_left,
    corrected_grid_right,
    orchestrator=cars_orchestrator,
    margins=dense_matching_margins,
    optimum_tile_size=(
        dense_matching_application.get_optimal_tile_size(
            disp_min, 
            disp_max,
            cars_orchestrator.cluster.checked_conf_cluster[
                "max_ram_per_worker"
            ],
        )
    ),
    add_color=True,
)

Dense Matching: compute disparities with pandora¶

In [28]:
# Compute the dense disparity map on the new epipolar pair, restricted to the
# estimated disparity interval.
epipolar_disparity_map = dense_matching_application.run(
    new_epipolar_image_left,
    new_epipolar_image_right,
    orchestrator=cars_orchestrator,
    disp_min=disp_min,
    disp_max=disp_max,
)

Show full disparity map¶

In [29]:
# Assemble and display the full disparity map.
data_disparity = get_full_data(epipolar_disparity_map, "disp")
show_data(data_disparity)

Compute epsg

In [30]:
# Determine the output EPSG code for the terrain products from the pair's
# geometry (grids, a priori elevation, disparity bounds).
epsg = preprocessing.compute_epsg(
    sensor_image_left, 
    sensor_image_right,
    grid_left,
    corrected_grid_right,
    triangulation_application.get_geometry_loader(),
    orchestrator=cars_orchestrator,
    srtm_dir=inputs[sens_cst.INITIAL_ELEVATION],
    default_alt=inputs[sens_cst.DEFAULT_ALT],
    disp_min=disp_min,
    disp_max=disp_max
)

Triangulation : triangulate matches¶

In [31]:
# Triangulate the disparity map into an epipolar point cloud in the target
# EPSG, with heights referenced via the geoid. The uncorrected right grid is
# also passed so triangulation can account for the grid correction.
epipolar_points_cloud = triangulation_application.run(
    sensor_image_left,
    sensor_image_right,
    new_epipolar_image_left,
    grid_left,
    corrected_grid_right,
    epipolar_disparity_map,
    epsg,
    orchestrator=cars_orchestrator,
    uncorrected_grid_right=grid_right,
    geoid_path=inputs[sens_cst.GEOID],
    disp_min=disp_min,
    disp_max=disp_max,
)

Compute terrain bounding box¶

In [32]:
# Compute the terrain bounding box of the pair at the rasterization resolution,
# then derive the global terrain bounds and the optimal terrain tile width used
# by the fusion step.
current_terrain_roi_bbox = preprocessing.compute_terrain_bbox(
    inputs[sens_cst.INITIAL_ELEVATION],
    inputs[sens_cst.DEFAULT_ALT],
    inputs[sens_cst.GEOID],
    sensor_image_left,
    sensor_image_right,
    new_epipolar_image_left,
    grid_left,
    corrected_grid_right,
    epsg,
    triangulation_application.get_geometry_loader(),
    resolution=rasterization_application.get_resolution(),
    disp_min=disp_min,
    disp_max=disp_max,
    orchestrator=cars_orchestrator
)
terrain_bounds, optimal_terrain_tile_width = preprocessing.compute_terrain_bounds(
    [current_terrain_roi_bbox],
    resolution=rasterization_application.get_resolution()
)

Transform point cloud to terrain point cloud¶

In [33]:
# Merge the (single) epipolar point cloud into terrain tiles within the
# computed bounds, with rasterization margins applied.
merged_points_clouds = pc_fusion_application.run(
    [epipolar_points_cloud],
    terrain_bounds,
    epsg,
    orchestrator=cars_orchestrator,
    margins=rasterization_application.get_margins(),
    optimal_terrain_tile_width=optimal_terrain_tile_width
)

Point Cloud Outlier Removing : remove points with small components removing method¶

In [34]:
# First outlier-filtering pass: remove small isolated components.
filtered_sc_merged_points_clouds = pc_outlier_removing_small_comp_application.run(
    merged_points_clouds,
    orchestrator=cars_orchestrator,
)    

Point Cloud Outlier Removing: remove points with statistical removing method¶

In [35]:
# Second outlier-filtering pass: statistical removal, applied to the output of
# the small-components pass.
filtered_stats_merged_points_clouds = pc_outlier_removing_stats_application.run(
    filtered_sc_merged_points_clouds,
    orchestrator=cars_orchestrator,
)

Rasterization : rasterize point cloud¶

In [36]:
# Rasterize the filtered terrain point cloud into the final DSM.
dsm = rasterization_application.run(
    filtered_stats_merged_points_clouds,
    epsg,
    orchestrator=cars_orchestrator
)

Show DSM¶

In [37]:
# Assemble and display the height ("hgt") band of the DSM.
data_dsm = get_full_data(dsm, "hgt")
show_data(data_dsm, mode="dsm")

Show ortho image¶

In [38]:
# Display the ortho image: keep only the first 3 channels (RGB) of the
# rasterized color data.
data_ortho = get_full_data(dsm, "img")[..., 0:3]
show_data(data_ortho, mode='image')

Save DSM¶

In [39]:
save_data(dsm, os.path.join(output_dir, "dsm.tif"), "hgt")