added some visualization tools for lidar pointclouds

Jan Kowalczyk
2024-04-19 13:38:09 +02:00
parent 0907c89cce
commit 0ee3bc82b5
4 changed files with 395 additions and 0 deletions

60
tools/create_camera_settings.py Normal file

@@ -0,0 +1,60 @@
from configargparse import ArgParser, YAMLConfigFileParser, ArgumentDefaultsRawHelpFormatter
from sys import exit
from open3d.geometry import Geometry
from open3d.visualization import Visualizer
from open3d.io import write_pinhole_camera_parameters
from pathlib import Path

from util import (
    load_dataset_from_bag,
    existing_file,
)


def create_camera_settings(geometry: Geometry, camera_config_output_json_path: Path) -> None:
    vis = Visualizer()
    vis.create_window()
    vis.add_geometry(geometry)
    vis.run()
    parameters = vis.get_view_control().convert_to_pinhole_camera_parameters()
    write_pinhole_camera_parameters(camera_config_output_json_path.as_posix(), parameters)
    print(f"Written camera config to {camera_config_output_json_path.as_posix()}!")
    vis.destroy_window()


def main() -> int:
    parser = ArgParser(
        config_file_parser_class=YAMLConfigFileParser,
        default_config_files=["create_camera_settings_config.yaml"],
        formatter_class=ArgumentDefaultsRawHelpFormatter,
        description="Use an interactive window to move the camera and export the camera view at the moment of closing"
        " the window into a JSON file",
    )
    parser.add_argument("--render-config-file", is_config_file=True, help="yaml config file path")
    parser.add_argument("--input-bag-path", required=True, type=existing_file, help="path to bag file")
    parser.add_argument(
        "--camera-config-output-json-path",
        default=Path("./saved_camera_settings.json"),
        type=Path,
        help="path the camera settings json file should be saved to",
    )
    parser.add_argument(
        "--bag-pointcloud-index",
        type=int,
        default=0,
        help="index of the frame inside the experiment bag that should be displayed in 3d",
    )
    args = parser.parse_args()

    print("Creating camera settings!")
    print("Move the view in the window to the desired camera position and then close the window using the ESC key!")

    dataset = load_dataset_from_bag(args.input_bag_path)
    open3d_pointcloud = dataset[args.bag_pointcloud_index].to_instance("open3d")
    create_camera_settings(open3d_pointcloud, args.camera_config_output_json_path)
    return 0


if __name__ == "__main__":
    exit(main())
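
For orientation, a minimal sketch of how the exported settings could also be produced without the CLI; the module and helper names are taken from this diff, while the bag path and frame index are placeholders:

# Hypothetical usage sketch: load one frame from a bag (placeholder path) and
# open the interactive window; closing it writes the camera JSON.
from pathlib import Path

from create_camera_settings import create_camera_settings
from util import load_dataset_from_bag

dataset = load_dataset_from_bag(Path("experiment.bag"))  # placeholder bag file
pointcloud = dataset[0].to_instance("open3d")  # first frame as an Open3D geometry
create_camera_settings(pointcloud, Path("saved_camera_settings.json"))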

140
tools/render2d.py Normal file

@@ -0,0 +1,140 @@
from configargparse import ArgParser, YAMLConfigFileParser, ArgumentDefaultsRawHelpFormatter
from sys import exit
from pathlib import Path
from pointcloudset import Dataset
from rich.progress import track
from pandas import DataFrame
from PIL import Image
import matplotlib.pyplot as plt

from util import (
    load_dataset_from_bag,
    existing_file,
    create_video_from_images,
    calculate_average_frame_rate,
    get_colormap_with_special_missing_color,
)


def create_2d_projection(
    df: DataFrame,
    output_file_path: Path,
    tmp_file_path: Path,
    colormap_name: str,
    missing_data_color: str,
    reverse_colormap: bool,
) -> Path:
    fig, ax = plt.subplots(figsize=(20.48, 5.12))
    ax.imshow(
        df,
        cmap=get_colormap_with_special_missing_color(colormap_name, missing_data_color, reverse_colormap),
        aspect="auto",
    )
    ax.axis("off")
    fig.subplots_adjust(left=0, right=1, top=1, bottom=0)
    plt.savefig(tmp_file_path, dpi=100, bbox_inches="tight", pad_inches=0)
    plt.close()
    # Resize the matplotlib output to a fixed 2048 x 512 pixel image.
    img = Image.open(tmp_file_path)
    img_resized = img.resize((2048, 512), Image.LANCZOS)
    img_resized.save(output_file_path)
    return output_file_path


def render_2d_images(
    dataset: Dataset,
    output_images_path: Path,
    image_pattern_prefix: str,
    tmp_files_path: Path,
    colormap_name: str,
    missing_data_color: str,
    reverse_colormap: bool,
) -> list[Path]:
    rendered_images = []
    for i, pc in track(enumerate(dataset, 1), description="Rendering images...", total=len(dataset)):
        # Map each point to a column in a 2048-wide scan image based on its original id.
        pc.data["horizontal_position"] = pc.data["original_id"] % 2048
        image_data = pc.data.pivot(index="ring", columns="horizontal_position", values="range")
        normalized_data = (image_data - image_data.min().min()) / (image_data.max().max() - image_data.min().min())
        image_path = create_2d_projection(
            normalized_data,
            output_images_path / f"{image_pattern_prefix}_frame_{i:04d}.png",
            tmp_files_path / "tmp.png",
            colormap_name,
            missing_data_color,
            reverse_colormap,
        )
        rendered_images.append(image_path)
    return rendered_images


def main() -> int:
    parser = ArgParser(
        config_file_parser_class=YAMLConfigFileParser,
        default_config_files=["render2d_config.yaml"],
        formatter_class=ArgumentDefaultsRawHelpFormatter,
        description="Render a 2d projection of a point cloud",
    )
    parser.add_argument("--render-config-file", is_config_file=True, help="yaml config file path")
    parser.add_argument("--input-bag-path", required=True, type=existing_file, help="path to bag file")
    parser.add_argument(
        "--tmp-files-path", default=Path("./tmp"), type=Path, help="path temporary files will be written to"
    )
    parser.add_argument(
        "--output-images", type=bool, default=True, help="if rendered frames should be outputted as images"
    )
    parser.add_argument(
        "--output-images-path", default=Path("./output"), type=Path, help="path rendered frames should be written to"
    )
    parser.add_argument(
        "--output-video", type=bool, default=True, help="if rendered frames should be outputted as a video"
    )
    parser.add_argument(
        "--output-video-path",
        default=Path("./output/2d_render.mp4"),
        type=Path,
        help="path rendered video should be written to",
    )
    parser.add_argument("--output-images-prefix", default="2d_render", type=str, help="filename prefix for output")
    parser.add_argument("--colormap-name", default="viridis", type=str, help="name of matplotlib colormap to be used")
    parser.add_argument(
        "--missing-data-color", default="black", type=str, help="name of color to be used for missing data"
    )
    parser.add_argument("--reverse-colormap", default=True, type=bool, help="if colormap should be reversed")
    args = parser.parse_args()

    if args.output_images:
        args.output_images_path.mkdir(parents=True, exist_ok=True)
        # Render directly into the output folder so the frames do not need to be moved afterwards.
        args.tmp_files_path = args.output_images_path
    else:
        args.tmp_files_path.mkdir(parents=True, exist_ok=True)
    if args.output_video:
        args.output_video_path.parent.mkdir(parents=True, exist_ok=True)

    dataset = load_dataset_from_bag(args.input_bag_path)
    images = render_2d_images(
        dataset,
        args.tmp_files_path,
        args.output_images_prefix,
        args.tmp_files_path,
        args.colormap_name,
        args.missing_data_color,
        args.reverse_colormap,
    )
    if args.output_video:
        input_images_pattern = f"{args.tmp_files_path / args.output_images_prefix}_frame_%04d.png"
        create_video_from_images(input_images_pattern, args.output_video_path, calculate_average_frame_rate(dataset))
    if not args.output_images:
        # The frames were only needed to assemble the video, so clean them up.
        for image in images:
            image.unlink()
    return 0


if __name__ == "__main__":
    exit(main())
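
As a side note, the pivot step in render_2d_images can be illustrated with a minimal, self-contained sketch on synthetic data; the row-major id layout is an assumption, only the 2048-column width comes from the script:

# Synthetic example of the range-image pivot: ids map to columns via modulo 2048,
# and pivoting yields a (ring x horizontal_position) grid with NaN for missing returns.
import numpy as np
from pandas import DataFrame

points = DataFrame(
    {
        "original_id": [0, 1, 2, 3, 2048, 2049, 2050, 2051],  # assumed row-major ids
        "ring": [0, 0, 0, 0, 1, 1, 1, 1],
        "range": np.linspace(1.0, 8.0, 8),
    }
)
points["horizontal_position"] = points["original_id"] % 2048
image = points.pivot(index="ring", columns="horizontal_position", values="range")
print(image.shape)  # (2, 4) here; (number_of_rings, 2048) for a full scan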

128
tools/render3d.py Normal file

@@ -0,0 +1,128 @@
from configargparse import ArgParser, YAMLConfigFileParser, ArgumentDefaultsRawHelpFormatter
from sys import exit
from open3d.visualization.rendering import OffscreenRenderer, MaterialRecord
from open3d.io import read_pinhole_camera_parameters, write_image
from open3d.utility import Vector3dVector
from pathlib import Path
from pointcloudset import Dataset
from rich.progress import track
import matplotlib.pyplot as plt
import numpy as np

from util import (
    load_dataset_from_bag,
    existing_file,
    create_video_from_images,
    calculate_average_frame_rate,
)


def render_3d_images(
    dataset: Dataset,
    camera_config_input_json_path: Path,
    output_images_path: Path,
    image_pattern_prefix: str,
) -> list[Path]:
    camera_params = read_pinhole_camera_parameters(camera_config_input_json_path.as_posix())
    width, height = 1920, 1080
    renderer = OffscreenRenderer(width, height)
    renderer.setup_camera(
        intrinsic_matrix=camera_params.intrinsic.intrinsic_matrix,
        extrinsic_matrix=camera_params.extrinsic,
        intrinsic_height_px=camera_params.intrinsic.height,
        intrinsic_width_px=camera_params.intrinsic.width,
    )
    renderer.scene.set_background([1, 1, 1, 1])

    def color_points_by_range(pcd):
        # Color every point by its distance from the sensor origin using the jet colormap.
        points = np.asarray(pcd.points)
        distances = np.linalg.norm(points, axis=1)
        max_distance = distances.max()
        min_distance = distances.min()
        normalized_distances = (distances - min_distance) / (max_distance - min_distance)
        colors = plt.get_cmap("jet")(normalized_distances)[:, :3]
        pcd.colors = Vector3dVector(colors)
        return pcd

    rendered_images = []
    for i, pc in track(enumerate(dataset, 1), description="Rendering images...", total=len(dataset)):
        o3d_pc = pc.to_instance("open3d")
        o3d_pc = color_points_by_range(o3d_pc)
        renderer.scene.add_geometry("point_cloud", o3d_pc, MaterialRecord())
        image_path = output_images_path / f"{image_pattern_prefix}_{i:04d}.png"
        write_image(image_path.as_posix(), renderer.render_to_image())
        renderer.scene.remove_geometry("point_cloud")
        rendered_images.append(image_path)
    return rendered_images
def main() -> int:
    parser = ArgParser(
        config_file_parser_class=YAMLConfigFileParser,
        default_config_files=["render3d_config.yaml"],
        formatter_class=ArgumentDefaultsRawHelpFormatter,
        description="Render a 3d representation of a point cloud",
    )
    parser.add_argument("--render-config-file", is_config_file=True, help="yaml config file path")
    parser.add_argument("--input-bag-path", required=True, type=existing_file, help="path to bag file")
    parser.add_argument(
        "--tmp-files-path", default=Path("./tmp"), type=Path, help="path temporary files will be written to"
    )
    parser.add_argument(
        "--output-images", type=bool, default=True, help="if rendered frames should be outputted as images"
    )
    parser.add_argument(
        "--output-images-path", default=Path("./output"), type=Path, help="path rendered frames should be written to"
    )
    parser.add_argument(
        "--output-video", type=bool, default=True, help="if rendered frames should be outputted as a video"
    )
    parser.add_argument(
        "--output-video-path",
        default=Path("./output/3d_render.mp4"),
        type=Path,
        help="path rendered video should be written to",
    )
    parser.add_argument("--output-images-prefix", default="3d_render", type=str, help="filename prefix for output")
    parser.add_argument(
        "--camera-config-input-json-path",
        default="./saved_camera_settings.json",
        type=existing_file,
        help="path to json file containing camera settings (can be created with the create_camera_settings.py script)",
    )
    args = parser.parse_args()

    if args.output_images:
        args.output_images_path.mkdir(parents=True, exist_ok=True)
        args.tmp_files_path = args.output_images_path
    else:
        args.tmp_files_path.mkdir(parents=True, exist_ok=True)
    if args.output_video:
        args.output_video_path.parent.mkdir(parents=True, exist_ok=True)

    dataset = load_dataset_from_bag(args.input_bag_path)
    images = render_3d_images(
        dataset, args.camera_config_input_json_path, args.tmp_files_path, args.output_images_prefix
    )
    if args.output_video:
        input_images_pattern = f"{args.tmp_files_path / args.output_images_prefix}_%04d.png"
        create_video_from_images(input_images_pattern, args.output_video_path, calculate_average_frame_rate(dataset))
    if not args.output_images:
        for image in images:
            image.unlink()
    return 0


if __name__ == "__main__":
    exit(main())
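
For reference, the per-point colouring used in render_3d_images can be tried in isolation; this is a sketch with randomly generated points rather than data from a bag:

# Synthetic sketch of range-based colouring: map each point's distance from the
# origin onto the jet colormap and store it as per-point RGB colours.
import matplotlib.pyplot as plt
import numpy as np
from open3d.geometry import PointCloud
from open3d.utility import Vector3dVector

points = np.random.uniform(-10.0, 10.0, size=(1000, 3))
pcd = PointCloud()
pcd.points = Vector3dVector(points)

distances = np.linalg.norm(points, axis=1)
normalized = (distances - distances.min()) / (distances.max() - distances.min())
pcd.colors = Vector3dVector(plt.get_cmap("jet")(normalized)[:, :3])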

67
tools/util.py Normal file

@@ -0,0 +1,67 @@
from pointcloudset import Dataset
from pathlib import Path
from argparse import ArgumentTypeError
from subprocess import run
from datetime import timedelta
from matplotlib.colors import Colormap
from matplotlib import colormaps


def load_dataset_from_bag(bag_file_path: Path, pointcloud_topic: str = "/ouster/points") -> Dataset:
    return Dataset.from_file(bag_file_path, topic=pointcloud_topic)


def calculate_average_frame_rate(dataset: Dataset) -> float:
    timestamps = dataset.timestamps
    time_deltas = [timestamps[i + 1] - timestamps[i] for i in range(len(timestamps) - 1)]
    average_delta = sum(time_deltas, timedelta()) / len(time_deltas)
    average_frame_rate = 1 / average_delta.total_seconds()
    return average_frame_rate


def existing_file(path_string: str) -> Path:
    path = Path(path_string)
    if not path.exists():
        raise ArgumentTypeError(f"{path} does not exist!")
    if not path.is_file():
        raise ArgumentTypeError(f"{path} is not a valid file!")
    return path


def existing_folder(path_string: str) -> Path:
    path = Path(path_string)
    if not path.exists():
        raise ArgumentTypeError(f"{path} does not exist!")
    if not path.is_dir():
        raise ArgumentTypeError(f"{path} is not a valid folder!")
    return path


def get_colormap_with_special_missing_color(
    colormap_name: str, missing_data_color: str = "black", reverse: bool = False
) -> Colormap:
    # Work on a copy so the globally registered colormap is not modified;
    # missing returns show up as NaN after the pivot and get a dedicated color.
    colormap = (colormaps[f"{colormap_name}_r"] if reverse else colormaps[colormap_name]).copy()
    colormap.set_bad(missing_data_color)
    return colormap


def create_video_from_images(input_images_pattern: str, output_file: Path, frame_rate: float) -> None:
    # Construct the ffmpeg command
    command = [
        "ffmpeg",
        "-y",
        "-framerate",
        str(frame_rate),
        "-i",
        input_images_pattern,
        "-c:v",
        "libx264",
        "-profile:v",
        "high",
        "-crf",
        "20",
        "-pix_fmt",
        "yuv420p",
        output_file.as_posix(),
    ]
    run(command, check=True)
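
A brief sketch of how these helpers compose outside the render scripts; the bag path and image pattern below are placeholders that follow the naming used in render2d.py:

# Hypothetical usage sketch: derive the playback frame rate from the bag's timestamps
# and stitch previously rendered frames into an mp4 via ffmpeg.
from pathlib import Path

from util import calculate_average_frame_rate, create_video_from_images, load_dataset_from_bag

dataset = load_dataset_from_bag(Path("experiment.bag"))  # placeholder bag file
frame_rate = calculate_average_frame_rate(dataset)
create_video_from_images("output/2d_render_frame_%04d.png", Path("output/2d_render.mp4"), frame_rate)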