#!/usr/bin/env python3
from __future__ import annotations
import argparse
from pathlib import Path
from typing import Any
import numpy as np
import rerun as rr
import rerun.blueprint as rrb
from rerun.blueprint import archetypes as rrba
from rerun.blueprint.components import BackgroundKind
from .loaders import load_frames, load_depth, load_trajectory, load_body_data, load_hand_data
DESCRIPTION = """
# SEA Scenes
This example visualizes the [SEA dataset](https://huggingface.co/datasets/spatial-ai/sea-small) using Rerun.
Spatial Everyday Activities (SEA) is an egocentric dataset designed for training robotic foundation models.
It comprises approximately 10,000 hours of egocentric data collected by computer vision experts across a diverse range of locations in the US and EU.
""".strip()
Color = tuple[float, float, float, float]
SEQUENCE_ROOT = Path("./dataset")
AVAILABLE_SEQUENCES = [s.name for s in SEQUENCE_ROOT.iterdir() if s.is_dir()]
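# Each sequence is expected at ./dataset/<sequence-id>/ containing the stereo/,
# depth/, body_data.bin and hand_data.bin contents that log_sea() reads below.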
CAMERA_LEFT_ENTITY_PATH = "world/camera_left"
CAMERA_RIGHT_ENTITY_PATH = "world/camera_right"
TRAJECTORY_LEFT_ENTITY_PATH = "world/trajectory_left"
TRAJECTORY_RIGHT_ENTITY_PATH = "world/trajectory_right"
BODY_ENTITY_PATH = "world/body"
HAND_LEFT_ENTITY_PATH = "world/hand_left"
HAND_RIGHT_ENTITY_PATH = "world/hand_right"
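# Index pairs drawn as skeleton segments; the indices refer to the keypoint order
# returned by load_body_data / load_hand_data (see log_keypoints below).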
BODY_CONNECTIONS = [
(6, 14), (14, 15), (15, 16), (16, 17), # Left arm
(6, 9), (9, 10), (10, 11), (11, 12), # Right arm
(2, 3), (3, 4), (4, 5), (5, 6), (6, 7), # Spine
]
HAND_CONNECTIONS = [
(1, 2), (2, 3), (3, 4), (4, 5), # Thumb
(1, 6), (6, 7), (7, 8), (8, 9), (9, 10), # Index
(1, 11), (11, 12), (12, 13), (13, 14), (14, 15), # Middle finger
(1, 16), (16, 17), (17, 18), (18, 19), (19, 20), # Ring finger
(1, 21), (21, 22), (22, 23), (23, 24), (24, 25), # Little finger
]
def log_camera(
intrinsics: np.ndarray,
translation: np.ndarray,
rotation_xyzw: np.ndarray,
entity_id: str,
) -> None:
"""Log a pinhole camera and its transform."""
w, h, fx, fy, cx, cy = intrinsics
intrinsic = np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]], dtype=np.float32)
rr.log(
entity_id,
rr.Transform3D(
translation=translation,
rotation=rr.Quaternion(xyzw=rotation_xyzw),
),
)
rr.log(
entity_id,
rr.Pinhole(
image_from_camera=intrinsic,
resolution=[int(w), int(h)],
camera_xyz=rr.ViewCoordinates.LEFT_HAND_Y_UP,
image_plane_distance=2e-1,
),
)
def log_trajectory(
positions: list[list[float]],
entity_id: str,
color: Color = (1.0, 1.0, 1.0, 1.0),
radii: float = 0.0025,
) -> None:
"""Log a simple 3D trajectory as a line strip."""
strips = np.array(positions, dtype=np.float32)
rr.log(
entity_id,
rr.LineStrips3D(
strips=[strips],
colors=[color],
radii=[radii],
),
)
def log_keypoints(
keypoints: list[Any],
connections: list[tuple[int, int]],
entity_id: str,
color: Color = (1.0, 1.0, 1.0, 1.0),
radii: float = 0.0075,
) -> None:
"""Log a set of 3D keypoints as point primitives and connections."""
if not keypoints:
return
positions = np.array([[keypoint.position.x, keypoint.position.y, keypoint.position.z]
for keypoint in keypoints], dtype=np.float32)
rr.log(
f"{entity_id}/keypoints",
rr.Points3D(
positions=positions,
colors=[color],
radii=radii,
),
)
strips = np.array([[positions[connection[0]], positions[connection[1]]]
for connection in connections], dtype=np.float32)
rr.log(
f"{entity_id}/connections",
rr.LineStrips3D(
strips=strips,
colors=[color],
radii=[radii * 0.25],
),
)
def log_sea(sequence_path: Path) -> None:
"""
Logs SEA sequence data using Rerun.
Args:
----
sequence_path (Path):
The path to the SEA recording.
Returns
-------
None
"""
left_frames_path = sequence_path / "stereo" / "left_frames.dat"
right_frames_path = sequence_path / "stereo" / "right_frames.dat"
depth_path = sequence_path / "depth"
left_intrinsics_path = sequence_path / "stereo" / "left_intrinsics.txt"
right_intrinsics_path = sequence_path / "stereo" / "right_intrinsics.txt"
left_trajectory_path = sequence_path / "stereo" / "left_trajectory.bin"
right_trajectory_path = sequence_path / "stereo" / "right_trajectory.bin"
body_data_path = sequence_path / "body_data.bin"
hand_data_path = sequence_path / "hand_data.bin"
# Load frames
left_frames = load_frames(left_frames_path)
right_frames = load_frames(right_frames_path)
# Load depth
depth_frames = load_depth(depth_path)
# Load intrinsics
left_intrinsics = np.loadtxt(left_intrinsics_path)
right_intrinsics = np.loadtxt(right_intrinsics_path)
# Load trajectories
left_trajectory = load_trajectory(left_trajectory_path)
right_trajectory = load_trajectory(right_trajectory_path)
# Load body and hand data
body_data = load_body_data(body_data_path)
    hand_data = load_hand_data(hand_data_path)
# World coordinate system
rr.log("world", rr.ViewCoordinates.LEFT_HAND_Y_UP, static=True)
# Log left and right images
for timestamp, image in left_frames:
rr.set_time("time", timestamp=timestamp * 1e-3)
rr.log(f"{CAMERA_LEFT_ENTITY_PATH}/bgr", rr.Image(image, color_model="BGR"))
for timestamp, image in right_frames:
rr.set_time("time", timestamp=timestamp * 1e-3)
rr.log(f"{CAMERA_RIGHT_ENTITY_PATH}/bgr", rr.Image(image, color_model="BGR"))
# Log depth
for timestamp, depth in depth_frames:
rr.set_time("time", timestamp=timestamp * 1e-3)
rr.log(f"{CAMERA_LEFT_ENTITY_PATH}/depth",
rr.DepthImage(depth, meter=1.0, colormap="viridis", depth_range=(0.0, 1.0)),
)
# Log left camera poses and trajectory
cumulative_xyz: list[list[float]] = []
for timestamp, pos, quat in left_trajectory:
rr.set_time("time", timestamp=timestamp * 1e-3)
log_camera(left_intrinsics, pos, quat, CAMERA_LEFT_ENTITY_PATH)
cumulative_xyz.append(pos.tolist())
log_trajectory(cumulative_xyz, TRAJECTORY_LEFT_ENTITY_PATH, color=(1.0, 1.0, 0.0, 1.0))
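    # Log right camera poses and trajectory.
    # NOTE: this block assumes the right camera should be visualized the same way as
    # the left one; right_intrinsics/right_trajectory are loaded above but otherwise
    # unused. The cyan trajectory color is an arbitrary choice.
    cumulative_xyz_right: list[list[float]] = []
    for timestamp, pos, quat in right_trajectory:
        rr.set_time("time", timestamp=timestamp * 1e-3)
        log_camera(right_intrinsics, pos, quat, CAMERA_RIGHT_ENTITY_PATH)
        cumulative_xyz_right.append(pos.tolist())
        log_trajectory(cumulative_xyz_right, TRAJECTORY_RIGHT_ENTITY_PATH, color=(0.0, 1.0, 1.0, 1.0))
    # Log body skeleton keypoints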
for timestamp, keypoints in body_data:
rr.set_time("time", timestamp=timestamp * 1e-3)
log_keypoints(keypoints, BODY_CONNECTIONS, BODY_ENTITY_PATH, color=(0.8, 0.0, 1.0, 1.0))
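    # Log left and right hand keypoints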
for timestamp, left_keypoints, right_keypoints in hand_data:
rr.set_time("time", timestamp=timestamp * 1e-3)
log_keypoints(left_keypoints, HAND_CONNECTIONS, HAND_LEFT_ENTITY_PATH, color=(0.3, 0.0, 0.4, 1.0))
log_keypoints(right_keypoints, HAND_CONNECTIONS, HAND_RIGHT_ENTITY_PATH, color=(0.3, 0.0, 0.4, 1.0))
def main() -> None:
parser = argparse.ArgumentParser(description=DESCRIPTION)
parser.add_argument(
"--sequence",
type=str,
choices=AVAILABLE_SEQUENCES,
default="0aeb0c00-ef9c-4325-b005-53ace076b641",
help="Sequence ID of the SEA dataset",
)
rr.script_add_args(parser)
args = parser.parse_args()
blueprint = rrb.Horizontal(
rrb.Spatial3DView(
name="3D",
origin="world",
background=rrba.Background(
kind=BackgroundKind.SolidColor,
color=[0, 0, 0, 255],
)
),
rrb.Vertical(
rrb.Spatial2DView(
name="Left",
origin=CAMERA_LEFT_ENTITY_PATH,
contents=["$origin/bgr"],
),
rrb.Spatial2DView(
name="Right",
origin=CAMERA_RIGHT_ENTITY_PATH,
contents=["$origin/bgr"],
),
rrb.Spatial2DView(
name="Depth",
origin=CAMERA_LEFT_ENTITY_PATH,
contents=["$origin/depth"],
),
name="2D",
),
)
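    # Connect/spawn/save according to the CLI flags added by rr.script_add_args,
    # send the viewer layout, then stream the selected sequence.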
rr.script_setup(args, "sea_scenes")
rr.send_blueprint(blueprint)
log_sea(SEQUENCE_ROOT / args.sequence)
rr.script_teardown(args)
if __name__ == "__main__":
main()