diff --git a/README.md b/README.md index ae91ecfbaf..572bf73605 100644 --- a/README.md +++ b/README.md @@ -148,11 +148,9 @@ Modules are subsystems on a robot that operate autonomously and communicate with ```py import threading, time, numpy as np -from dimos.core import In, Module, Out, rpc -from dimos.core.blueprints import autoconnect +from dimos.core import In, Module, Out, rpc, autoconnect from dimos.msgs.geometry_msgs import Twist -from dimos.msgs.sensor_msgs import Image -from dimos.msgs.sensor_msgs.Image import ImageFormat +from dimos.msgs.sensor_msgs import Image, ImageFormat class RobotConnection(Module): cmd_vel: In[Twist] @@ -195,10 +193,9 @@ Blueprints can be composed, remapped, and have transports overridden if `autocon A blueprint example that connects the image stream from a robot to an LLM Agent for reasoning and action execution. ```py -from dimos.core.blueprints import autoconnect -from dimos.core.transport import LCMTransport +from dimos.core import autoconnect, LCMTransport from dimos.msgs.sensor_msgs import Image -from dimos.robot.unitree.connection.go2 import go2_connection +from dimos.robot.unitree.go2.connection import go2_connection from dimos.agents.agent import llm_agent blueprint = autoconnect( @@ -207,7 +204,8 @@ blueprint = autoconnect( ).transports({("color_image", Image): LCMTransport("/color_image", Image)}) # Run the blueprint -blueprint.build().loop() +if __name__ == "__main__": + blueprint.build().loop() ``` # Development diff --git a/dimos/agents/__init__.py b/dimos/agents/__init__.py index f058f24d2f..ab3c67cfa5 100644 --- a/dimos/agents/__init__.py +++ b/dimos/agents/__init__.py @@ -1,18 +1,12 @@ -from dimos.agents.agent import Agent, deploy -from dimos.agents.spec import AgentSpec -from dimos.agents.vlm_agent import VLMAgent -from dimos.agents.vlm_stream_tester import VlmStreamTester -from dimos.protocol.skill.skill import skill -from dimos.protocol.skill.type import Output, Reducer, Stream +import lazy_loader as lazy 
-__all__ = [ - "Agent", - "AgentSpec", - "Output", - "Reducer", - "Stream", - "VLMAgent", - "VlmStreamTester", - "deploy", - "skill", -] +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "agent": ["Agent", "deploy"], + "spec": ["AgentSpec"], + "vlm_agent": ["VLMAgent"], + "vlm_stream_tester": ["VlmStreamTester"], + "_skill_exports": ["skill", "Output", "Reducer", "Stream"], + }, +) diff --git a/dimos/agents/_skill_exports.py b/dimos/agents/_skill_exports.py new file mode 100644 index 0000000000..a04093b20b --- /dev/null +++ b/dimos/agents/_skill_exports.py @@ -0,0 +1,18 @@ +# Copyright 2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dimos.protocol.skill.skill import skill +from dimos.protocol.skill.type import Output, Reducer, Stream + +__all__ = ["Output", "Reducer", "Stream", "skill"] diff --git a/dimos/agents/skills/conftest.py b/dimos/agents/skills/conftest.py index 0e2e3e0636..ae5566b9f9 100644 --- a/dimos/agents/skills/conftest.py +++ b/dimos/agents/skills/conftest.py @@ -21,7 +21,7 @@ from dimos.agents.skills.gps_nav_skill import GpsNavSkillContainer from dimos.agents.skills.navigation import NavigationSkillContainer from dimos.agents.system_prompt import SYSTEM_PROMPT -from dimos.robot.unitree_webrtc.unitree_skill_container import UnitreeSkillContainer +from dimos.robot.unitree.unitree_skill_container import UnitreeSkillContainer @pytest.fixture(autouse=True) diff --git a/dimos/agents/skills/person_follow.py b/dimos/agents/skills/person_follow.py index 830e532f13..91ba5d01b8 100644 --- a/dimos/agents/skills/person_follow.py +++ b/dimos/agents/skills/person_follow.py @@ -75,7 +75,7 @@ def __init__( # Use MuJoCo camera intrinsics in simulation mode if self._global_config.simulation: - from dimos.robot.unitree_webrtc.mujoco_connection import MujocoConnection + from dimos.robot.unitree.mujoco_connection import MujocoConnection camera_info = MujocoConnection.camera_info_static diff --git a/dimos/agents/spec.py b/dimos/agents/spec.py index b0a0324e89..02eef359de 100644 --- a/dimos/agents/spec.py +++ b/dimos/agents/spec.py @@ -22,7 +22,6 @@ if TYPE_CHECKING: from dimos.protocol.skill.skill import SkillContainer -from langchain.chat_models.base import _SUPPORTED_PROVIDERS from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import ( AIMessage, @@ -45,8 +44,31 @@ logger = setup_logger() -# Dynamically create ModelProvider enum from LangChain's supported providers -_providers = {provider.upper(): provider for provider in _SUPPORTED_PROVIDERS} +# FIXME: I dont see a stable (and dynamic) way to get these, this is only for type-hints and Paul's 
PR should replace this in a couple of days (this is a stop-gap change to get CI passing) +_providers = [ + "ANTHROPIC", + "AZURE_AI", + "AZURE_OPENAI", + "BEDROCK", + "BEDROCK_CONVERSE", + "COHERE", + "DEEPSEEK", + "FIREWORKS", + "GOOGLE_ANTHROPIC_VERTEX", + "GOOGLE_GENAI", + "GOOGLE_VERTEXAI", + "GROQ", + "HUGGINGFACE", + "IBM", + "MISTRALAI", + "NVIDIA", + "OLLAMA", + "OPENAI", + "PERPLEXITY", + "TOGETHER", + "UPSTAGE", + "XAI", +] Provider = Enum("Provider", _providers, type=str) # type: ignore[misc] diff --git a/dimos/agents/test_mock_agent.py b/dimos/agents/test_mock_agent.py index 4f449e973a..37eac2bf31 100644 --- a/dimos/agents/test_mock_agent.py +++ b/dimos/agents/test_mock_agent.py @@ -26,7 +26,7 @@ from dimos.msgs.geometry_msgs import PoseStamped, Vector3 from dimos.msgs.sensor_msgs import Image, PointCloud2 from dimos.protocol.skill.test_coordinator import SkillContainerTest -from dimos.robot.unitree.connection.go2 import GO2Connection +from dimos.robot.unitree.go2.connection import GO2Connection @pytest.mark.integration diff --git a/dimos/control/__init__.py b/dimos/control/__init__.py index 50d0330107..23ac02836b 100644 --- a/dimos/control/__init__.py +++ b/dimos/control/__init__.py @@ -49,52 +49,34 @@ >>> coord.start() """ -from dimos.control.components import ( - HardwareComponent, - HardwareId, - HardwareType, - JointName, - JointState, - make_joints, -) -from dimos.control.coordinator import ( - ControlCoordinator, - ControlCoordinatorConfig, - TaskConfig, - control_coordinator, -) -from dimos.control.hardware_interface import ConnectedHardware -from dimos.control.task import ( - ControlMode, - ControlTask, - CoordinatorState, - JointCommandOutput, - JointStateSnapshot, - ResourceClaim, -) -from dimos.control.tick_loop import TickLoop +import lazy_loader as lazy -__all__ = [ - # Connected hardware - "ConnectedHardware", - # Coordinator - "ControlCoordinator", - "ControlCoordinatorConfig", - "ControlMode", - # Task protocol and types - 
"ControlTask", - "CoordinatorState", - "HardwareComponent", - "HardwareId", - "HardwareType", - "JointCommandOutput", - "JointName", - "JointState", - "JointStateSnapshot", - "ResourceClaim", - "TaskConfig", - # Tick loop - "TickLoop", - "control_coordinator", - "make_joints", -] +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "components": [ + "HardwareComponent", + "HardwareId", + "HardwareType", + "JointName", + "JointState", + "make_joints", + ], + "coordinator": [ + "ControlCoordinator", + "ControlCoordinatorConfig", + "TaskConfig", + "control_coordinator", + ], + "hardware_interface": ["ConnectedHardware"], + "task": [ + "ControlMode", + "ControlTask", + "CoordinatorState", + "JointCommandOutput", + "JointStateSnapshot", + "ResourceClaim", + ], + "tick_loop": ["TickLoop"], + }, +) diff --git a/dimos/control/tasks/cartesian_ik_task.py b/dimos/control/tasks/cartesian_ik_task.py index 3f0ad70915..7ff5b21e52 100644 --- a/dimos/control/tasks/cartesian_ik_task.py +++ b/dimos/control/tasks/cartesian_ik_task.py @@ -248,10 +248,11 @@ def _get_current_joints(self, state: CoordinatorState) -> NDArray[np.floating[An if pos is None: # Fallback to last solution if self._last_q_solution is not None: - return self._last_q_solution.copy() + result: NDArray[np.floating[Any]] = self._last_q_solution.copy() + return result return None positions.append(pos) - return np.array(positions) + return np.array(positions, dtype=np.float64) def _solve_ik( self, diff --git a/dimos/core/__init__.py b/dimos/core/__init__.py index 5a4acc1762..55bd566657 100644 --- a/dimos/core/__init__.py +++ b/dimos/core/__init__.py @@ -5,60 +5,41 @@ import time from typing import TYPE_CHECKING, cast -from dask.distributed import Client, LocalCluster from rich.console import Console -import dimos.core.colors as colors from dimos.core.core import rpc -from dimos.core.module import Module, ModuleBase, ModuleConfig, ModuleConfigT -from dimos.core.rpc_client import RPCClient -from 
dimos.core.stream import In, Out, RemoteIn, RemoteOut, Transport -from dimos.core.transport import ( - LCMTransport, - SHMTransport, - ZenohTransport, - pLCMTransport, - pSHMTransport, -) -from dimos.protocol.rpc import LCMRPC -from dimos.protocol.rpc.spec import RPCSpec -from dimos.protocol.tf import LCMTF, TF, PubSubTF, TFConfig, TFSpec -from dimos.utils.actor_registry import ActorRegistry from dimos.utils.logging_config import setup_logger +import lazy_loader as lazy if TYPE_CHECKING: # Avoid runtime import to prevent circular import; ruff's TC001 would otherwise move it. + from dask.distributed import LocalCluster + + from dimos.core._dask_exports import DimosCluster + from dimos.core.module import Module from dimos.core.rpc_client import ModuleProxy logger = setup_logger() -__all__ = [ - "LCMRPC", - "LCMTF", - "TF", - "DimosCluster", - "In", - "LCMTransport", - "Module", - "ModuleBase", - "ModuleConfig", - "ModuleConfigT", - "Out", - "PubSubTF", - "RPCSpec", - "RemoteIn", - "RemoteOut", - "SHMTransport", - "TFConfig", - "TFSpec", - "Transport", - "ZenohTransport", - "colors", - "pLCMTransport", - "pSHMTransport", - "rpc", - "start", -] +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submodules=["colors"], + submod_attrs={ + "blueprints": ["autoconnect", "Blueprint"], + "_dask_exports": ["DimosCluster"], + "_protocol_exports": ["LCMRPC", "RPCSpec", "LCMTF", "TF", "PubSubTF", "TFConfig", "TFSpec"], + "module": ["Module", "ModuleBase", "ModuleConfig", "ModuleConfigT"], + "stream": ["In", "Out", "RemoteIn", "RemoteOut", "Transport"], + "transport": [ + "LCMTransport", + "SHMTransport", + "ZenohTransport", + "pLCMTransport", + "pSHMTransport", + ], + }, +) +__all__ += ["DimosCluster", "Module", "rpc", "start", "wait_exit"] class CudaCleanupPlugin: @@ -91,10 +72,10 @@ def teardown(self, worker) -> None: # type: ignore[no-untyped-def] def patch_actor(actor, cls) -> None: ... 
# type: ignore[no-untyped-def] -DimosCluster = Client +def patchdask(dask_client: DimosCluster, local_cluster: LocalCluster) -> DimosCluster: + from dimos.core.rpc_client import RPCClient + from dimos.utils.actor_registry import ActorRegistry - -def patchdask(dask_client: Client, local_cluster: LocalCluster) -> DimosCluster: def deploy( # type: ignore[no-untyped-def] actor_class: type[Module], *args, @@ -129,6 +110,7 @@ def deploy( # type: ignore[no-untyped-def] def check_worker_memory() -> None: """Check memory usage of all workers.""" info = dask_client.scheduler_info() + console = Console() total_workers = len(info.get("workers", {})) total_memory_used = 0 @@ -263,6 +245,8 @@ def start(n: int | None = None, memory_limit: str = "auto") -> DimosCluster: DimosCluster: A patched Dask client with deploy(), check_worker_memory(), stop(), and close_all() methods """ + from dask.distributed import Client, LocalCluster + console = Console() if not n: n = mp.cpu_count() diff --git a/dimos/core/_dask_exports.py b/dimos/core/_dask_exports.py new file mode 100644 index 0000000000..cb257e7804 --- /dev/null +++ b/dimos/core/_dask_exports.py @@ -0,0 +1,17 @@ +# Copyright 2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dask.distributed import Client as DimosCluster + +__all__ = ["DimosCluster"] diff --git a/dimos/core/_protocol_exports.py b/dimos/core/_protocol_exports.py new file mode 100644 index 0000000000..be77fd8323 --- /dev/null +++ b/dimos/core/_protocol_exports.py @@ -0,0 +1,19 @@ +# Copyright 2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from dimos.protocol.rpc import LCMRPC +from dimos.protocol.rpc.spec import RPCSpec +from dimos.protocol.tf import LCMTF, TF, PubSubTF, TFConfig, TFSpec + +__all__ = ["LCMRPC", "LCMTF", "TF", "PubSubTF", "RPCSpec", "TFConfig", "TFSpec"] diff --git a/dimos/core/core.py b/dimos/core/core.py index e7a7d09f58..6c95700926 100644 --- a/dimos/core/core.py +++ b/dimos/core/core.py @@ -17,7 +17,6 @@ from typing import ( TYPE_CHECKING, - Any, TypeVar, ) @@ -30,7 +29,12 @@ register_picklers() T = TypeVar("T") +from typing import ParamSpec, TypeVar -def rpc(fn: Callable[..., Any]) -> Callable[..., Any]: +P = ParamSpec("P") +R = TypeVar("R") + + +def rpc(fn: Callable[P, R]) -> Callable[P, R]: fn.__rpc__ = True # type: ignore[attr-defined] return fn diff --git a/dimos/core/test_core.py b/dimos/core/test_core.py index 2177071211..ddacb9dbf8 100644 --- a/dimos/core/test_core.py +++ b/dimos/core/test_core.py @@ -24,12 +24,11 @@ Out, pLCMTransport, rpc, - start, ) from dimos.core.testing import MockRobotClient, dimos from dimos.msgs.geometry_msgs import Vector3 from dimos.msgs.sensor_msgs import PointCloud2 -from 
dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.odometry import Odometry assert dimos @@ -138,8 +137,3 @@ def test_basic_deployment(dimos) -> None: assert nav.lidar_msg_count >= 8 dimos.shutdown() - - -if __name__ == "__main__": - client = start(1) # single process for CI memory - test_deployment(client) diff --git a/dimos/core/test_stream.py b/dimos/core/test_stream.py index b963022c50..836f879b67 100644 --- a/dimos/core/test_stream.py +++ b/dimos/core/test_stream.py @@ -25,7 +25,7 @@ ) from dimos.core.testing import MockRobotClient, dimos from dimos.msgs.sensor_msgs import PointCloud2 -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.odometry import Odometry assert dimos diff --git a/dimos/core/testing.py b/dimos/core/testing.py index 38774ef327..88598e74b8 100644 --- a/dimos/core/testing.py +++ b/dimos/core/testing.py @@ -20,8 +20,8 @@ from dimos.core import In, Module, Out, rpc, start from dimos.msgs.geometry_msgs import Vector3 from dimos.msgs.sensor_msgs import PointCloud2 -from dimos.robot.unitree_webrtc.type.lidar import pointcloud2_from_webrtc_lidar -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.lidar import pointcloud2_from_webrtc_lidar +from dimos.robot.unitree.type.odometry import Odometry from dimos.utils.testing import SensorReplay diff --git a/dimos/hardware/sensors/camera/gstreamer/gstreamer_camera.py b/dimos/hardware/sensors/camera/gstreamer/gstreamer_camera.py index 949330881a..9161185d50 100644 --- a/dimos/hardware/sensors/camera/gstreamer/gstreamer_camera.py +++ b/dimos/hardware/sensors/camera/gstreamer/gstreamer_camera.py @@ -22,7 +22,9 @@ import numpy as np -from dimos.core import Module, ModuleConfig, Out, rpc +from dimos.core.core import rpc +from dimos.core.module import Module, ModuleConfig +from dimos.core.stream import Out from dimos.msgs.sensor_msgs import Image, ImageFormat from dimos.utils.logging_config 
import setup_logger diff --git a/dimos/hardware/sensors/camera/module.py b/dimos/hardware/sensors/camera/module.py index 251b703d56..1b59fceed3 100644 --- a/dimos/hardware/sensors/camera/module.py +++ b/dimos/hardware/sensors/camera/module.py @@ -20,15 +20,18 @@ import reactivex as rx from reactivex import operators as ops -from dimos.agents import Output, Reducer, Stream, skill -from dimos.core import Module, ModuleConfig, Out, rpc from dimos.core.blueprints import autoconnect +from dimos.core.core import rpc from dimos.core.global_config import GlobalConfig, global_config +from dimos.core.module import Module, ModuleConfig +from dimos.core.stream import Out from dimos.hardware.sensors.camera.spec import CameraHardware from dimos.hardware.sensors.camera.webcam import Webcam from dimos.msgs.geometry_msgs import Quaternion, Transform, Vector3 from dimos.msgs.sensor_msgs.CameraInfo import CameraInfo from dimos.msgs.sensor_msgs.Image import Image, sharpness_barrier +from dimos.protocol.skill.skill import skill +from dimos.protocol.skill.type import Output, Reducer, Stream from dimos.spec import perception from dimos.utils.reactive import iter_observable diff --git a/dimos/hardware/sensors/camera/realsense/camera.py b/dimos/hardware/sensors/camera/realsense/camera.py index 3613dbf0a2..8f31441d20 100644 --- a/dimos/hardware/sensors/camera/realsense/camera.py +++ b/dimos/hardware/sensors/camera/realsense/camera.py @@ -23,8 +23,10 @@ import reactivex as rx from scipy.spatial.transform import Rotation # type: ignore[import-untyped] -from dimos.core import Module, ModuleConfig, Out, rpc +from dimos.core.core import rpc +from dimos.core.module import Module, ModuleConfig from dimos.core.module_coordinator import ModuleCoordinator +from dimos.core.stream import Out from dimos.core.transport import LCMTransport from dimos.hardware.sensors.camera.spec import ( OPTICAL_ROTATION, diff --git a/dimos/hardware/sensors/camera/zed/camera.py 
b/dimos/hardware/sensors/camera/zed/camera.py index 67d80af4b0..171b706ff9 100644 --- a/dimos/hardware/sensors/camera/zed/camera.py +++ b/dimos/hardware/sensors/camera/zed/camera.py @@ -18,12 +18,14 @@ from dataclasses import dataclass, field import threading import time +from typing import TYPE_CHECKING import cv2 import pyzed.sl as sl import reactivex as rx -from dimos.core import Module, ModuleConfig, Out, rpc +from dimos.core.core import rpc +from dimos.core.module import Module, ModuleConfig from dimos.core.module_coordinator import ModuleCoordinator from dimos.core.transport import LCMTransport from dimos.hardware.sensors.camera.spec import ( @@ -39,6 +41,9 @@ from dimos.spec import perception from dimos.utils.reactive import backpressure +if TYPE_CHECKING: + from dimos.core.stream import Out + def default_base_transform() -> Transform: """Default identity transform for camera mounting.""" diff --git a/dimos/hardware/sensors/fake_zed_module.py b/dimos/hardware/sensors/fake_zed_module.py index e8fc51bf31..ec5613077d 100644 --- a/dimos/hardware/sensors/fake_zed_module.py +++ b/dimos/hardware/sensors/fake_zed_module.py @@ -24,7 +24,9 @@ from dimos_lcm.sensor_msgs import CameraInfo import numpy as np -from dimos.core import Module, ModuleConfig, Out, rpc +from dimos.core.core import rpc +from dimos.core.module import Module, ModuleConfig +from dimos.core.stream import Out from dimos.msgs.geometry_msgs import PoseStamped from dimos.msgs.sensor_msgs import Image, ImageFormat from dimos.msgs.std_msgs import Header diff --git a/dimos/manipulation/control/coordinator_client.py b/dimos/manipulation/control/coordinator_client.py index b30c1f46f7..5bc3513ae6 100644 --- a/dimos/manipulation/control/coordinator_client.py +++ b/dimos/manipulation/control/coordinator_client.py @@ -49,7 +49,9 @@ from dimos.control.coordinator import ControlCoordinator from dimos.core.rpc_client import RPCClient -from dimos.manipulation.planning import JointTrajectoryGenerator +from 
dimos.manipulation.planning.trajectory_generator.joint_trajectory_generator import ( + JointTrajectoryGenerator, +) if TYPE_CHECKING: from dimos.msgs.trajectory_msgs import JointTrajectory diff --git a/dimos/manipulation/control/dual_trajectory_setter.py b/dimos/manipulation/control/dual_trajectory_setter.py index 4b54f0e3e5..4f8a8802e1 100644 --- a/dimos/manipulation/control/dual_trajectory_setter.py +++ b/dimos/manipulation/control/dual_trajectory_setter.py @@ -34,7 +34,9 @@ import time from dimos import core -from dimos.manipulation.planning import JointTrajectoryGenerator +from dimos.manipulation.planning.trajectory_generator.joint_trajectory_generator import ( + JointTrajectoryGenerator, +) from dimos.msgs.sensor_msgs import JointState from dimos.msgs.trajectory_msgs import JointTrajectory diff --git a/dimos/manipulation/control/trajectory_setter.py b/dimos/manipulation/control/trajectory_setter.py index 5b8b2ff234..bad3854521 100644 --- a/dimos/manipulation/control/trajectory_setter.py +++ b/dimos/manipulation/control/trajectory_setter.py @@ -33,7 +33,9 @@ import time from dimos import core -from dimos.manipulation.planning import JointTrajectoryGenerator +from dimos.manipulation.planning.trajectory_generator.joint_trajectory_generator import ( + JointTrajectoryGenerator, +) from dimos.msgs.sensor_msgs import JointState from dimos.msgs.trajectory_msgs import JointTrajectory diff --git a/dimos/manipulation/manipulation_module.py b/dimos/manipulation/manipulation_module.py index 33dea33697..cc0689f660 100644 --- a/dimos/manipulation/manipulation_module.py +++ b/dimos/manipulation/manipulation_module.py @@ -784,7 +784,7 @@ def _get_gripper_hardware_id(self, robot_name: RobotName | None = None) -> str | if not config.gripper_hardware_id: logger.warning(f"No gripper_hardware_id configured for '{config.name}'") return None - return config.gripper_hardware_id + return str(config.gripper_hardware_id) @rpc def set_gripper(self, position: float, robot_name: RobotName | 
None = None) -> bool: diff --git a/dimos/manipulation/planning/__init__.py b/dimos/manipulation/planning/__init__.py index a9a89a96cd..8aaf0caa25 100644 --- a/dimos/manipulation/planning/__init__.py +++ b/dimos/manipulation/planning/__init__.py @@ -57,55 +57,28 @@ ``` """ -# Factory functions -from dimos.manipulation.planning.factory import ( - create_kinematics, - create_planner, - create_planning_stack, - create_world, +import lazy_loader as lazy + +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "factory": ["create_kinematics", "create_planner", "create_planning_stack", "create_world"], + "spec": [ + "CollisionObjectMessage", + "IKResult", + "IKStatus", + "JointPath", + "KinematicsSpec", + "Obstacle", + "ObstacleType", + "PlannerSpec", + "PlanningResult", + "PlanningStatus", + "RobotModelConfig", + "RobotName", + "WorldRobotID", + "WorldSpec", + ], + "trajectory_generator.joint_trajectory_generator": ["JointTrajectoryGenerator"], + }, ) - -# Data classes and Protocols -from dimos.manipulation.planning.spec import ( - CollisionObjectMessage, - IKResult, - IKStatus, - JointPath, - KinematicsSpec, - Obstacle, - ObstacleType, - PlannerSpec, - PlanningResult, - PlanningStatus, - RobotModelConfig, - RobotName, - WorldRobotID, - WorldSpec, -) - -# Trajectory generation -from dimos.manipulation.planning.trajectory_generator.joint_trajectory_generator import ( - JointTrajectoryGenerator, -) - -__all__ = [ - "CollisionObjectMessage", - "IKResult", - "IKStatus", - "JointPath", - "JointTrajectoryGenerator", - "KinematicsSpec", - "Obstacle", - "ObstacleType", - "PlannerSpec", - "PlanningResult", - "PlanningStatus", - "RobotModelConfig", - "RobotName", - "WorldRobotID", - "WorldSpec", - "create_kinematics", - "create_planner", - "create_planning_stack", - "create_world", -] diff --git a/dimos/manipulation/planning/utils/kinematics_utils.py b/dimos/manipulation/planning/utils/kinematics_utils.py index 80536304a0..c9f3f95a3d 100644 --- 
a/dimos/manipulation/planning/utils/kinematics_utils.py +++ b/dimos/manipulation/planning/utils/kinematics_utils.py @@ -66,7 +66,8 @@ def damped_pseudoinverse( """ JJT = J @ J.T I = np.eye(JJT.shape[0]) - return J.T @ np.linalg.inv(JJT + damping**2 * I) + result: NDArray[np.float64] = J.T @ np.linalg.inv(JJT + damping**2 * I) + return result def check_singularity( diff --git a/dimos/manipulation/planning/world/drake_world.py b/dimos/manipulation/planning/world/drake_world.py index 532ca6d548..b8c5197d94 100644 --- a/dimos/manipulation/planning/world/drake_world.py +++ b/dimos/manipulation/planning/world/drake_world.py @@ -480,7 +480,7 @@ def _create_shape(self, obstacle: Obstacle) -> Any: elif obstacle.obstacle_type == ObstacleType.MESH: if not obstacle.mesh_path: raise ValueError("MESH obstacle requires mesh_path") - return Convex(obstacle.mesh_path) + return Convex(Path(obstacle.mesh_path)) else: raise ValueError(f"Unsupported obstacle type: {obstacle.obstacle_type}") @@ -539,7 +539,7 @@ def clear_obstacles(self) -> None: def _set_preview_colors(self) -> None: """Set all preview robot visual geometries to yellow/semi-transparent.""" - source_id = self._plant.get_source_id() + source_id: Any = self._plant.get_source_id() preview_color = Rgba(1.0, 0.8, 0.0, 0.4) for robot_data in self._robots.values(): @@ -550,11 +550,11 @@ def _set_preview_colors(self) -> None: for geom_id in self._plant.GetVisualGeometriesForBody(body): props = IllustrationProperties() props.AddProperty("phong", "diffuse", preview_color) - self._scene_graph.AssignRole(source_id, geom_id, props, RoleAssign.kReplace) + self._scene_graph.AssignRole(source_id, geom_id, props, RoleAssign.kReplace) # type: ignore[call-overload] def _remove_preview_collision_roles(self) -> None: """Remove proximity (collision) role from all preview robot geometries.""" - source_id = self._plant.get_source_id() + source_id: Any = self._plant.get_source_id() # SourceId for robot_data in self._robots.values(): if 
robot_data.preview_model_instance is None: diff --git a/dimos/models/vl/__init__.py b/dimos/models/vl/__init__.py index e4bb68e03c..482a907cbd 100644 --- a/dimos/models/vl/__init__.py +++ b/dimos/models/vl/__init__.py @@ -1,16 +1,13 @@ -from dimos.models.vl.base import Captioner, VlModel -from dimos.models.vl.florence import Florence2Model -from dimos.models.vl.moondream import MoondreamVlModel -from dimos.models.vl.moondream_hosted import MoondreamHostedVlModel -from dimos.models.vl.openai import OpenAIVlModel -from dimos.models.vl.qwen import QwenVlModel +import lazy_loader as lazy -__all__ = [ - "Captioner", - "Florence2Model", - "MoondreamHostedVlModel", - "MoondreamVlModel", - "OpenAIVlModel", - "QwenVlModel", - "VlModel", -] +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "base": ["Captioner", "VlModel"], + "florence": ["Florence2Model"], + "moondream": ["MoondreamVlModel"], + "moondream_hosted": ["MoondreamHostedVlModel"], + "openai": ["OpenAIVlModel"], + "qwen": ["QwenVlModel"], + }, +) diff --git a/dimos/msgs/sensor_msgs/Image.py b/dimos/msgs/sensor_msgs/Image.py index 2a8aa2c017..eb12b64c0a 100644 --- a/dimos/msgs/sensor_msgs/Image.py +++ b/dimos/msgs/sensor_msgs/Image.py @@ -465,7 +465,7 @@ def lcm_encode(self, frame_id: str | None = None) -> bytes: channels = 1 if self.data.ndim == 2 else self.data.shape[2] msg.step = self.width * self.dtype.itemsize * channels - view = memoryview(np.ascontiguousarray(self.data)).cast("B") + view = memoryview(np.ascontiguousarray(self.data)).cast("B") # type: ignore[arg-type] msg.data_length = len(view) msg.data = view diff --git a/dimos/msgs/sensor_msgs/test_PointCloud2.py b/dimos/msgs/sensor_msgs/test_PointCloud2.py index 52f9b1bd59..501a4cd441 100644 --- a/dimos/msgs/sensor_msgs/test_PointCloud2.py +++ b/dimos/msgs/sensor_msgs/test_PointCloud2.py @@ -17,7 +17,7 @@ import numpy as np from dimos.msgs.sensor_msgs import PointCloud2 -from dimos.robot.unitree_webrtc.type.lidar import 
pointcloud2_from_webrtc_lidar +from dimos.robot.unitree.type.lidar import pointcloud2_from_webrtc_lidar from dimos.utils.testing import SensorReplay diff --git a/dimos/navigation/rosnav.py b/dimos/navigation/rosnav.py index f3e8907bd9..2aa492a509 100644 --- a/dimos/navigation/rosnav.py +++ b/dimos/navigation/rosnav.py @@ -30,8 +30,11 @@ from dimos import spec from dimos.agents import Reducer, Stream, skill # type: ignore[attr-defined] from dimos.core import DimosCluster, In, LCMTransport, Module, Out, rpc -from dimos.core.module import ModuleConfig -from dimos.core.transport import ROSTransport +from dimos.core._dask_exports import DimosCluster +from dimos.core.core import rpc +from dimos.core.module import Module, ModuleConfig +from dimos.core.stream import In, Out +from dimos.core.transport import LCMTransport, ROSTransport from dimos.msgs.geometry_msgs import ( PoseStamped, Quaternion, @@ -45,6 +48,8 @@ from dimos.msgs.std_msgs import Bool, Int8 from dimos.msgs.tf2_msgs.TFMessage import TFMessage from dimos.navigation.base import NavigationInterface, NavigationState +from dimos.protocol.skill.skill import skill +from dimos.protocol.skill.type import Reducer, Stream from dimos.utils.logging_config import setup_logger from dimos.utils.transform_utils import euler_to_quaternion @@ -219,7 +224,7 @@ def current_position(self): # type: ignore[no-untyped-def] continue yield f"current position {tf.translation.x}, {tf.translation.y}" - @skill(stream=Stream.call_agent, reducer=Reducer.string) # type: ignore[arg-type] + @skill(stream=Stream.call_agent, reducer=Reducer.string) # type: ignore[untyped-decorator] def goto(self, x: float, y: float): # type: ignore[no-untyped-def] """ move the robot in relative coordinates @@ -238,7 +243,7 @@ def goto(self, x: float, y: float): # type: ignore[no-untyped-def] self.navigate_to(pose_to) yield "arrived" - @skill(stream=Stream.call_agent, reducer=Reducer.string) # type: ignore[arg-type] + @skill(stream=Stream.call_agent, 
reducer=Reducer.string) # type: ignore[untyped-decorator] def goto_global(self, x: float, y: float) -> Generator[str, None, None]: """ go to coordinates x,y in the map frame diff --git a/dimos/perception/detection/__init__.py b/dimos/perception/detection/__init__.py index b76f497425..ae9f8cb14d 100644 --- a/dimos/perception/detection/__init__.py +++ b/dimos/perception/detection/__init__.py @@ -1,10 +1,10 @@ -from dimos.perception.detection.detectors import Detector, Yolo2DDetector -from dimos.perception.detection.module2D import Detection2DModule -from dimos.perception.detection.module3D import Detection3DModule +import lazy_loader as lazy -__all__ = [ - "Detection2DModule", - "Detection3DModule", - "Detector", - "Yolo2DDetector", -] +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "detectors": ["Detector", "Yolo2DDetector"], + "module2D": ["Detection2DModule"], + "module3D": ["Detection3DModule"], + }, +) diff --git a/dimos/perception/detection/conftest.py b/dimos/perception/detection/conftest.py index 8c6953e410..3b24422c47 100644 --- a/dimos/perception/detection/conftest.py +++ b/dimos/perception/detection/conftest.py @@ -35,8 +35,8 @@ ImageDetections3DPC, ) from dimos.protocol.tf import TF -from dimos.robot.unitree.connection import go2 -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.go2 import connection +from dimos.robot.unitree.type.odometry import Odometry from dimos.utils.data import get_data from dimos.utils.testing import TimedSensorReplay @@ -100,7 +100,7 @@ def moment_provider(**kwargs) -> Moment: if odom_frame is None: raise ValueError("No odom frame found") - transforms = go2.GO2Connection._odom_to_tf(odom_frame) + transforms = connection.GO2Connection._odom_to_tf(odom_frame) tf.receive_transform(*transforms) @@ -108,7 +108,7 @@ def moment_provider(**kwargs) -> Moment: "odom_frame": odom_frame, "lidar_frame": lidar_frame, "image_frame": image_frame, - "camera_info": 
go2._camera_info_static(), + "camera_info": connection._camera_info_static(), "transforms": transforms, "tf": tf, } @@ -260,8 +260,8 @@ def object_db_module(get_moment): from dimos.perception.detection.detectors import Yolo2DDetector module2d = Detection2DModule(detector=lambda: Yolo2DDetector(device="cpu")) - module3d = Detection3DModule(camera_info=go2._camera_info_static()) - moduleDB = ObjectDBModule(camera_info=go2._camera_info_static()) + module3d = Detection3DModule(camera_info=connection._camera_info_static()) + moduleDB = ObjectDBModule(camera_info=connection._camera_info_static()) # Process 5 frames to build up object history for i in range(5): diff --git a/dimos/perception/detection/module3D.py b/dimos/perception/detection/module3D.py index 037376f995..9fe5bd9c6c 100644 --- a/dimos/perception/detection/module3D.py +++ b/dimos/perception/detection/module3D.py @@ -13,6 +13,8 @@ # limitations under the License. +from typing import TYPE_CHECKING, Any + from dimos_lcm.foxglove_msgs.ImageAnnotations import ( ImageAnnotations, ) @@ -21,20 +23,24 @@ from reactivex.observable import Observable from dimos import spec -from dimos.agents import skill # type: ignore[attr-defined] -from dimos.core import DimosCluster, In, Out, rpc +from dimos.core.core import rpc +from dimos.core.stream import In, Out from dimos.msgs.geometry_msgs import PoseStamped, Quaternion, Transform, Vector3 from dimos.msgs.sensor_msgs import Image, PointCloud2 from dimos.msgs.vision_msgs import Detection2DArray from dimos.perception.detection.module2D import Detection2DModule -from dimos.perception.detection.type import ( - ImageDetections2D, - ImageDetections3DPC, -) +from dimos.perception.detection.type.detection2d.imageDetections2D import ImageDetections2D from dimos.perception.detection.type.detection3d import Detection3DPC +from dimos.perception.detection.type.detection3d.imageDetections3DPC import ImageDetections3DPC +from dimos.protocol.skill.skill import skill from 
dimos.types.timestamped import align_timestamped from dimos.utils.reactive import backpressure +if TYPE_CHECKING: + from dask.distributed import Client as DimosCluster +else: + DimosCluster = Any + class Detection3DModule(Detection2DModule): color_image: In[Image] diff --git a/dimos/perception/detection/moduleDB.py b/dimos/perception/detection/moduleDB.py index bbfd45143a..bc0a346a59 100644 --- a/dimos/perception/detection/moduleDB.py +++ b/dimos/perception/detection/moduleDB.py @@ -23,13 +23,15 @@ from lcm_msgs.foxglove_msgs import SceneUpdate # type: ignore[import-not-found] from reactivex.observable import Observable -from dimos.core import In, Out, rpc +from dimos.core.core import rpc +from dimos.core.stream import In, Out from dimos.msgs.geometry_msgs import PoseStamped, Quaternion, Transform, Vector3 from dimos.msgs.sensor_msgs import Image, PointCloud2 from dimos.msgs.vision_msgs import Detection2DArray from dimos.perception.detection.module3D import Detection3DModule -from dimos.perception.detection.type import ImageDetections3DPC, TableStr from dimos.perception.detection.type.detection3d import Detection3DPC +from dimos.perception.detection.type.detection3d.imageDetections3DPC import ImageDetections3DPC +from dimos.perception.detection.type.utils import TableStr # Represents an object in space, as collection of 3d detections over time diff --git a/dimos/perception/detection/reid/module.py b/dimos/perception/detection/reid/module.py index f3f2a5a126..0a359746d3 100644 --- a/dimos/perception/detection/reid/module.py +++ b/dimos/perception/detection/reid/module.py @@ -20,13 +20,15 @@ from reactivex import operators as ops from reactivex.observable import Observable -from dimos.core import In, Module, ModuleConfig, Out, rpc +from dimos.core.core import rpc +from dimos.core.module import Module, ModuleConfig +from dimos.core.stream import In, Out from dimos.msgs.foxglove_msgs.Color import Color from dimos.msgs.sensor_msgs import Image from 
dimos.msgs.vision_msgs import Detection2DArray from dimos.perception.detection.reid.embedding_id_system import EmbeddingIDSystem from dimos.perception.detection.reid.type import IDSystem -from dimos.perception.detection.type import ImageDetections2D +from dimos.perception.detection.type.detection2d.imageDetections2D import ImageDetections2D from dimos.types.timestamped import align_timestamped, to_ros_stamp from dimos.utils.reactive import backpressure diff --git a/dimos/perception/detection/reid/type.py b/dimos/perception/detection/reid/type.py index 28ea719f81..61571e418f 100644 --- a/dimos/perception/detection/reid/type.py +++ b/dimos/perception/detection/reid/type.py @@ -15,8 +15,12 @@ from __future__ import annotations from abc import ABC, abstractmethod +from typing import TYPE_CHECKING -from dimos.perception.detection.type import Detection2DBBox, ImageDetections2D +from dimos.perception.detection.type.detection2d.bbox import Detection2DBBox + +if TYPE_CHECKING: + from dimos.perception.detection.type.detection2d.imageDetections2D import ImageDetections2D class IDSystem(ABC): diff --git a/dimos/perception/detection/test_moduleDB.py b/dimos/perception/detection/test_moduleDB.py index e9815f1f3e..23885a1c60 100644 --- a/dimos/perception/detection/test_moduleDB.py +++ b/dimos/perception/detection/test_moduleDB.py @@ -22,19 +22,19 @@ from dimos.msgs.sensor_msgs import Image, PointCloud2 from dimos.msgs.vision_msgs import Detection2DArray from dimos.perception.detection.moduleDB import ObjectDBModule -from dimos.robot.unitree.connection import go2 +from dimos.robot.unitree.go2 import connection as go2_connection @pytest.mark.module def test_moduleDB(dimos_cluster) -> None: - connection = go2.deploy(dimos_cluster, "fake") + connection = go2_connection.deploy(dimos_cluster, "fake") moduleDB = dimos_cluster.deploy( ObjectDBModule, - camera_info=go2._camera_info_static(), + camera_info=go2_connection._camera_info_static(), goto=lambda obj_id: print(f"Going to 
{obj_id}"), ) - moduleDB.image.connect(connection.video) + moduleDB.image.connect(connection.color_image) moduleDB.pointcloud.connect(connection.lidar) moduleDB.annotations.transport = LCMTransport("/annotations", ImageAnnotations) diff --git a/dimos/perception/detection/type/__init__.py b/dimos/perception/detection/type/__init__.py index d69d00ba97..00cf943db3 100644 --- a/dimos/perception/detection/type/__init__.py +++ b/dimos/perception/detection/type/__init__.py @@ -1,45 +1,28 @@ -from dimos.perception.detection.type.detection2d import ( # type: ignore[attr-defined] - Detection2D, - Detection2DBBox, - Detection2DPerson, - Detection2DPoint, - Filter2D, - ImageDetections2D, -) -from dimos.perception.detection.type.detection3d import ( - Detection3D, - Detection3DBBox, - Detection3DPC, - ImageDetections3DPC, - PointCloudFilter, - height_filter, - radius_outlier, - raycast, - statistical, -) -from dimos.perception.detection.type.imageDetections import ImageDetections -from dimos.perception.detection.type.utils import TableStr +import lazy_loader as lazy -__all__ = [ - # 2D Detection types - "Detection2D", - "Detection2DBBox", - "Detection2DPerson", - "Detection2DPoint", - # 3D Detection types - "Detection3D", - "Detection3DBBox", - "Detection3DPC", - "Filter2D", - # Base types - "ImageDetections", - "ImageDetections2D", - "ImageDetections3DPC", - # Point cloud filters - "PointCloudFilter", - "TableStr", - "height_filter", - "radius_outlier", - "raycast", - "statistical", -] +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "detection2d": [ + "Detection2D", + "Detection2DBBox", + "Detection2DPerson", + "Detection2DPoint", + "Filter2D", + "ImageDetections2D", + ], + "detection3d": [ + "Detection3D", + "Detection3DBBox", + "Detection3DPC", + "ImageDetections3DPC", + "PointCloudFilter", + "height_filter", + "radius_outlier", + "raycast", + "statistical", + ], + "imageDetections": ["ImageDetections"], + "utils": ["TableStr"], + }, +) diff 
--git a/dimos/perception/experimental/temporal_memory/temporal_memory.py b/dimos/perception/experimental/temporal_memory/temporal_memory.py index 29d4ecf3d9..b884b0886f 100644 --- a/dimos/perception/experimental/temporal_memory/temporal_memory.py +++ b/dimos/perception/experimental/temporal_memory/temporal_memory.py @@ -32,13 +32,14 @@ from reactivex import Subject, interval from reactivex.disposable import Disposable -from dimos.agents import skill -from dimos.core import In, rpc +from dimos.core.core import rpc from dimos.core.module import ModuleConfig from dimos.core.skill_module import SkillModule +from dimos.core.stream import In from dimos.models.vl.base import VlModel from dimos.msgs.sensor_msgs import Image from dimos.msgs.sensor_msgs.Image import sharpness_barrier +from dimos.protocol.skill.skill import skill from . import temporal_utils as tu from .clip_filter import ( diff --git a/dimos/perception/experimental/temporal_memory/temporal_memory_deploy.py b/dimos/perception/experimental/temporal_memory/temporal_memory_deploy.py index 611385630e..ab3cc7a0f5 100644 --- a/dimos/perception/experimental/temporal_memory/temporal_memory_deploy.py +++ b/dimos/perception/experimental/temporal_memory/temporal_memory_deploy.py @@ -17,17 +17,21 @@ """ import os +from typing import TYPE_CHECKING -from dimos import spec -from dimos.core import DimosCluster +from dimos.core._dask_exports import DimosCluster from dimos.models.vl.base import VlModel +from dimos.spec import Camera as CameraSpec from .temporal_memory import TemporalMemory, TemporalMemoryConfig +if TYPE_CHECKING: + from dimos.msgs.sensor_msgs import Image + def deploy( dimos: DimosCluster, - camera: spec.Camera, + camera: CameraSpec, vlm: VlModel | None = None, config: TemporalMemoryConfig | None = None, ) -> TemporalMemory: @@ -52,7 +56,7 @@ def deploy( if camera.color_image.transport is None: from dimos.core.transport import JpegShmTransport - transport = JpegShmTransport("/temporal_memory/color_image") + 
transport: JpegShmTransport[Image] = JpegShmTransport("/temporal_memory/color_image") camera.color_image.transport = transport temporal_memory.color_image.connect(camera.color_image) diff --git a/dimos/perception/object_tracker.py b/dimos/perception/object_tracker.py index 54a5873435..da415ac32a 100644 --- a/dimos/perception/object_tracker.py +++ b/dimos/perception/object_tracker.py @@ -25,9 +25,12 @@ ObjectHypothesisWithPose, ) import numpy as np +from numpy.typing import NDArray from reactivex.disposable import Disposable -from dimos.core import In, Module, ModuleConfig, Out, rpc +from dimos.core.core import rpc +from dimos.core.module import Module, ModuleConfig +from dimos.core.stream import In, Out from dimos.msgs.geometry_msgs import Pose, Quaternion, Transform, Vector3 from dimos.msgs.sensor_msgs import ( CameraInfo, @@ -555,9 +558,9 @@ def _process_tracking(self) -> None: viz_msg = Image.from_numpy(viz_image) self.tracked_overlay.publish(viz_msg) - def _draw_reid_matches(self, image: np.ndarray) -> np.ndarray: # type: ignore[type-arg] + def _draw_reid_matches(self, image: NDArray[np.uint8]) -> NDArray[np.uint8]: # type: ignore[type-arg] """Draw REID feature matches on the image.""" - viz_image = image.copy() + viz_image: NDArray[np.uint8] = image.copy() # type: ignore[type-arg] x1, y1, _x2, _y2 = self.last_roi_bbox # type: ignore[misc] diff --git a/dimos/perception/object_tracker_2d.py b/dimos/perception/object_tracker_2d.py index f5d39745c3..1264b0e92b 100644 --- a/dimos/perception/object_tracker_2d.py +++ b/dimos/perception/object_tracker_2d.py @@ -29,9 +29,12 @@ Pose2D, ) import numpy as np +from numpy.typing import NDArray from reactivex.disposable import Disposable -from dimos.core import In, Module, ModuleConfig, Out, rpc +from dimos.core.core import rpc +from dimos.core.module import Module, ModuleConfig +from dimos.core.stream import In, Out from dimos.msgs.sensor_msgs import Image, ImageFormat from dimos.msgs.std_msgs import Header from 
dimos.msgs.vision_msgs import Detection2DArray @@ -289,9 +292,9 @@ def _process_tracking(self) -> None: viz_msg = Image.from_numpy(viz_copy, format=ImageFormat.RGB) self.tracked_overlay.publish(viz_msg) - def _draw_visualization(self, image: np.ndarray, bbox: list[int]) -> np.ndarray: # type: ignore[type-arg] + def _draw_visualization(self, image: NDArray[np.uint8], bbox: list[int]) -> NDArray[np.uint8]: # type: ignore[type-arg] """Draw tracking visualization.""" - viz_image = image.copy() + viz_image: NDArray[np.uint8] = image.copy() # type: ignore[type-arg] x1, y1, x2, y2 = bbox cv2.rectangle(viz_image, (x1, y1), (x2, y2), (0, 255, 0), 2) cv2.putText(viz_image, "TRACKING", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2) diff --git a/dimos/perception/object_tracker_3d.py b/dimos/perception/object_tracker_3d.py index fa6361ac65..f8143dc861 100644 --- a/dimos/perception/object_tracker_3d.py +++ b/dimos/perception/object_tracker_3d.py @@ -283,7 +283,7 @@ def _draw_reid_overlay(self, image: np.ndarray) -> np.ndarray: # type: ignore[t """Draw Re-ID feature matches on visualization.""" import cv2 - viz_image = image.copy() + viz_image: np.ndarray = image.copy() # type: ignore[type-arg] x1, y1, _x2, _y2 = self.last_roi_bbox # type: ignore[attr-defined] # Draw keypoints diff --git a/dimos/perception/spatial_perception.py b/dimos/perception/spatial_perception.py index e33820f22c..7bb83b67cd 100644 --- a/dimos/perception/spatial_perception.py +++ b/dimos/perception/spatial_perception.py @@ -203,7 +203,7 @@ def set_video(image_msg: Image) -> None: # Start periodic processing using interval unsub = interval(self._process_interval).subscribe(lambda _: self._process_frame()) # type: ignore[assignment] - self._disposables.add(Disposable(unsub)) + self._disposables.add(unsub) @rpc def stop(self) -> None: diff --git a/dimos/perception/test_spatial_memory_module.py b/dimos/perception/test_spatial_memory_module.py index 47518b889b..11b8eec562 100644 --- 
a/dimos/perception/test_spatial_memory_module.py +++ b/dimos/perception/test_spatial_memory_module.py @@ -25,7 +25,7 @@ from dimos.msgs.sensor_msgs import Image from dimos.perception.spatial_perception import SpatialMemory from dimos.protocol import pubsub -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.odometry import Odometry from dimos.utils.data import get_data from dimos.utils.logging_config import setup_logger from dimos.utils.testing import TimedSensorReplay diff --git a/dimos/protocol/mcp/test_mcp_module.py b/dimos/protocol/mcp/test_mcp_module.py index 2a247e6ff0..15ed512845 100644 --- a/dimos/protocol/mcp/test_mcp_module.py +++ b/dimos/protocol/mcp/test_mcp_module.py @@ -30,7 +30,9 @@ def test_unitree_blueprint_has_mcp() -> None: - contents = Path("dimos/robot/unitree_webrtc/unitree_go2_blueprints.py").read_text() + contents = Path( + "dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_mcp.py" + ).read_text() assert "agentic_mcp" in contents assert "MCPModule.blueprint()" in contents diff --git a/dimos/protocol/rpc/pubsubrpc.py b/dimos/protocol/rpc/pubsubrpc.py index 394f1afc15..3b77227218 100644 --- a/dimos/protocol/rpc/pubsubrpc.py +++ b/dimos/protocol/rpc/pubsubrpc.py @@ -81,7 +81,7 @@ def __init__(self, *args: Any, **kwargs: Any) -> None: def __getstate__(self) -> dict[str, Any]: state: dict[str, Any] if hasattr(super(), "__getstate__"): - state = super().__getstate__() # type: ignore[assignment] + state = super().__getstate__() # type: ignore[assignment, misc] else: state = self.__dict__.copy() diff --git a/dimos/protocol/skill/skill.py b/dimos/protocol/skill/skill.py index 373bb463a7..33e71a590b 100644 --- a/dimos/protocol/skill/skill.py +++ b/dimos/protocol/skill/skill.py @@ -16,7 +16,7 @@ from collections.abc import Callable from concurrent.futures import ThreadPoolExecutor from dataclasses import dataclass -from typing import Any +from typing import Any, ParamSpec, TypeVar, cast # from 
dimos.core.core import rpc from dimos.protocol.skill.comms import LCMSkillComms, SkillCommsSpec @@ -57,7 +57,11 @@ # the average of all values is returned to the agent -def rpc(fn: Callable[..., Any]) -> Callable[..., Any]: +P = ParamSpec("P") +R = TypeVar("R") + + +def rpc(fn: Callable[P, R]) -> Callable[P, R]: fn.__rpc__ = True # type: ignore[attr-defined] return fn @@ -68,16 +72,16 @@ def skill( ret: Return = Return.call_agent, output: Output = Output.standard, hide_skill: bool = False, -) -> Callable: # type: ignore[type-arg] - def decorator(f: Callable[..., Any]) -> Any: - def wrapper(self, *args, **kwargs): # type: ignore[no-untyped-def] +) -> Callable[[Callable[P, R]], Callable[P, R]]: + def decorator(f: Callable[P, R]) -> Callable[P, R]: + def wrapper(self: Any, *args: P.args, **kwargs: P.kwargs) -> R: skill = f"{f.__name__}" call_id = kwargs.get("call_id", None) if call_id: del kwargs["call_id"] - return self.call_skill(call_id, skill, args, kwargs) + return cast("R", self.call_skill(call_id, skill, args, kwargs)) # type: ignore[attr-defined] # def run_function(): # return self.call_skill(call_id, skill, args, kwargs) # @@ -108,7 +112,7 @@ def wrapper(self, *args, **kwargs): # type: ignore[no-untyped-def] wrapper._skill_config = skill_config # type: ignore[attr-defined] wrapper.__name__ = f.__name__ # Preserve original function name wrapper.__doc__ = f.__doc__ # Preserve original docstring - return wrapper + return cast("Callable[P, R]", wrapper) return decorator diff --git a/dimos/robot/all_blueprints.py b/dimos/robot/all_blueprints.py index a4a953062c..11c520466a 100644 --- a/dimos/robot/all_blueprints.py +++ b/dimos/robot/all_blueprints.py @@ -32,7 +32,7 @@ "coordinator-xarm6": "dimos.control.blueprints:coordinator_xarm6", "coordinator-xarm7": "dimos.control.blueprints:coordinator_xarm7", "demo-camera": "dimos.hardware.sensors.camera.module:demo_camera", - "demo-error-on-name-conflicts": 
"dimos.robot.unitree_webrtc.demo_error_on_name_conflicts:demo_error_on_name_conflicts", + "demo-error-on-name-conflicts": "dimos.robot.unitree.demo_error_on_name_conflicts:demo_error_on_name_conflicts", "demo-google-maps-skill": "dimos.agents.skills.demo_google_maps_skill:demo_google_maps_skill", "demo-gps-nav": "dimos.agents.skills.demo_gps_nav:demo_gps_nav", "demo-grasping": "dimos.manipulation.grasping.demo_grasping:demo_grasping", @@ -40,27 +40,28 @@ "demo-osm": "dimos.mapping.osm.demo_osm:demo_osm", "demo-skill": "dimos.agents.skills.demo_skill:demo_skill", "dual-xarm6-planner": "dimos.manipulation.manipulation_blueprints:dual_xarm6_planner", - "unitree-g1": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1", - "unitree-g1-agentic": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1_agentic", - "unitree-g1-agentic-sim": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1_agentic_sim", - "unitree-g1-basic": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1_basic", - "unitree-g1-basic-sim": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1_basic_sim", - "unitree-g1-detection": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1_detection", - "unitree-g1-full": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1_full", - "unitree-g1-joystick": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1_joystick", - "unitree-g1-shm": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1_shm", - "unitree-g1-sim": "dimos.robot.unitree_webrtc.unitree_g1_blueprints:unitree_g1_sim", - "unitree-go2": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2", - "unitree-go2-agentic": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_agentic", - "unitree-go2-agentic-huggingface": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_agentic_huggingface", - "unitree-go2-agentic-mcp": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_agentic_mcp", - 
"unitree-go2-agentic-ollama": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_agentic_ollama", - "unitree-go2-basic": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_basic", - "unitree-go2-detection": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_detection", - "unitree-go2-ros": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_ros", - "unitree-go2-spatial": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_spatial", - "unitree-go2-temporal-memory": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_temporal_memory", - "unitree-go2-vlm-stream-test": "dimos.robot.unitree_webrtc.unitree_go2_blueprints:unitree_go2_vlm_stream_test", + "uintree-g1-primitive-no-nav": "dimos.robot.unitree.g1.blueprints.primitive.uintree_g1_primitive_no_nav:uintree_g1_primitive_no_nav", + "unitree-g1": "dimos.robot.unitree.g1.blueprints.perceptive.unitree_g1:unitree_g1", + "unitree-g1-agentic": "dimos.robot.unitree.g1.blueprints.agentic.unitree_g1_agentic:unitree_g1_agentic", + "unitree-g1-agentic-sim": "dimos.robot.unitree.g1.blueprints.agentic.unitree_g1_agentic_sim:unitree_g1_agentic_sim", + "unitree-g1-basic": "dimos.robot.unitree.g1.blueprints.basic.unitree_g1_basic:unitree_g1_basic", + "unitree-g1-basic-sim": "dimos.robot.unitree.g1.blueprints.basic.unitree_g1_basic_sim:unitree_g1_basic_sim", + "unitree-g1-detection": "dimos.robot.unitree.g1.blueprints.perceptive.unitree_g1_detection:unitree_g1_detection", + "unitree-g1-full": "dimos.robot.unitree.g1.blueprints.agentic.unitree_g1_full:unitree_g1_full", + "unitree-g1-joystick": "dimos.robot.unitree.g1.blueprints.basic.unitree_g1_joystick:unitree_g1_joystick", + "unitree-g1-shm": "dimos.robot.unitree.g1.blueprints.perceptive.unitree_g1_shm:unitree_g1_shm", + "unitree-g1-sim": "dimos.robot.unitree.g1.blueprints.perceptive.unitree_g1_sim:unitree_g1_sim", + "unitree-go2": "dimos.robot.unitree.go2.blueprints.smart.unitree_go2:unitree_go2", + 
"unitree-go2-agentic": "dimos.robot.unitree.go2.blueprints.agentic.unitree_go2_agentic:unitree_go2_agentic", + "unitree-go2-agentic-huggingface": "dimos.robot.unitree.go2.blueprints.agentic.unitree_go2_agentic_huggingface:unitree_go2_agentic_huggingface", + "unitree-go2-agentic-mcp": "dimos.robot.unitree.go2.blueprints.agentic.unitree_go2_agentic_mcp:unitree_go2_agentic_mcp", + "unitree-go2-agentic-ollama": "dimos.robot.unitree.go2.blueprints.agentic.unitree_go2_agentic_ollama:unitree_go2_agentic_ollama", + "unitree-go2-basic": "dimos.robot.unitree.go2.blueprints.basic.unitree_go2_basic:unitree_go2_basic", + "unitree-go2-detection": "dimos.robot.unitree.go2.blueprints.smart.unitree_go2_detection:unitree_go2_detection", + "unitree-go2-ros": "dimos.robot.unitree.go2.blueprints.smart.unitree_go2_ros:unitree_go2_ros", + "unitree-go2-spatial": "dimos.robot.unitree.go2.blueprints.smart.unitree_go2_spatial:unitree_go2_spatial", + "unitree-go2-temporal-memory": "dimos.robot.unitree.go2.blueprints.agentic.unitree_go2_temporal_memory:unitree_go2_temporal_memory", + "unitree-go2-vlm-stream-test": "dimos.robot.unitree.go2.blueprints.smart.unitree_go2_vlm_stream_test:unitree_go2_vlm_stream_test", "xarm-perception": "dimos.manipulation.manipulation_blueprints:xarm_perception", "xarm6-planner-only": "dimos.manipulation.manipulation_blueprints:xarm6_planner_only", "xarm7-planner-coordinator": "dimos.manipulation.manipulation_blueprints:xarm7_planner_coordinator", @@ -75,23 +76,23 @@ "cost_mapper": "dimos.mapping.costmapper", "demo_calculator_skill": "dimos.agents.skills.demo_calculator_skill", "demo_robot": "dimos.agents.skills.demo_robot", - "depth_module": "dimos.robot.unitree_webrtc.depth_module", + "depth_module": "dimos.robot.unitree.depth_module", "detection3d_module": "dimos.perception.detection.module3D", "detection_db_module": "dimos.perception.detection.moduleDB", "foxglove_bridge": "dimos.robot.foxglove_bridge", - "g1_connection": "dimos.robot.unitree.connection.g1", - 
"g1_sim_connection": "dimos.robot.unitree.connection.g1sim", - "g1_skills": "dimos.robot.unitree_webrtc.unitree_g1_skill_container", - "go2_connection": "dimos.robot.unitree.connection.go2", + "g1_connection": "dimos.robot.unitree.g1.connection", + "g1_sim_connection": "dimos.robot.unitree.g1.sim", + "g1_skills": "dimos.robot.unitree.g1.skill_container", + "go2_connection": "dimos.robot.unitree.go2.connection", "google_maps_skill": "dimos.agents.skills.google_maps_skill_container", "gps_nav_skill": "dimos.agents.skills.gps_nav_skill", "grasping_module": "dimos.manipulation.grasping.grasping", "human_input": "dimos.agents.cli.human", "joint_trajectory_controller": "dimos.manipulation.control.trajectory_controller.joint_trajectory_controller", - "keyboard_teleop": "dimos.robot.unitree_webrtc.keyboard_teleop", + "keyboard_teleop": "dimos.robot.unitree.keyboard_teleop", "llm_agent": "dimos.agents.agent", "manipulation_module": "dimos.manipulation.manipulation_module", - "mapper": "dimos.robot.unitree_webrtc.type.map", + "mapper": "dimos.robot.unitree.type.map", "navigation_skill": "dimos.agents.skills.navigation", "object_scene_registration_module": "dimos.perception.object_scene_registration", "object_tracking": "dimos.perception.object_tracker", @@ -107,7 +108,7 @@ "speak_skill": "dimos.agents.skills.speak_skill", "temporal_memory": "dimos.perception.experimental.temporal_memory.temporal_memory", "twist_teleop_module": "dimos.teleop.quest.quest_extensions", - "unitree_skills": "dimos.robot.unitree_webrtc.unitree_skill_container", + "unitree_skills": "dimos.robot.unitree.unitree_skill_container", "utilization": "dimos.utils.monitoring", "visualizing_teleop_module": "dimos.teleop.quest.quest_extensions", "vlm_agent": "dimos.agents.vlm_agent", diff --git a/dimos/robot/cli/dimos.py b/dimos/robot/cli/dimos.py index 59c08cca96..fc9235a834 100644 --- a/dimos/robot/cli/dimos.py +++ b/dimos/robot/cli/dimos.py @@ -15,17 +15,12 @@ from enum import Enum import inspect import sys 
-from typing import Any, Optional, get_args, get_origin +from typing import Any, get_args, get_origin import typer -from dimos.core.blueprints import autoconnect from dimos.core.global_config import GlobalConfig, global_config -from dimos.protocol import pubsub from dimos.robot.all_blueprints import all_blueprints -from dimos.robot.cli.topic import topic_echo, topic_send -from dimos.robot.get_all_blueprints import get_blueprint_by_name, get_module_by_name -from dimos.utils.logging_config import setup_exception_handler RobotType = Enum("RobotType", {key.replace("-", "_").upper(): key for key in all_blueprints.keys()}) # type: ignore[misc] @@ -49,7 +44,7 @@ def create_dynamic_callback(): # type: ignore[no-untyped-def] # Handle Optional types # Check for Optional/Union with None - if get_origin(field_type) is type(Optional[str]): # noqa: UP045 + if get_origin(field_type) is type(str | None): inner_types = get_args(field_type) if len(inner_types) == 2 and type(None) in inner_types: # It's Optional[T], get the actual type T @@ -73,7 +68,7 @@ def create_dynamic_callback(): # type: ignore[no-untyped-def] f"--{cli_option_name}/--no-{cli_option_name}", help=f"Override {field_name} in GlobalConfig", ), - annotation=Optional[bool], # noqa: UP045 + annotation=bool | None, ) else: # For non-boolean fields, use regular option @@ -85,7 +80,7 @@ def create_dynamic_callback(): # type: ignore[no-untyped-def] f"--{cli_option_name}", help=f"Override {field_name} in GlobalConfig", ), - annotation=Optional[actual_type], # noqa: UP045 + annotation=actual_type | None, ) params.append(param) @@ -110,6 +105,11 @@ def run( ), ) -> None: """Start a robot blueprint""" + from dimos.core.blueprints import autoconnect + from dimos.protocol import pubsub + from dimos.robot.get_all_blueprints import get_blueprint_by_name, get_module_by_name + from dimos.utils.logging_config import setup_exception_handler + setup_exception_handler() cli_config_overrides: dict[str, Any] = ctx.obj @@ -128,6 +128,7 @@ 
def run( @main.command() def show_config(ctx: typer.Context) -> None: """Show current config settings and their values.""" + cli_config_overrides: dict[str, Any] = ctx.obj global_config.update(**cli_config_overrides) @@ -138,6 +139,8 @@ def show_config(ctx: typer.Context) -> None: @main.command() def list() -> None: """List all available blueprints.""" + from dimos.robot.all_blueprints import all_blueprints + blueprints = [name for name in all_blueprints.keys() if not name.startswith("demo-")] for blueprint_name in sorted(blueprints): typer.echo(blueprint_name) @@ -191,6 +194,8 @@ def echo( help="Optional message type (e.g., PoseStamped). If omitted, infer from '/topic#pkg.Msg'.", ), ) -> None: + from dimos.robot.cli.topic import topic_echo + topic_echo(topic, type_name) @@ -199,6 +204,8 @@ def send( topic: str = typer.Argument(..., help="Topic name to send to (e.g., /goal_request)"), message_expr: str = typer.Argument(..., help="Python expression for the message"), ) -> None: + from dimos.robot.cli.topic import topic_send + topic_send(topic, message_expr) diff --git a/dimos/robot/drone/__init__.py b/dimos/robot/drone/__init__.py index 5d4eed4dae..1ed8521b8b 100644 --- a/dimos/robot/drone/__init__.py +++ b/dimos/robot/drone/__init__.py @@ -14,9 +14,14 @@ """Generic drone module for MAVLink-based drones.""" -from .camera_module import DroneCameraModule -from .connection_module import DroneConnectionModule -from .drone import Drone -from .mavlink_connection import MavlinkConnection +import lazy_loader as lazy -__all__ = ["Drone", "DroneCameraModule", "DroneConnectionModule", "MavlinkConnection"] +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "camera_module": ["DroneCameraModule"], + "connection_module": ["DroneConnectionModule"], + "drone": ["Drone"], + "mavlink_connection": ["MavlinkConnection"], + }, +) diff --git a/dimos/robot/drone/camera_module.py b/dimos/robot/drone/camera_module.py index 7806c3eab8..8ba88fd028 100644 --- 
a/dimos/robot/drone/camera_module.py +++ b/dimos/robot/drone/camera_module.py @@ -98,11 +98,11 @@ def __init__( logger.info(f"DroneCameraModule initialized with intrinsics: {camera_intrinsics}") @rpc - def start(self) -> bool: + def start(self) -> None: """Start the camera module.""" if self._running: logger.warning("Camera module already running") - return True + return # Start processing thread for depth (which will init Metric3D and handle video) self._running = True @@ -111,7 +111,7 @@ def start(self) -> bool: self._processing_thread.start() logger.info("Camera module started") - return True + return def _on_video_frame(self, frame: Image) -> None: """Handle incoming video frame.""" diff --git a/dimos/robot/drone/connection_module.py b/dimos/robot/drone/connection_module.py index 865d98c3d3..92dfb1db38 100644 --- a/dimos/robot/drone/connection_module.py +++ b/dimos/robot/drone/connection_module.py @@ -24,7 +24,9 @@ from dimos_lcm.std_msgs import String from reactivex.disposable import CompositeDisposable, Disposable -from dimos.core import In, Module, Out, rpc +from dimos.core.core import rpc +from dimos.core.module import Module +from dimos.core.stream import In, Out from dimos.mapping.types import LatLon from dimos.msgs.geometry_msgs import PoseStamped, Quaternion, Transform, Twist, Vector3 from dimos.msgs.sensor_msgs import Image @@ -101,7 +103,7 @@ def __init__( Module.__init__(self, *args, **kwargs) @rpc - def start(self) -> bool: + def start(self) -> None: """Start the connection and subscribe to sensor streams.""" # Check for replay mode if self.connection_string == "replay": @@ -118,7 +120,7 @@ def start(self) -> bool: if not self.connection.connected: logger.error("Failed to connect to drone") - return False + return # Start video stream (already created above) if self.video_stream.start(): @@ -170,7 +172,7 @@ def start(self) -> bool: self._telemetry_thread.start() logger.info("Drone connection module started") - return True + return def 
_store_and_publish_frame(self, frame: Image) -> None: """Store the latest video frame and publish it.""" diff --git a/dimos/robot/drone/drone_tracking_module.py b/dimos/robot/drone/drone_tracking_module.py index e6560142d1..e1b633a05b 100644 --- a/dimos/robot/drone/drone_tracking_module.py +++ b/dimos/robot/drone/drone_tracking_module.py @@ -23,6 +23,7 @@ import cv2 from dimos_lcm.std_msgs import String import numpy as np +from numpy.typing import NDArray from dimos.core import In, Module, Out, rpc from dimos.models.qwen.video_query import get_bbox_from_qwen_frame @@ -113,7 +114,7 @@ def _get_latest_frame(self) -> np.ndarray[Any, np.dtype[Any]] | None: return data @rpc - def start(self) -> bool: + def start(self) -> None: """Start the tracking module and subscribe to video input.""" if self.video_input.transport: self.video_input.subscribe(self._on_new_frame) @@ -124,7 +125,7 @@ def start(self) -> bool: if self.follow_object_cmd.transport: self.follow_object_cmd.subscribe(self._on_follow_object_cmd) - return True + return @rpc def stop(self) -> None: @@ -308,10 +309,10 @@ def _visual_servoing_loop(self, tracker: Any, duration: float) -> None: def _draw_tracking_overlay( self, - frame: np.ndarray[Any, np.dtype[Any]], + frame: NDArray[np.uint8], bbox: tuple[int, int, int, int], center: tuple[int, int], - ) -> np.ndarray[Any, np.dtype[Any]]: + ) -> NDArray[np.uint8]: # type: ignore[type-arg] """Draw tracking visualization overlay. 
Args: @@ -322,7 +323,7 @@ def _draw_tracking_overlay( Returns: Frame with overlay drawn """ - overlay = frame.copy() + overlay: NDArray[np.uint8] = frame.copy() # type: ignore[type-arg] x, y, w, h = bbox # Draw tracking box (green) diff --git a/dimos/robot/drone/test_drone.py b/dimos/robot/drone/test_drone.py index bfbaa9ed54..d9075beae3 100644 --- a/dimos/robot/drone/test_drone.py +++ b/dimos/robot/drone/test_drone.py @@ -264,9 +264,8 @@ def test_connection_module_replay_mode(self) -> None: try: # Start should use Fake classes - result = module.start() + module.start() - self.assertTrue(result) mock_fake_conn.assert_called_once_with("replay") mock_fake_video.assert_called_once() finally: @@ -380,20 +379,19 @@ def replay_side_effect(store_name: str): try: print("\n[TEST] Starting connection module in replay mode...") - result = module.start() + module.start() # Give time for messages to process import time time.sleep(0.1) - print(f"\n[TEST] Module started: {result}") + print("\n[TEST] Module started") print(f"[TEST] Total odom messages published: {len(published_odom)}") print(f"[TEST] Total video frames published: {len(published_video)}") print(f"[TEST] Total status messages published: {len(published_status)}") # Verify module started and is processing messages - self.assertTrue(result) self.assertIsNotNone(module.connection) self.assertIsNotNone(module.video_stream) @@ -877,8 +875,7 @@ def replay_stream_subscribe(callback) -> None: module.movecmd = MagicMock() # Start module - result = module.start() - self.assertTrue(result) + module.start() # Give time for processing time.sleep(0.2) diff --git a/dimos/robot/unitree_webrtc/testing/__init__.py b/dimos/robot/unitree/__init__.py similarity index 100% rename from dimos/robot/unitree_webrtc/testing/__init__.py rename to dimos/robot/unitree/__init__.py diff --git a/dimos/robot/unitree_webrtc/unitree_b1/README.md b/dimos/robot/unitree/b1/README.md similarity index 98% rename from 
dimos/robot/unitree_webrtc/unitree_b1/README.md rename to dimos/robot/unitree/b1/README.md index f59e6a57ae..1443067a2a 100644 --- a/dimos/robot/unitree_webrtc/unitree_b1/README.md +++ b/dimos/robot/unitree/b1/README.md @@ -112,7 +112,7 @@ pip install -e .[cpu,sim] #### With Joystick Control (Recommended for Testing) ```bash -python -m dimos.robot.unitree_webrtc.unitree_b1.unitree_b1 \ +python -m dimos.robot.unitree.b1.unitree_b1 \ --ip 192.168.12.1 \ --port 9090 \ --joystick @@ -129,7 +129,7 @@ python -m dimos.robot.unitree_webrtc.unitree_b1.unitree_b1 \ #### Test Mode (No Robot Required) ```bash -python -m dimos.robot.unitree_webrtc.unitree_b1.unitree_b1 \ +python -m dimos.robot.unitree.b1.unitree_b1 \ --test \ --joystick ``` diff --git a/dimos/robot/unitree_webrtc/unitree_b1/__init__.py b/dimos/robot/unitree/b1/__init__.py similarity index 100% rename from dimos/robot/unitree_webrtc/unitree_b1/__init__.py rename to dimos/robot/unitree/b1/__init__.py diff --git a/dimos/robot/unitree_webrtc/unitree_b1/b1_command.py b/dimos/robot/unitree/b1/b1_command.py similarity index 100% rename from dimos/robot/unitree_webrtc/unitree_b1/b1_command.py rename to dimos/robot/unitree/b1/b1_command.py diff --git a/dimos/robot/unitree_webrtc/unitree_b1/connection.py b/dimos/robot/unitree/b1/connection.py similarity index 100% rename from dimos/robot/unitree_webrtc/unitree_b1/connection.py rename to dimos/robot/unitree/b1/connection.py diff --git a/dimos/robot/unitree_webrtc/unitree_b1/joystick_module.py b/dimos/robot/unitree/b1/joystick_module.py similarity index 99% rename from dimos/robot/unitree_webrtc/unitree_b1/joystick_module.py rename to dimos/robot/unitree/b1/joystick_module.py index 9aa02af058..bb07094973 100644 --- a/dimos/robot/unitree_webrtc/unitree_b1/joystick_module.py +++ b/dimos/robot/unitree/b1/joystick_module.py @@ -47,7 +47,7 @@ def __init__(self, *args, **kwargs) -> None: # type: ignore[no-untyped-def] self.current_mode = 0 # Start in IDLE mode for safety @rpc - 
def start(self) -> bool: + def start(self) -> None: """Initialize pygame and start control loop.""" super().start() @@ -56,7 +56,7 @@ def start(self) -> bool: import pygame # noqa: F401 except ImportError: print("ERROR: pygame not installed. Install with: pip install pygame") - return False + return self.keys_held = set() # type: ignore[var-annotated] self.pygame_ready = True @@ -66,7 +66,7 @@ def start(self) -> bool: self._thread = threading.Thread(target=self._pygame_loop, daemon=True) self._thread.start() - return True + return @rpc def stop(self) -> None: diff --git a/dimos/robot/unitree_webrtc/unitree_b1/joystick_server_udp.cpp b/dimos/robot/unitree/b1/joystick_server_udp.cpp similarity index 100% rename from dimos/robot/unitree_webrtc/unitree_b1/joystick_server_udp.cpp rename to dimos/robot/unitree/b1/joystick_server_udp.cpp diff --git a/dimos/robot/unitree_webrtc/unitree_b1/test_connection.py b/dimos/robot/unitree/b1/test_connection.py similarity index 100% rename from dimos/robot/unitree_webrtc/unitree_b1/test_connection.py rename to dimos/robot/unitree/b1/test_connection.py diff --git a/dimos/robot/unitree_webrtc/unitree_b1/unitree_b1.py b/dimos/robot/unitree/b1/unitree_b1.py similarity index 98% rename from dimos/robot/unitree_webrtc/unitree_b1/unitree_b1.py rename to dimos/robot/unitree/b1/unitree_b1.py index 9302e8f66f..a2dd6c718d 100644 --- a/dimos/robot/unitree_webrtc/unitree_b1/unitree_b1.py +++ b/dimos/robot/unitree/b1/unitree_b1.py @@ -32,7 +32,7 @@ from dimos.msgs.std_msgs import Int32 from dimos.msgs.tf2_msgs.TFMessage import TFMessage from dimos.robot.robot import Robot -from dimos.robot.unitree_webrtc.unitree_b1.connection import ( +from dimos.robot.unitree.b1.connection import ( B1ConnectionModule, MockB1ConnectionModule, ) @@ -110,7 +110,7 @@ def start(self) -> None: # Deploy joystick move_vel control if self.enable_joystick: - from dimos.robot.unitree_webrtc.unitree_b1.joystick_module import JoystickModule + from 
dimos.robot.unitree.b1.joystick_module import JoystickModule self.joystick = self._dimos.deploy(JoystickModule) # type: ignore[assignment] self.joystick.twist_out.transport = core.LCMTransport("/cmd_vel", TwistStamped) # type: ignore[attr-defined] diff --git a/dimos/robot/unitree/connection/connection.py b/dimos/robot/unitree/connection.py similarity index 98% rename from dimos/robot/unitree/connection/connection.py rename to dimos/robot/unitree/connection.py index fd365aca33..217c7e72b0 100644 --- a/dimos/robot/unitree/connection/connection.py +++ b/dimos/robot/unitree/connection.py @@ -39,9 +39,9 @@ from dimos.msgs.geometry_msgs import Pose, Transform, Twist from dimos.msgs.sensor_msgs import Image, PointCloud2 from dimos.msgs.sensor_msgs.Image import ImageFormat -from dimos.robot.unitree_webrtc.type.lidar import RawLidarMsg, pointcloud2_from_webrtc_lidar -from dimos.robot.unitree_webrtc.type.lowstate import LowStateMsg -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.lidar import RawLidarMsg, pointcloud2_from_webrtc_lidar +from dimos.robot.unitree.type.lowstate import LowStateMsg +from dimos.robot.unitree.type.odometry import Odometry from dimos.utils.decorators.decorators import simple_mcache from dimos.utils.reactive import backpressure, callback_to_observable diff --git a/dimos/robot/unitree/connection/__init__.py b/dimos/robot/unitree/connection/__init__.py deleted file mode 100644 index 5c1dff1922..0000000000 --- a/dimos/robot/unitree/connection/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -import dimos.robot.unitree.connection.g1 as g1 -import dimos.robot.unitree.connection.go2 as go2 - -__all__ = ["g1", "go2"] diff --git a/dimos/robot/unitree_webrtc/demo_error_on_name_conflicts.py b/dimos/robot/unitree/demo_error_on_name_conflicts.py similarity index 100% rename from dimos/robot/unitree_webrtc/demo_error_on_name_conflicts.py rename to dimos/robot/unitree/demo_error_on_name_conflicts.py diff --git 
a/dimos/robot/unitree_webrtc/depth_module.py b/dimos/robot/unitree/depth_module.py similarity index 100% rename from dimos/robot/unitree_webrtc/depth_module.py rename to dimos/robot/unitree/depth_module.py diff --git a/dimos/robot/unitree/g1/blueprints/__init__.py b/dimos/robot/unitree/g1/blueprints/__init__.py new file mode 100644 index 0000000000..ebc18da8d3 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/__init__.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Cascaded G1 blueprints split into focused modules.""" + +import lazy_loader as lazy + +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "agentic._agentic_skills": ["_agentic_skills"], + "agentic.unitree_g1_agentic": ["unitree_g1_agentic"], + "agentic.unitree_g1_agentic_sim": ["unitree_g1_agentic_sim"], + "agentic.unitree_g1_full": ["unitree_g1_full"], + "basic.unitree_g1_basic": ["unitree_g1_basic"], + "basic.unitree_g1_basic_sim": ["unitree_g1_basic_sim"], + "basic.unitree_g1_joystick": ["unitree_g1_joystick"], + "perceptive._perception_and_memory": ["_perception_and_memory"], + "perceptive.unitree_g1": ["unitree_g1"], + "perceptive.unitree_g1_detection": ["unitree_g1_detection"], + "perceptive.unitree_g1_shm": ["unitree_g1_shm"], + "perceptive.unitree_g1_sim": ["unitree_g1_sim"], + "primitive.uintree_g1_primitive_no_nav": ["uintree_g1_primitive_no_nav"], + }, +) diff --git a/dimos/robot/unitree/g1/blueprints/agentic/__init__.py b/dimos/robot/unitree/g1/blueprints/agentic/__init__.py new file mode 100644 index 0000000000..5e6db90d91 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/agentic/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Agentic blueprints for Unitree G1.""" diff --git a/dimos/robot/unitree/g1/blueprints/agentic/_agentic_skills.py b/dimos/robot/unitree/g1/blueprints/agentic/_agentic_skills.py new file mode 100644 index 0000000000..5ca139f968 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/agentic/_agentic_skills.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Agentic skills used by higher-level G1 blueprints.""" + +from dimos.agents.agent import llm_agent +from dimos.agents.cli.human import human_input +from dimos.agents.skills.navigation import navigation_skill +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.g1.skill_container import g1_skills + +_agentic_skills = autoconnect( + llm_agent(), + human_input(), + navigation_skill(), + g1_skills(), +) + +__all__ = ["_agentic_skills"] diff --git a/dimos/robot/unitree/g1/blueprints/agentic/unitree_g1_agentic.py b/dimos/robot/unitree/g1/blueprints/agentic/unitree_g1_agentic.py new file mode 100644 index 0000000000..a90c2bfe2c --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/agentic/unitree_g1_agentic.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Full G1 stack with agentic skills.""" + +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.g1.blueprints.agentic._agentic_skills import _agentic_skills +from dimos.robot.unitree.g1.blueprints.perceptive.unitree_g1 import unitree_g1 + +unitree_g1_agentic = autoconnect( + unitree_g1, + _agentic_skills, +) + +__all__ = ["unitree_g1_agentic"] diff --git a/dimos/robot/unitree/g1/blueprints/agentic/unitree_g1_agentic_sim.py b/dimos/robot/unitree/g1/blueprints/agentic/unitree_g1_agentic_sim.py new file mode 100644 index 0000000000..b7371b96b5 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/agentic/unitree_g1_agentic_sim.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Agentic G1 sim stack.""" + +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.g1.blueprints.agentic._agentic_skills import _agentic_skills +from dimos.robot.unitree.g1.blueprints.perceptive.unitree_g1_sim import unitree_g1_sim + +unitree_g1_agentic_sim = autoconnect( + unitree_g1_sim, + _agentic_skills, +) + +__all__ = ["unitree_g1_agentic_sim"] diff --git a/dimos/robot/unitree/g1/blueprints/agentic/unitree_g1_full.py b/dimos/robot/unitree/g1/blueprints/agentic/unitree_g1_full.py new file mode 100644 index 0000000000..7f826f2eec --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/agentic/unitree_g1_full.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Full featured G1 stack with agentic skills and teleop.""" + +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.g1.blueprints.agentic._agentic_skills import _agentic_skills +from dimos.robot.unitree.g1.blueprints.perceptive.unitree_g1_shm import unitree_g1_shm +from dimos.robot.unitree.keyboard_teleop import keyboard_teleop + +unitree_g1_full = autoconnect( + unitree_g1_shm, + _agentic_skills, + keyboard_teleop(), +) + +__all__ = ["unitree_g1_full"] diff --git a/dimos/robot/unitree/g1/blueprints/basic/__init__.py b/dimos/robot/unitree/g1/blueprints/basic/__init__.py new file mode 100644 index 0000000000..87e6586f56 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/basic/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic blueprints for Unitree G1.""" diff --git a/dimos/robot/unitree/g1/blueprints/basic/unitree_g1_basic.py b/dimos/robot/unitree/g1/blueprints/basic/unitree_g1_basic.py new file mode 100644 index 0000000000..1fb591e895 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/basic/unitree_g1_basic.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic G1 stack: base sensors plus real robot connection and ROS nav.""" + +from dimos.core.blueprints import autoconnect +from dimos.navigation.rosnav import ros_nav +from dimos.robot.unitree.g1.blueprints.primitive.uintree_g1_primitive_no_nav import ( + uintree_g1_primitive_no_nav, +) +from dimos.robot.unitree.g1.connection import g1_connection + +unitree_g1_basic = autoconnect( + uintree_g1_primitive_no_nav, + g1_connection(), + ros_nav(), +) + +__all__ = ["unitree_g1_basic"] diff --git a/dimos/robot/unitree/g1/blueprints/basic/unitree_g1_basic_sim.py b/dimos/robot/unitree/g1/blueprints/basic/unitree_g1_basic_sim.py new file mode 100644 index 0000000000..603a9535ee --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/basic/unitree_g1_basic_sim.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Basic G1 sim stack: base sensors plus sim connection and planner.""" + +from dimos.core.blueprints import autoconnect +from dimos.navigation.replanning_a_star.module import replanning_a_star_planner +from dimos.robot.unitree.g1.blueprints.primitive.uintree_g1_primitive_no_nav import ( + uintree_g1_primitive_no_nav, +) +from dimos.robot.unitree.g1.sim import g1_sim_connection + +unitree_g1_basic_sim = autoconnect( + uintree_g1_primitive_no_nav, + g1_sim_connection(), + replanning_a_star_planner(), +) + +__all__ = ["unitree_g1_basic_sim"] diff --git a/dimos/robot/unitree/g1/blueprints/basic/unitree_g1_joystick.py b/dimos/robot/unitree/g1/blueprints/basic/unitree_g1_joystick.py new file mode 100644 index 0000000000..0242556189 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/basic/unitree_g1_joystick.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""G1 stack with keyboard teleop.""" + +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.g1.blueprints.basic.unitree_g1_basic import unitree_g1_basic +from dimos.robot.unitree.keyboard_teleop import keyboard_teleop + +unitree_g1_joystick = autoconnect( + unitree_g1_basic, + keyboard_teleop(), # Pygame-based joystick control +) + +__all__ = ["unitree_g1_joystick"] diff --git a/dimos/robot/unitree/g1/blueprints/perceptive/__init__.py b/dimos/robot/unitree/g1/blueprints/perceptive/__init__.py new file mode 100644 index 0000000000..9bd838e8b8 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/perceptive/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Perceptive blueprints for Unitree G1.""" diff --git a/dimos/robot/unitree/g1/blueprints/perceptive/_perception_and_memory.py b/dimos/robot/unitree/g1/blueprints/perceptive/_perception_and_memory.py new file mode 100644 index 0000000000..47dc2588b9 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/perceptive/_perception_and_memory.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Perception and memory modules used by higher-level G1 blueprints.""" + +from dimos.core.blueprints import autoconnect +from dimos.perception.object_tracker import object_tracking +from dimos.perception.spatial_perception import spatial_memory +from dimos.utils.monitoring import utilization + +_perception_and_memory = autoconnect( + spatial_memory(), + object_tracking(frame_id="camera_link"), + utilization(), +) + +__all__ = ["_perception_and_memory"] diff --git a/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1.py b/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1.py new file mode 100644 index 0000000000..483928ec54 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""G1 stack with perception and memory.""" + +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.g1.blueprints.basic.unitree_g1_basic import unitree_g1_basic +from dimos.robot.unitree.g1.blueprints.perceptive._perception_and_memory import ( + _perception_and_memory, +) + +unitree_g1 = autoconnect( + unitree_g1_basic, + _perception_and_memory, +).global_config(n_dask_workers=8) + +__all__ = ["unitree_g1"] diff --git a/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1_detection.py b/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1_detection.py new file mode 100644 index 0000000000..6e2da40a2c --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1_detection.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""G1 stack with person tracking and 3D detection.""" + +from dimos_lcm.foxglove_msgs import SceneUpdate +from dimos_lcm.foxglove_msgs.ImageAnnotations import ImageAnnotations + +from dimos.core.blueprints import autoconnect +from dimos.core.transport import LCMTransport +from dimos.hardware.sensors.camera import zed +from dimos.msgs.geometry_msgs import PoseStamped +from dimos.msgs.sensor_msgs import Image, PointCloud2 +from dimos.msgs.vision_msgs import Detection2DArray +from dimos.perception.detection.detectors.person.yolo import YoloPersonDetector +from dimos.perception.detection.module3D import Detection3DModule, detection3d_module +from dimos.perception.detection.moduleDB import ObjectDBModule, detection_db_module +from dimos.perception.detection.person_tracker import PersonTracker, person_tracker_module +from dimos.robot.unitree.g1.blueprints.basic.unitree_g1_basic import unitree_g1_basic + +unitree_g1_detection = ( + autoconnect( + unitree_g1_basic, + # Person detection modules with YOLO + detection3d_module( + camera_info=zed.CameraInfo.SingleWebcam, + detector=YoloPersonDetector, + ), + detection_db_module( + camera_info=zed.CameraInfo.SingleWebcam, + filter=lambda det: det.class_id == 0, # Filter for person class only + ), + person_tracker_module( + cameraInfo=zed.CameraInfo.SingleWebcam, + ), + ) + .global_config(n_dask_workers=8) + .remappings( + [ + # Connect detection modules to camera and lidar + (Detection3DModule, "image", "color_image"), + (Detection3DModule, "pointcloud", "pointcloud"), + (ObjectDBModule, "image", "color_image"), + (ObjectDBModule, "pointcloud", "pointcloud"), + (PersonTracker, "image", "color_image"), + (PersonTracker, "detections", "detections_2d"), + ] + ) + .transports( + { + # Detection 3D module outputs + ("detections", Detection3DModule): LCMTransport( + "/detector3d/detections", Detection2DArray + ), + ("annotations", Detection3DModule): LCMTransport( + "/detector3d/annotations", ImageAnnotations + ), + 
("scene_update", Detection3DModule): LCMTransport( + "/detector3d/scene_update", SceneUpdate + ), + ("detected_pointcloud_0", Detection3DModule): LCMTransport( + "/detector3d/pointcloud/0", PointCloud2 + ), + ("detected_pointcloud_1", Detection3DModule): LCMTransport( + "/detector3d/pointcloud/1", PointCloud2 + ), + ("detected_pointcloud_2", Detection3DModule): LCMTransport( + "/detector3d/pointcloud/2", PointCloud2 + ), + ("detected_image_0", Detection3DModule): LCMTransport("/detector3d/image/0", Image), + ("detected_image_1", Detection3DModule): LCMTransport("/detector3d/image/1", Image), + ("detected_image_2", Detection3DModule): LCMTransport("/detector3d/image/2", Image), + # Detection DB module outputs + ("detections", ObjectDBModule): LCMTransport( + "/detectorDB/detections", Detection2DArray + ), + ("annotations", ObjectDBModule): LCMTransport( + "/detectorDB/annotations", ImageAnnotations + ), + ("scene_update", ObjectDBModule): LCMTransport("/detectorDB/scene_update", SceneUpdate), + ("detected_pointcloud_0", ObjectDBModule): LCMTransport( + "/detectorDB/pointcloud/0", PointCloud2 + ), + ("detected_pointcloud_1", ObjectDBModule): LCMTransport( + "/detectorDB/pointcloud/1", PointCloud2 + ), + ("detected_pointcloud_2", ObjectDBModule): LCMTransport( + "/detectorDB/pointcloud/2", PointCloud2 + ), + ("detected_image_0", ObjectDBModule): LCMTransport("/detectorDB/image/0", Image), + ("detected_image_1", ObjectDBModule): LCMTransport("/detectorDB/image/1", Image), + ("detected_image_2", ObjectDBModule): LCMTransport("/detectorDB/image/2", Image), + # Person tracker outputs + ("target", PersonTracker): LCMTransport("/person_tracker/target", PoseStamped), + } + ) +) + +__all__ = ["unitree_g1_detection"] diff --git a/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1_shm.py b/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1_shm.py new file mode 100644 index 0000000000..5d2eefad94 --- /dev/null +++ 
b/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1_shm.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""G1 stack with shared memory image transport.""" + +from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE +from dimos.core.blueprints import autoconnect +from dimos.core.transport import pSHMTransport +from dimos.msgs.sensor_msgs import Image +from dimos.robot.foxglove_bridge import foxglove_bridge +from dimos.robot.unitree.g1.blueprints.perceptive.unitree_g1 import unitree_g1 + +unitree_g1_shm = autoconnect( + unitree_g1.transports( + { + ("color_image", Image): pSHMTransport( + "/g1/color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE + ), + } + ), + foxglove_bridge( + shm_channels=[ + "/g1/color_image#sensor_msgs.Image", + ] + ), +) + +__all__ = ["unitree_g1_shm"] diff --git a/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1_sim.py b/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1_sim.py new file mode 100644 index 0000000000..059102c7a5 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/perceptive/unitree_g1_sim.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""G1 sim stack with perception and memory.""" + +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.g1.blueprints.basic.unitree_g1_basic_sim import unitree_g1_basic_sim +from dimos.robot.unitree.g1.blueprints.perceptive._perception_and_memory import ( + _perception_and_memory, +) + +unitree_g1_sim = autoconnect( + unitree_g1_basic_sim, + _perception_and_memory, +).global_config(n_dask_workers=8) + +__all__ = ["unitree_g1_sim"] diff --git a/dimos/robot/unitree/g1/blueprints/primitive/__init__.py b/dimos/robot/unitree/g1/blueprints/primitive/__init__.py new file mode 100644 index 0000000000..833f767728 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/primitive/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Primitive blueprints for Unitree G1.""" diff --git a/dimos/robot/unitree/g1/blueprints/primitive/uintree_g1_primitive_no_nav.py b/dimos/robot/unitree/g1/blueprints/primitive/uintree_g1_primitive_no_nav.py new file mode 100644 index 0000000000..c61297be90 --- /dev/null +++ b/dimos/robot/unitree/g1/blueprints/primitive/uintree_g1_primitive_no_nav.py @@ -0,0 +1,84 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Minimal G1 stack without navigation, used as a base for larger blueprints.""" + +from dimos_lcm.sensor_msgs import CameraInfo + +from dimos.core.blueprints import autoconnect +from dimos.core.transport import LCMTransport +from dimos.hardware.sensors.camera import zed +from dimos.hardware.sensors.camera.module import camera_module # type: ignore[attr-defined] +from dimos.hardware.sensors.camera.webcam import Webcam +from dimos.mapping.costmapper import cost_mapper +from dimos.mapping.voxels import voxel_mapper +from dimos.msgs.geometry_msgs import PoseStamped, Quaternion, Transform, Twist, Vector3 +from dimos.msgs.nav_msgs import Odometry, Path +from dimos.msgs.sensor_msgs import Image, PointCloud2 +from dimos.msgs.std_msgs import Bool +from dimos.navigation.frontier_exploration import wavefront_frontier_explorer +from dimos.robot.foxglove_bridge import foxglove_bridge +from dimos.web.websocket_vis.websocket_vis_module import websocket_vis + +uintree_g1_primitive_no_nav = ( + 
autoconnect( + camera_module( + transform=Transform( + translation=Vector3(0.05, 0.0, 0.6), # height of camera on G1 robot + rotation=Quaternion.from_euler(Vector3(0.0, 0.2, 0.0)), + frame_id="sensor", + child_frame_id="camera_link", + ), + hardware=lambda: Webcam( + camera_index=0, + fps=15, + stereo_slice="left", + camera_info=zed.CameraInfo.SingleWebcam, + ), + ), + voxel_mapper(voxel_size=0.1), + cost_mapper(), + wavefront_frontier_explorer(), + # Visualization + websocket_vis(), + foxglove_bridge(), + ) + .global_config(n_dask_workers=4, robot_model="unitree_g1") + .transports( + { + # G1 uses Twist for movement commands + ("cmd_vel", Twist): LCMTransport("/cmd_vel", Twist), + # State estimation from ROS + ("state_estimation", Odometry): LCMTransport("/state_estimation", Odometry), + # Odometry output from ROSNavigationModule + ("odom", PoseStamped): LCMTransport("/odom", PoseStamped), + # Navigation module topics from nav_bot + ("goal_req", PoseStamped): LCMTransport("/goal_req", PoseStamped), + ("goal_active", PoseStamped): LCMTransport("/goal_active", PoseStamped), + ("path_active", Path): LCMTransport("/path_active", Path), + ("pointcloud", PointCloud2): LCMTransport("/lidar", PointCloud2), + ("global_pointcloud", PointCloud2): LCMTransport("/map", PointCloud2), + # Original navigation topics for backwards compatibility + ("goal_pose", PoseStamped): LCMTransport("/goal_pose", PoseStamped), + ("goal_reached", Bool): LCMTransport("/goal_reached", Bool), + ("cancel_goal", Bool): LCMTransport("/cancel_goal", Bool), + # Camera topics (if camera module is added) + ("color_image", Image): LCMTransport("/g1/color_image", Image), + ("camera_info", CameraInfo): LCMTransport("/g1/camera_info", CameraInfo), + } + ) +) + +__all__ = ["uintree_g1_primitive_no_nav"] diff --git a/dimos/robot/unitree/connection/g1.py b/dimos/robot/unitree/g1/connection.py similarity index 97% rename from dimos/robot/unitree/connection/g1.py rename to dimos/robot/unitree/g1/connection.py 
index 7f8e7233d0..f12d0ee0e6 100644 --- a/dimos/robot/unitree/connection/g1.py +++ b/dimos/robot/unitree/g1/connection.py @@ -21,7 +21,7 @@ from dimos.core import DimosCluster, In, Module, rpc from dimos.core.global_config import GlobalConfig, global_config from dimos.msgs.geometry_msgs import Twist -from dimos.robot.unitree.connection.connection import UnitreeWebRTCConnection +from dimos.robot.unitree.connection import UnitreeWebRTCConnection from dimos.utils.logging_config import setup_logger logger = setup_logger() diff --git a/dimos/robot/unitree/g1/g1agent.py b/dimos/robot/unitree/g1/g1agent.py deleted file mode 100644 index a95a905b7d..0000000000 --- a/dimos/robot/unitree/g1/g1agent.py +++ /dev/null @@ -1,48 +0,0 @@ -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from dimos import agents -from dimos.agents.skills.navigation import NavigationSkillContainer -from dimos.core import DimosCluster -from dimos.perception import spatial_perception -from dimos.robot.unitree.g1 import g1detector - - -def deploy(dimos: DimosCluster, ip: str): # type: ignore[no-untyped-def] - g1 = g1detector.deploy(dimos, ip) - - nav = g1.get("nav") - camera = g1.get("camera") - detector3d = g1.get("detector3d") - connection = g1.get("connection") - - spatialmem = spatial_perception.deploy(dimos, camera) - - navskills = dimos.deploy( # type: ignore[attr-defined] - NavigationSkillContainer, - spatialmem, - nav, - detector3d, - ) - navskills.start() - - agent = agents.deploy( # type: ignore[attr-defined] - dimos, - "You are controling a humanoid robot", - skill_containers=[connection, nav, camera, spatialmem, navskills], - ) - agent.run_implicit_skill("current_position") - agent.run_implicit_skill("video_stream") - - return {"agent": agent, "spatialmem": spatialmem, **g1} diff --git a/dimos/robot/unitree/g1/g1detector.py b/dimos/robot/unitree/g1/g1detector.py deleted file mode 100644 index 55986eb087..0000000000 --- a/dimos/robot/unitree/g1/g1detector.py +++ /dev/null @@ -1,41 +0,0 @@ -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from dimos.core import DimosCluster -from dimos.perception.detection import module3D, moduleDB -from dimos.perception.detection.detectors.person.yolo import YoloPersonDetector -from dimos.robot.unitree.g1 import g1zed - - -def deploy(dimos: DimosCluster, ip: str): # type: ignore[no-untyped-def] - g1 = g1zed.deploy(dimos, ip) - - nav = g1.get("nav") - camera = g1.get("camera") - - person_detector = module3D.deploy( - dimos, - camera=camera, - lidar=nav, - detector=YoloPersonDetector, - ) - - detector3d = moduleDB.deploy( # type: ignore[attr-defined] - dimos, - camera=camera, - lidar=nav, - filter=lambda det: det.class_id != 0, - ) - - return {"person_detector": person_detector, "detector3d": detector3d, **g1} diff --git a/dimos/robot/unitree/g1/g1zed.py b/dimos/robot/unitree/g1/g1zed.py deleted file mode 100644 index cafcbec909..0000000000 --- a/dimos/robot/unitree/g1/g1zed.py +++ /dev/null @@ -1,90 +0,0 @@ -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- -from typing import TypedDict, cast - -from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE -from dimos.core import DimosCluster, LCMTransport, pSHMTransport -from dimos.hardware.sensors.camera import zed -from dimos.hardware.sensors.camera.module import CameraModule -from dimos.hardware.sensors.camera.webcam import Webcam -from dimos.msgs.geometry_msgs import ( - Quaternion, - Transform, - Vector3, -) -from dimos.msgs.sensor_msgs import CameraInfo -from dimos.navigation import rosnav -from dimos.navigation.rosnav import ROSNav -from dimos.robot import foxglove_bridge -from dimos.robot.unitree.connection import g1 -from dimos.robot.unitree.connection.g1 import G1Connection -from dimos.utils.logging_config import setup_logger - -logger = setup_logger() - - -class G1ZedDeployResult(TypedDict): - nav: ROSNav - connection: G1Connection - camera: CameraModule - camerainfo: CameraInfo - - -def deploy_g1_monozed(dimos: DimosCluster) -> CameraModule: - camera = cast( - "CameraModule", - dimos.deploy( # type: ignore[attr-defined] - CameraModule, - frequency=4.0, - transform=Transform( - translation=Vector3(0.05, 0.0, 0.0), - rotation=Quaternion.from_euler(Vector3(0.0, 0.0, 0.0)), - frame_id="sensor", - child_frame_id="camera_link", - ), - hardware=lambda: Webcam( - camera_index=0, - fps=5, - stereo_slice="left", - camera_info=zed.CameraInfo.SingleWebcam, - ), - ), - ) - - camera.color_image.transport = pSHMTransport( - "/image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE - ) - camera.camera_info.transport = LCMTransport("/camera_info", CameraInfo) - camera.start() - return camera - - -def deploy(dimos: DimosCluster, ip: str): # type: ignore[no-untyped-def] - nav = rosnav.deploy( # type: ignore[call-arg] - dimos, - sensor_to_base_link_transform=Transform( - frame_id="sensor", child_frame_id="base_link", translation=Vector3(0.0, 0.0, 1.5) - ), - ) - connection = g1.deploy(dimos, ip, nav) - zedcam = deploy_g1_monozed(dimos) - - foxglove_bridge.deploy(dimos) - - return 
{ - "nav": nav, - "connection": connection, - "camera": zedcam, - } diff --git a/dimos/robot/unitree/connection/g1sim.py b/dimos/robot/unitree/g1/sim.py similarity index 94% rename from dimos/robot/unitree/connection/g1sim.py rename to dimos/robot/unitree/g1/sim.py index d964d86758..4502e62a1f 100644 --- a/dimos/robot/unitree/connection/g1sim.py +++ b/dimos/robot/unitree/g1/sim.py @@ -28,11 +28,11 @@ Vector3, ) from dimos.msgs.sensor_msgs import PointCloud2 -from dimos.robot.unitree_webrtc.type.odometry import Odometry as SimOdometry +from dimos.robot.unitree.type.odometry import Odometry as SimOdometry from dimos.utils.logging_config import setup_logger if TYPE_CHECKING: - from dimos.robot.unitree_webrtc.mujoco_connection import MujocoConnection + from dimos.robot.unitree.mujoco_connection import MujocoConnection logger = setup_logger() @@ -60,7 +60,7 @@ def __init__( def start(self) -> None: super().start() - from dimos.robot.unitree_webrtc.mujoco_connection import MujocoConnection + from dimos.robot.unitree.mujoco_connection import MujocoConnection self.connection = MujocoConnection(self._global_config) assert self.connection is not None diff --git a/dimos/robot/unitree_webrtc/unitree_g1_skill_container.py b/dimos/robot/unitree/g1/skill_container.py similarity index 100% rename from dimos/robot/unitree_webrtc/unitree_g1_skill_container.py rename to dimos/robot/unitree/g1/skill_container.py diff --git a/dimos/robot/unitree/go2/blueprints/__init__.py b/dimos/robot/unitree/go2/blueprints/__init__.py new file mode 100644 index 0000000000..cbc49694f3 --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/__init__.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Cascaded GO2 blueprints split into focused modules.""" + +import lazy_loader as lazy + +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "agentic._common_agentic": ["_common_agentic"], + "agentic.unitree_go2_agentic": ["unitree_go2_agentic"], + "agentic.unitree_go2_agentic_huggingface": ["unitree_go2_agentic_huggingface"], + "agentic.unitree_go2_agentic_mcp": ["unitree_go2_agentic_mcp"], + "agentic.unitree_go2_agentic_ollama": ["unitree_go2_agentic_ollama"], + "agentic.unitree_go2_temporal_memory": ["unitree_go2_temporal_memory"], + "basic.unitree_go2_basic": ["_linux", "_mac", "unitree_go2_basic"], + "smart._with_jpeg": ["_with_jpeglcm"], + "smart.unitree_go2": ["unitree_go2"], + "smart.unitree_go2_detection": ["unitree_go2_detection"], + "smart.unitree_go2_ros": ["unitree_go2_ros"], + "smart.unitree_go2_spatial": ["unitree_go2_spatial"], + "smart.unitree_go2_vlm_stream_test": ["unitree_go2_vlm_stream_test"], + }, +) diff --git a/dimos/robot/unitree/go2/blueprints/agentic/__init__.py b/dimos/robot/unitree/go2/blueprints/agentic/__init__.py new file mode 100644 index 0000000000..84d1b41b23 --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/agentic/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Agentic blueprints for Unitree GO2.""" diff --git a/dimos/robot/unitree/go2/blueprints/agentic/_common_agentic.py b/dimos/robot/unitree/go2/blueprints/agentic/_common_agentic.py new file mode 100644 index 0000000000..cf9a0ae086 --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/agentic/_common_agentic.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dimos.agents.cli.human import human_input +from dimos.agents.cli.web import web_input +from dimos.agents.skills.navigation import navigation_skill +from dimos.agents.skills.person_follow import person_follow_skill +from dimos.agents.skills.speak_skill import speak_skill +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.go2.connection import GO2Connection +from dimos.robot.unitree.unitree_skill_container import unitree_skills + +_common_agentic = autoconnect( + human_input(), + navigation_skill(), + person_follow_skill(camera_info=GO2Connection.camera_info_static), + unitree_skills(), + web_input(), + speak_skill(), +) + +__all__ = ["_common_agentic"] diff --git a/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic.py b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic.py new file mode 100644 index 0000000000..0db6d16980 --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic.py @@ -0,0 +1,27 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dimos.agents.agent import llm_agent +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.go2.blueprints.agentic._common_agentic import _common_agentic +from dimos.robot.unitree.go2.blueprints.smart.unitree_go2_spatial import unitree_go2_spatial + +unitree_go2_agentic = autoconnect( + unitree_go2_spatial, + llm_agent(), + _common_agentic, +) + +__all__ = ["unitree_go2_agentic"] diff --git a/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_huggingface.py b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_huggingface.py new file mode 100644 index 0000000000..90198c2493 --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_huggingface.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dimos.agents.agent import llm_agent +from dimos.agents.spec import Provider +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.go2.blueprints.agentic._common_agentic import _common_agentic +from dimos.robot.unitree.go2.blueprints.smart.unitree_go2_spatial import unitree_go2_spatial + +unitree_go2_agentic_huggingface = autoconnect( + unitree_go2_spatial, + llm_agent( + model="Qwen/Qwen2.5-1.5B-Instruct", + provider=Provider.HUGGINGFACE, # type: ignore[attr-defined] + ), + _common_agentic, +) + +__all__ = ["unitree_go2_agentic_huggingface"] diff --git a/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_mcp.py b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_mcp.py new file mode 100644 index 0000000000..bbc3e4c216 --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_mcp.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dimos.core.blueprints import autoconnect +from dimos.protocol.mcp.mcp import MCPModule +from dimos.robot.unitree.go2.blueprints.agentic.unitree_go2_agentic import unitree_go2_agentic + +unitree_go2_agentic_mcp = autoconnect( + unitree_go2_agentic, + MCPModule.blueprint(), +) + +__all__ = ["unitree_go2_agentic_mcp"] diff --git a/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_ollama.py b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_ollama.py new file mode 100644 index 0000000000..529f50f4ae --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_agentic_ollama.py @@ -0,0 +1,34 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dimos.agents.agent import llm_agent +from dimos.agents.ollama_agent import ollama_installed +from dimos.agents.spec import Provider +from dimos.core.blueprints import autoconnect +from dimos.robot.unitree.go2.blueprints.agentic._common_agentic import _common_agentic +from dimos.robot.unitree.go2.blueprints.smart.unitree_go2_spatial import unitree_go2_spatial + +unitree_go2_agentic_ollama = autoconnect( + unitree_go2_spatial, + llm_agent( + model="qwen3:8b", + provider=Provider.OLLAMA, # type: ignore[attr-defined] + ), + _common_agentic, +).requirements( + ollama_installed, +) + +__all__ = ["unitree_go2_agentic_ollama"] diff --git a/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_temporal_memory.py b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_temporal_memory.py new file mode 100644 index 0000000000..017ccaba2b --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/agentic/unitree_go2_temporal_memory.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dimos.core.blueprints import autoconnect +from dimos.perception.experimental.temporal_memory import temporal_memory +from dimos.robot.unitree.go2.blueprints.agentic.unitree_go2_agentic import unitree_go2_agentic + +unitree_go2_temporal_memory = autoconnect( + unitree_go2_agentic, + temporal_memory(), +) + +__all__ = ["unitree_go2_temporal_memory"] diff --git a/dimos/robot/unitree/go2/blueprints/basic/__init__.py b/dimos/robot/unitree/go2/blueprints/basic/__init__.py new file mode 100644 index 0000000000..79964b0297 --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/basic/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Basic blueprints for Unitree GO2.""" diff --git a/dimos/robot/unitree/go2/blueprints/basic/unitree_go2_basic.py b/dimos/robot/unitree/go2/blueprints/basic/unitree_go2_basic.py new file mode 100644 index 0000000000..3cb84dfd9f --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/basic/unitree_go2_basic.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python3 + +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import platform
+
+from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE
+from dimos.core.blueprints import autoconnect
+from dimos.core.global_config import global_config
+from dimos.core.transport import pSHMTransport
+from dimos.msgs.sensor_msgs import Image
+from dimos.protocol.pubsub.impl.lcmpubsub import LCM
+from dimos.robot.unitree.go2.connection import go2_connection
+from dimos.web.websocket_vis.websocket_vis_module import websocket_vis
+
+# Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image
+# actually we can use pSHMTransport for all platforms, and for all streams
+# TODO need a global transport toggle on blueprints/global config
+_mac_transports: dict[tuple[str, type], pSHMTransport[Image]] = {
+    ("color_image", Image): pSHMTransport(
+        "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE
+    ),
+}
+
+_transports_base = (
+    autoconnect() if platform.system() == "Linux" else autoconnect().transports(_mac_transports)
+)
+
+rerun_config = {
+    # any pubsub that supports subscribe_all and topic that supports str(topic)
+    # is acceptable here
+    "pubsubs": [LCM(autoconf=True)],
+    # Custom converters for specific rerun entity paths
+    # Normally all these would be specified in their respective modules
+    # Until this is implemented we have central overrides here
+    #
+    # This is unsustainable once we move to multi robot etc
+    "visual_override": {
+        "world/camera_info": lambda camera_info: camera_info.to_rerun(
+            image_topic="/world/color_image",
+            optical_frame="camera_optical",
+        ),
+        "world/global_map": 
lambda grid: grid.to_rerun(voxel_size=0.1), + "world/navigation_costmap": lambda grid: grid.to_rerun( + colormap="Accent", + z_offset=0.015, + opacity=0.2, + background="#484981", + ), + }, + # slapping a go2 shaped box on top of tf/base_link + "static": { + "world/tf/base_link": lambda rr: [ + rr.Boxes3D( + half_sizes=[0.35, 0.155, 0.2], + colors=[(0, 255, 127)], + fill_mode="wireframe", + ), + rr.Transform3D(parent_frame="tf#/base_link"), + ] + }, +} + + +match global_config.viewer_backend: + case "foxglove": + from dimos.robot.foxglove_bridge import foxglove_bridge + + with_vis = autoconnect( + _transports_base, + foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), + ) + case "rerun": + from dimos.visualization.rerun.bridge import rerun_bridge + + with_vis = autoconnect(_transports_base, rerun_bridge(**rerun_config)) + case "rerun-web": + from dimos.visualization.rerun.bridge import rerun_bridge + + with_vis = autoconnect(_transports_base, rerun_bridge(viewer_mode="web", **rerun_config)) + case _: + with_vis = _transports_base + +unitree_go2_basic = autoconnect( + with_vis, + go2_connection(), + websocket_vis(), +).global_config(n_dask_workers=4, robot_model="unitree_go2") + +__all__ = [ + "unitree_go2_basic", +] diff --git a/dimos/robot/unitree/go2/blueprints/smart/__init__.py b/dimos/robot/unitree/go2/blueprints/smart/__init__.py new file mode 100644 index 0000000000..7d5bdbc3ab --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/smart/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Smart blueprints for Unitree GO2.""" diff --git a/dimos/robot/unitree/go2/blueprints/smart/_with_jpeg.py b/dimos/robot/unitree/go2/blueprints/smart/_with_jpeg.py new file mode 100644 index 0000000000..9c77d599cf --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/smart/_with_jpeg.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dimos.core.transport import JpegLcmTransport +from dimos.msgs.sensor_msgs import Image +from dimos.robot.unitree.go2.blueprints.smart.unitree_go2 import unitree_go2 + +_with_jpeglcm = unitree_go2.transports( + { + ("color_image", Image): JpegLcmTransport("/color_image", Image), + } +) + +__all__ = ["_with_jpeglcm"] diff --git a/dimos/robot/unitree/go2/blueprints/smart/unitree_go2.py b/dimos/robot/unitree/go2/blueprints/smart/unitree_go2.py new file mode 100644 index 0000000000..5d096444d5 --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/smart/unitree_go2.py @@ -0,0 +1,31 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from dimos.core.blueprints import autoconnect +from dimos.mapping.costmapper import cost_mapper +from dimos.mapping.voxels import voxel_mapper +from dimos.navigation.frontier_exploration import wavefront_frontier_explorer +from dimos.navigation.replanning_a_star.module import replanning_a_star_planner +from dimos.robot.unitree.go2.blueprints.basic.unitree_go2_basic import unitree_go2_basic + +unitree_go2 = autoconnect( + unitree_go2_basic, + voxel_mapper(voxel_size=0.1), + cost_mapper(), + replanning_a_star_planner(), + wavefront_frontier_explorer(), +).global_config(n_dask_workers=6, robot_model="unitree_go2") + +__all__ = ["unitree_go2"] diff --git a/dimos/robot/unitree/go2/blueprints/smart/unitree_go2_detection.py b/dimos/robot/unitree/go2/blueprints/smart/unitree_go2_detection.py new file mode 100644 index 0000000000..f2edf2cb3b --- /dev/null +++ b/dimos/robot/unitree/go2/blueprints/smart/unitree_go2_detection.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
"""Smart Go2 blueprint with 3D object detection, outputs published over LCM."""

from dimos_lcm.foxglove_msgs.ImageAnnotations import (
    ImageAnnotations,  # type: ignore[import-untyped]
)
from dimos_lcm.foxglove_msgs.SceneUpdate import SceneUpdate  # type: ignore[import-untyped]

from dimos.core.blueprints import autoconnect
from dimos.core.transport import LCMTransport
from dimos.msgs.sensor_msgs import Image, PointCloud2
from dimos.msgs.vision_msgs import Detection2DArray
from dimos.perception.detection.module3D import Detection3DModule, detection3d_module
from dimos.robot.unitree.go2.blueprints.smart.unitree_go2 import unitree_go2
from dimos.robot.unitree.go2.connection import GO2Connection

# LCM transports for every Detection3DModule output.  The three fixed outputs
# are listed explicitly; the per-slot pointcloud/image outputs (slots 0-2) are
# generated below so the topic names stay in lockstep with the channel names.
_transports = {
    ("detections", Detection3DModule): LCMTransport("/detector3d/detections", Detection2DArray),
    ("annotations", Detection3DModule): LCMTransport("/detector3d/annotations", ImageAnnotations),
    ("scene_update", Detection3DModule): LCMTransport("/detector3d/scene_update", SceneUpdate),
}
_transports.update(
    {
        (f"detected_pointcloud_{slot}", Detection3DModule): LCMTransport(
            f"/detector3d/pointcloud/{slot}", PointCloud2
        )
        for slot in range(3)
    }
)
_transports.update(
    {
        (f"detected_image_{slot}", Detection3DModule): LCMTransport(
            f"/detector3d/image/{slot}", Image
        )
        for slot in range(3)
    }
)

# Feed the detector from the accumulated global map rather than the raw
# per-frame pointcloud channel.
_remappings = [(Detection3DModule, "pointcloud", "global_map")]

unitree_go2_detection = (
    autoconnect(
        unitree_go2,
        detection3d_module(
            camera_info=GO2Connection.camera_info_static,
        ),
    )
    .remappings(_remappings)
    .transports(_transports)
)

__all__ = ["unitree_go2_detection"]
"""Smart Go2 blueprint that mirrors its key channels onto ROS topics."""

from dimos.core.transport import ROSTransport
from dimos.msgs.geometry_msgs import PoseStamped
from dimos.msgs.sensor_msgs import Image, PointCloud2
from dimos.robot.unitree.go2.blueprints.smart.unitree_go2 import unitree_go2

# Channels to expose over ROS, as (channel/topic name, message type) pairs.
# The ROS topic name is deliberately identical to the blueprint channel name.
_ros_channels = (
    ("lidar", PointCloud2),
    ("global_map", PointCloud2),
    ("odom", PoseStamped),
    ("color_image", Image),
)

unitree_go2_ros = unitree_go2.transports(
    {(name, msg): ROSTransport(name, msg) for name, msg in _ros_channels}
)

__all__ = ["unitree_go2_ros"]
"""Smart Go2 blueprint extended with spatial memory and utilization monitoring."""

from dimos.core.blueprints import autoconnect
from dimos.perception.spatial_perception import spatial_memory
from dimos.robot.unitree.go2.blueprints.smart.unitree_go2 import unitree_go2
from dimos.utils.monitoring import utilization

# The extra modules need extra compute: raise the Dask worker count to 8
# (the base smart blueprint configures 6).
unitree_go2_spatial = autoconnect(
    unitree_go2,
    spatial_memory(),
    utilization(),
).global_config(n_dask_workers=8)

__all__ = ["unitree_go2_spatial"]
"""Test blueprint: exercise the VLM agent against the Go2 camera stream."""

from dimos.agents.vlm_agent import vlm_agent
from dimos.agents.vlm_stream_tester import vlm_stream_tester
from dimos.core.blueprints import autoconnect
from dimos.robot.unitree.go2.blueprints.basic.unitree_go2_basic import unitree_go2_basic

# Builds on the *basic* blueprint (no mapping/navigation): just the robot
# connection, a VLM agent, and the stream tester wired together by autoconnect.
unitree_go2_vlm_stream_test = autoconnect(
    unitree_go2_basic,
    vlm_agent(),
    vlm_stream_tester(),
)

__all__ = ["unitree_go2_vlm_stream_test"]
-# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import logging - -from dimos.core import DimosCluster -from dimos.robot import foxglove_bridge -from dimos.robot.unitree.connection import go2 -from dimos.utils.logging_config import setup_logger - -logger = setup_logger(level=logging.INFO) - - -def deploy(dimos: DimosCluster, ip: str): # type: ignore[no-untyped-def] - connection = go2.deploy(dimos, ip) - foxglove_bridge.deploy(dimos) - - # detector = moduleDB.deploy( - # dimos, - # camera=connection, - # lidar=connection, - # ) - - # agent = agents.deploy(dimos) - # agent.register_skills(detector) - return connection diff --git a/dimos/robot/unitree_webrtc/keyboard_teleop.py b/dimos/robot/unitree/keyboard_teleop.py similarity index 99% rename from dimos/robot/unitree_webrtc/keyboard_teleop.py rename to dimos/robot/unitree/keyboard_teleop.py index 8e0d987127..3d7d4c263e 100644 --- a/dimos/robot/unitree_webrtc/keyboard_teleop.py +++ b/dimos/robot/unitree/keyboard_teleop.py @@ -45,7 +45,7 @@ def __init__(self) -> None: self._stop_event = threading.Event() @rpc - def start(self) -> bool: + def start(self) -> None: super().start() self._keys_held = set() @@ -54,7 +54,7 @@ def start(self) -> bool: self._thread = threading.Thread(target=self._pygame_loop, daemon=True) self._thread.start() - return True + return @rpc def stop(self) -> None: diff --git a/dimos/robot/unitree_webrtc/modular/detect.py b/dimos/robot/unitree/modular/detect.py similarity index 96% rename from 
dimos/robot/unitree_webrtc/modular/detect.py rename to dimos/robot/unitree/modular/detect.py index 8f92d15e81..e5999e9fd8 100644 --- a/dimos/robot/unitree_webrtc/modular/detect.py +++ b/dimos/robot/unitree/modular/detect.py @@ -18,8 +18,8 @@ from dimos.msgs.sensor_msgs import Image, PointCloud2 from dimos.msgs.std_msgs import Header -from dimos.robot.unitree_webrtc.type.lidar import pointcloud2_from_webrtc_lidar -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.lidar import pointcloud2_from_webrtc_lidar +from dimos.robot.unitree.type.odometry import Odometry image_resize_factor = 1 originalwidth, originalheight = (1280, 720) @@ -141,7 +141,7 @@ def process_data(): # type: ignore[no-untyped-def] Detection2DModule, build_imageannotations, ) - from dimos.robot.unitree_webrtc.type.odometry import Odometry + from dimos.robot.unitree.type.odometry import Odometry from dimos.utils.data import get_data from dimos.utils.testing import TimedSensorReplay diff --git a/dimos/robot/unitree_webrtc/mujoco_connection.py b/dimos/robot/unitree/mujoco_connection.py similarity index 99% rename from dimos/robot/unitree_webrtc/mujoco_connection.py rename to dimos/robot/unitree/mujoco_connection.py index 1bfdef2e3c..f998ae1dd9 100644 --- a/dimos/robot/unitree_webrtc/mujoco_connection.py +++ b/dimos/robot/unitree/mujoco_connection.py @@ -37,7 +37,7 @@ from dimos.core.global_config import GlobalConfig from dimos.msgs.geometry_msgs import Quaternion, Twist, Vector3 from dimos.msgs.sensor_msgs import CameraInfo, Image, ImageFormat, PointCloud2 -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.odometry import Odometry from dimos.simulation.mujoco.constants import ( LAUNCHER_PATH, LIDAR_FPS, diff --git a/dimos/robot/unitree_webrtc/params/front_camera_720.yaml b/dimos/robot/unitree/params/front_camera_720.yaml similarity index 100% rename from dimos/robot/unitree_webrtc/params/front_camera_720.yaml rename to 
dimos/robot/unitree/params/front_camera_720.yaml diff --git a/dimos/robot/unitree_webrtc/params/sim_camera.yaml b/dimos/robot/unitree/params/sim_camera.yaml similarity index 100% rename from dimos/robot/unitree_webrtc/params/sim_camera.yaml rename to dimos/robot/unitree/params/sim_camera.yaml diff --git a/dimos/robot/unitree_webrtc/rosnav.py b/dimos/robot/unitree/rosnav.py similarity index 98% rename from dimos/robot/unitree_webrtc/rosnav.py rename to dimos/robot/unitree/rosnav.py index 3244ecfd05..7a9b98b678 100644 --- a/dimos/robot/unitree_webrtc/rosnav.py +++ b/dimos/robot/unitree/rosnav.py @@ -119,7 +119,7 @@ def go_to(self, pose: PoseStamped, timeout: float = 60.0) -> bool: return False @rpc - def stop(self) -> bool: + def stop(self) -> None: """ Cancel current navigation by publishing to cancel_goal. @@ -131,6 +131,6 @@ def stop(self) -> bool: if self.cancel_goal: cancel_msg = Bool(data=True) self.cancel_goal.publish(cancel_msg) - return True + return - return False + return diff --git a/dimos/robot/unitree/run.py b/dimos/robot/unitree/run.py deleted file mode 100644 index 5b17ad7a9d..0000000000 --- a/dimos/robot/unitree/run.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -""" -Centralized runner for modular Unitree robot deployment scripts. 
- -Usage: - python run.py g1agent --ip 192.168.1.100 - python run.py g1/g1zed --ip $ROBOT_IP - python run.py go2/go2.py --ip $ROBOT_IP - python run.py connection/g1.py --ip $ROBOT_IP -""" - -import argparse -import importlib -import os -import sys - -from dotenv import load_dotenv - -from dimos.core import start, wait_exit - - -def main() -> None: - load_dotenv() - - parser = argparse.ArgumentParser(description="Unitree Robot Modular Deployment Runner") - parser.add_argument( - "module", - help="Module name/path to run (e.g., g1agent, g1/g1zed, go2/go2.py)", - ) - parser.add_argument( - "--ip", - default=os.getenv("ROBOT_IP"), - help="Robot IP address (default: ROBOT_IP from .env)", - ) - parser.add_argument( - "--workers", - type=int, - default=8, - help="Number of worker threads for DimosCluster (default: 8)", - ) - - args = parser.parse_args() - - # Validate IP address - if not args.ip: - print("ERROR: Robot IP address not provided") - print("Please provide --ip or set ROBOT_IP in .env") - sys.exit(1) - - # Parse the module path - module_path = args.module - - # Remove .py extension if present - if module_path.endswith(".py"): - module_path = module_path[:-3] - - # Convert path separators to dots for import - module_path = module_path.replace("/", ".") - - # Import the module - try: - # Build the full import path - full_module_path = f"dimos.robot.unitree.{module_path}" - print(f"Importing module: {full_module_path}") - module = importlib.import_module(full_module_path) - except ImportError: - # Try as a relative import from the unitree package - try: - module = importlib.import_module(f".{module_path}", package="dimos.robot.unitree") - except ImportError as e2: - import traceback - - traceback.print_exc() - - print(f"\nERROR: Could not import module '{args.module}'") - print("Tried importing as:") - print(f" 1. {full_module_path}") - print(" 2. 
Relative import from dimos.robot.unitree") - print("Make sure the module exists in dimos/robot/unitree/") - print(f"Import error: {e2}") - - sys.exit(1) - - # Verify deploy function exists - if not hasattr(module, "deploy"): - print(f"ERROR: Module '{args.module}' does not have a 'deploy' function") - sys.exit(1) - - print(f"Running {args.module}.deploy() with IP {args.ip}") - - # Run the standard deployment pattern - dimos = start(args.workers) - try: - module.deploy(dimos, args.ip) - wait_exit() - finally: - dimos.close_all() # type: ignore[attr-defined] - - -if __name__ == "__main__": - main() diff --git a/dimos/robot/unitree/testing/__init__.py b/dimos/robot/unitree/testing/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/dimos/robot/unitree_webrtc/testing/helpers.py b/dimos/robot/unitree/testing/helpers.py similarity index 100% rename from dimos/robot/unitree_webrtc/testing/helpers.py rename to dimos/robot/unitree/testing/helpers.py diff --git a/dimos/robot/unitree_webrtc/testing/mock.py b/dimos/robot/unitree/testing/mock.py similarity index 97% rename from dimos/robot/unitree_webrtc/testing/mock.py rename to dimos/robot/unitree/testing/mock.py index 2af1754cb4..26e6a90018 100644 --- a/dimos/robot/unitree_webrtc/testing/mock.py +++ b/dimos/robot/unitree/testing/mock.py @@ -22,7 +22,7 @@ from reactivex.observable import Observable from dimos.msgs.sensor_msgs import PointCloud2 -from dimos.robot.unitree_webrtc.type.lidar import RawLidarMsg, pointcloud2_from_webrtc_lidar +from dimos.robot.unitree.type.lidar import RawLidarMsg, pointcloud2_from_webrtc_lidar class Mock: diff --git a/dimos/robot/unitree_webrtc/testing/test_actors.py b/dimos/robot/unitree/testing/test_actors.py similarity index 97% rename from dimos/robot/unitree_webrtc/testing/test_actors.py rename to dimos/robot/unitree/testing/test_actors.py index e02f292c8b..9366092eb6 100644 --- a/dimos/robot/unitree_webrtc/testing/test_actors.py +++ 
b/dimos/robot/unitree/testing/test_actors.py @@ -20,7 +20,7 @@ from dimos import core from dimos.core import Module, rpc from dimos.msgs.sensor_msgs import PointCloud2 -from dimos.robot.unitree_webrtc.type.map import Map as Mapper +from dimos.robot.unitree.type.map import Map as Mapper @pytest.fixture diff --git a/dimos/robot/unitree_webrtc/testing/test_tooling.py b/dimos/robot/unitree/testing/test_tooling.py similarity index 89% rename from dimos/robot/unitree_webrtc/testing/test_tooling.py rename to dimos/robot/unitree/testing/test_tooling.py index 50b689931e..d1f2eeb169 100644 --- a/dimos/robot/unitree_webrtc/testing/test_tooling.py +++ b/dimos/robot/unitree/testing/test_tooling.py @@ -16,8 +16,8 @@ import pytest -from dimos.robot.unitree_webrtc.type.lidar import pointcloud2_from_webrtc_lidar -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.lidar import pointcloud2_from_webrtc_lidar +from dimos.robot.unitree.type.odometry import Odometry from dimos.utils.reactive import backpressure from dimos.utils.testing import TimedSensorReplay diff --git a/dimos/robot/unitree/type/__init__.py b/dimos/robot/unitree/type/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/dimos/robot/unitree/type/lidar.py b/dimos/robot/unitree/type/lidar.py new file mode 100644 index 0000000000..df2909dc38 --- /dev/null +++ b/dimos/robot/unitree/type/lidar.py @@ -0,0 +1,74 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
"""Unitree WebRTC lidar message parsing utilities."""

import time
from typing import TypedDict

import numpy as np
import open3d as o3d  # type: ignore[import-untyped]

from dimos.msgs.sensor_msgs import PointCloud2

# Backwards compatibility alias for pickled data
LidarMessage = PointCloud2


class RawLidarPoints(TypedDict):
    points: np.ndarray  # type: ignore[type-arg] # Shape (N, 3) array of 3D points [x, y, z]


class RawLidarData(TypedDict):
    """Data portion of the LIDAR message"""

    frame_id: str
    origin: list[float]
    resolution: float
    src_size: int
    stamp: float
    width: list[int]
    data: RawLidarPoints


class RawLidarMsg(TypedDict):
    """Static type definition for raw LIDAR message from Unitree WebRTC."""

    type: str
    topic: str
    data: RawLidarData


def pointcloud2_from_webrtc_lidar(raw_message: RawLidarMsg, ts: float | None = None) -> PointCloud2:
    """Convert a raw Unitree WebRTC lidar message to PointCloud2.

    Args:
        raw_message: Raw lidar message from Unitree WebRTC API
        ts: Optional timestamp override. If None, uses current time.

    Returns:
        PointCloud2 message with the lidar points
    """
    xyz = raw_message["data"]["data"]["points"]

    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(xyz)

    # webrtc stamp is broken (e.g., "stamp": 1.758148e+09), use current time
    stamp = time.time() if ts is None else ts

    return PointCloud2(
        pointcloud=cloud,
        ts=stamp,
        frame_id="world",
    )
"""TypedDict schema for the Unitree ``rt/lf/lowstate`` WebRTC message."""

from typing import Literal, TypedDict

# Captured example of a raw low-state message, kept as living documentation
# of the wire format the TypedDicts below describe.
# NOTE(review): the name says "odom" but the topic is rt/lf/lowstate — the
# constant looks misnamed; renaming would break importers, so only flagged here.
raw_odom_msg_sample = {
    "type": "msg",
    "topic": "rt/lf/lowstate",
    "data": {
        "imu_state": {"rpy": [0.008086, -0.007515, 2.981771]},
        # 20 motor slots; trailing all-zero entries are unused slots on the Go2.
        "motor_state": [
            {"q": 0.098092, "temperature": 40, "lost": 0, "reserve": [0, 674]},
            {"q": 0.757921, "temperature": 32, "lost": 0, "reserve": [0, 674]},
            {"q": -1.490911, "temperature": 38, "lost": 6, "reserve": [0, 674]},
            {"q": -0.072477, "temperature": 42, "lost": 0, "reserve": [0, 674]},
            {"q": 1.020276, "temperature": 32, "lost": 5, "reserve": [0, 674]},
            {"q": -2.007172, "temperature": 38, "lost": 5, "reserve": [0, 674]},
            {"q": 0.071382, "temperature": 50, "lost": 5, "reserve": [0, 674]},
            {"q": 0.963379, "temperature": 36, "lost": 6, "reserve": [0, 674]},
            {"q": -1.978311, "temperature": 40, "lost": 5, "reserve": [0, 674]},
            {"q": -0.051066, "temperature": 48, "lost": 0, "reserve": [0, 674]},
            {"q": 0.73103, "temperature": 34, "lost": 10, "reserve": [0, 674]},
            {"q": -1.466473, "temperature": 38, "lost": 6, "reserve": [0, 674]},
            {"q": 0, "temperature": 0, "lost": 0, "reserve": [0, 0]},
            {"q": 0, "temperature": 0, "lost": 0, "reserve": [0, 0]},
            {"q": 0, "temperature": 0, "lost": 0, "reserve": [0, 0]},
            {"q": 0, "temperature": 0, "lost": 0, "reserve": [0, 0]},
            {"q": 0, "temperature": 0, "lost": 0, "reserve": [0, 0]},
            {"q": 0, "temperature": 0, "lost": 0, "reserve": [0, 0]},
            {"q": 0, "temperature": 0, "lost": 0, "reserve": [0, 0]},
            {"q": 0, "temperature": 0, "lost": 0, "reserve": [0, 0]},
        ],
        "bms_state": {
            "version_high": 1,
            "version_low": 18,
            "soc": 55,
            "current": -2481,
            "cycle": 56,
            "bq_ntc": [30, 29],
            "mcu_ntc": [33, 32],
        },
        "foot_force": [97, 84, 81, 81],
        "temperature_ntc1": 48,
        "power_v": 28.331045,
    },
}


class MotorState(TypedDict):
    """Per-motor state entry.

    q is presumably the joint position in radians and temperature in °C —
    TODO confirm units against the Unitree SDK docs.
    """

    q: float
    temperature: int
    lost: int
    reserve: list[int]


class ImuState(TypedDict):
    """IMU orientation as roll/pitch/yaw (3 floats)."""

    rpy: list[float]


class BmsState(TypedDict):
    """Battery management system state (soc = state of charge, percent)."""

    version_high: int
    version_low: int
    soc: int
    current: int
    cycle: int
    bq_ntc: list[int]
    mcu_ntc: list[int]


class LowStateData(TypedDict):
    """Payload of a low-state message (see raw_odom_msg_sample above)."""

    imu_state: ImuState
    motor_state: list[MotorState]
    bms_state: BmsState
    foot_force: list[int]
    temperature_ntc1: int
    power_v: float


class LowStateMsg(TypedDict):
    """Envelope for a low-state message from the Unitree WebRTC API."""

    type: Literal["msg"]
    topic: str
    data: LowStateData
class Map(Module):
    """Accumulates lidar frames into a global point cloud and occupancy grid.

    Subscribes to per-frame lidar pointclouds, folds them into a voxel-
    downsampled accumulator, and (optionally, on a timer) publishes the
    accumulated map plus an occupancy grid derived from it.
    """

    # Module channels: lidar frames in; accumulated map + costmap out.
    lidar: In[PointCloud2]
    global_map: Out[PointCloud2]
    global_costmap: Out[OccupancyGrid]

    _point_cloud_accumulator: PointCloudAccumulator
    _global_config: GlobalConfig
    # Lazily loaded debug grid; see _publish.
    _preloaded_occupancy: OccupancyGrid | None = None

    def __init__(  # type: ignore[no-untyped-def]
        self,
        voxel_size: float = 0.05,
        cost_resolution: float = 0.05,
        global_publish_interval: float | None = None,
        min_height: float = 0.10,
        max_height: float = 0.5,
        cfg: GlobalConfig = global_config,
        **kwargs,
    ) -> None:
        """Configure the accumulator.

        Args:
            voxel_size: Voxel edge length (m) for downsampling the accumulated cloud.
            cost_resolution: Occupancy-grid cell size (m).
            global_publish_interval: Seconds between periodic publishes; None disables the timer.
            min_height / max_height: Height band (m) of points considered obstacles.
            cfg: Global configuration (defaults to the process-wide singleton).
        """
        self.voxel_size = voxel_size
        self.cost_resolution = cost_resolution
        self.global_publish_interval = global_publish_interval
        self.min_height = min_height
        self.max_height = max_height
        self._global_config = cfg
        self._point_cloud_accumulator = GeneralPointCloudAccumulator(
            self.voxel_size, self._global_config
        )

        # In simulation the floor is noisier; raise the obstacle floor cutoff.
        if self._global_config.simulation:
            self.min_height = 0.3

        super().__init__(**kwargs)

    @rpc
    def start(self) -> None:
        """Begin consuming lidar frames and, if configured, periodic publishing."""
        super().start()

        # Disposables are torn down by the Module machinery on stop.
        self._disposables.add(Disposable(self.lidar.subscribe(self.add_frame)))

        if self.global_publish_interval is not None:
            unsub = interval(self.global_publish_interval).subscribe(self._publish)
            self._disposables.add(unsub)

    @rpc
    def stop(self) -> None:
        """Stop the module (subscriptions are disposed by the base class)."""
        super().stop()

    def to_PointCloud2(self) -> PointCloud2:
        """Snapshot the accumulated cloud as a PointCloud2 stamped with 'now'."""
        return PointCloud2(
            pointcloud=self._point_cloud_accumulator.get_point_cloud(),
            ts=time.time(),
        )

    # TODO: Why is this RPC?
    @rpc
    def add_frame(self, frame: PointCloud2) -> None:
        """Fold one lidar frame into the accumulator."""
        self._point_cloud_accumulator.add(frame.pointcloud)

    @property
    def o3d_geometry(self) -> o3d.geometry.PointCloud:
        """The accumulated cloud as a raw open3d PointCloud."""
        return self._point_cloud_accumulator.get_point_cloud()

    def _publish(self, _: Any) -> None:
        """Timer callback: publish the global map and a derived occupancy grid."""
        self.global_map.publish(self.to_PointCloud2())

        # NOTE(review): to_PointCloud2() is called a second time here, re-snapshotting
        # the accumulator rather than reusing the cloud published above.
        occupancygrid = general_occupancy(
            self.to_PointCloud2(),
            resolution=self.cost_resolution,
            min_height=self.min_height,
            max_height=self.max_height,
        )

        # When debugging occupancy navigation, load a predefined occupancy grid.
        if self._global_config.mujoco_global_costmap_from_occupancy:
            if self._preloaded_occupancy is None:
                path = Path(self._global_config.mujoco_global_costmap_from_occupancy)
                self._preloaded_occupancy = OccupancyGrid.from_path(path)
            occupancygrid = self._preloaded_occupancy

        self.global_costmap.publish(occupancygrid)


mapper = Map.blueprint


def deploy(dimos: DimosCluster, connection: Go2ConnectionProtocol):  # type: ignore[no-untyped-def]
    """Deploy a Map on the cluster, wire LCM transports, and start it.

    NOTE: the local ``mapper`` shadows the module-level blueprint of the same name.
    """
    mapper = dimos.deploy(Map, global_publish_interval=1.0)  # type: ignore[attr-defined]
    mapper.global_map.transport = LCMTransport("/global_map", PointCloud2)
    mapper.global_costmap.transport = LCMTransport("/global_costmap", OccupancyGrid)
    mapper.lidar.connect(connection.pointcloud)  # type: ignore[attr-defined]
    mapper.start()
    return mapper


__all__ = ["Map", "mapper"]
from typing import Literal, TypedDict

from dimos.msgs.geometry_msgs import PoseStamped, Quaternion, Vector3
from dimos.robot.unitree.type.timeseries import (
    Timestamped,
)
from dimos.types.timestamped import to_timestamp

# Captured example of a raw robot-pose message, kept as living documentation
# of the wire format the TypedDicts below describe.
raw_odometry_msg_sample = {
    "type": "msg",
    "topic": "rt/utlidar/robot_pose",
    "data": {
        "header": {"stamp": {"sec": 1746565669, "nanosec": 448350564}, "frame_id": "odom"},
        "pose": {
            "position": {"x": 5.961965, "y": -2.916958, "z": 0.319509},
            "orientation": {"x": 0.002787, "y": -0.000902, "z": -0.970244, "w": -0.242112},
        },
    },
}


class TimeStamp(TypedDict):
    """ROS-style timestamp: whole seconds plus nanoseconds."""

    sec: int
    nanosec: int


class Header(TypedDict):
    stamp: TimeStamp
    frame_id: str


class RawPosition(TypedDict):
    x: float
    y: float
    z: float


class Orientation(TypedDict):
    """Quaternion components."""

    x: float
    y: float
    z: float
    w: float


class PoseData(TypedDict):
    position: RawPosition
    orientation: Orientation


class OdometryData(TypedDict):
    header: Header
    pose: PoseData


class RawOdometryMessage(TypedDict):
    """Envelope for a raw robot-pose message from the Unitree WebRTC API."""

    type: Literal["msg"]
    topic: str
    data: OdometryData


class Odometry(PoseStamped, Timestamped):  # type: ignore[misc]
    """A PoseStamped parsed from the Unitree robot-pose WebRTC message."""

    # Serialized message name; kept identical to PoseStamped for wire compat.
    name = "geometry_msgs.PoseStamped"

    def __init__(self, frame_id: str = "base_link", *args, **kwargs) -> None:  # type: ignore[no-untyped-def]
        # Equivalent to the keyword-before-*args call form, but unambiguous:
        # positional args are forwarded first, then frame_id as a keyword.
        super().__init__(*args, frame_id=frame_id, **kwargs)

    @classmethod
    def from_msg(cls, msg: RawOdometryMessage) -> "Odometry":
        """Build an Odometry from a raw WebRTC robot-pose message.

        Raises KeyError if a required field is missing (direct indexing is
        deliberate: the previous .get() calls would silently pass None into
        Vector3/Quaternion on malformed input).
        """
        pose = msg["data"]["pose"]

        # Extract position
        pos = Vector3(
            pose["position"]["x"],
            pose["position"]["y"],
            pose["position"]["z"],
        )

        rot = Quaternion(
            pose["orientation"]["x"],
            pose["orientation"]["y"],
            pose["orientation"]["z"],
            pose["orientation"]["w"],
        )

        ts = to_timestamp(msg["data"]["header"]["stamp"])
        # NOTE(review): the header says frame_id "odom" but this hard-codes
        # "world" — presumably intentional for the mapping stack; confirm.
        return Odometry(position=pos, orientation=rot, ts=ts, frame_id="world")

    def __repr__(self) -> str:
        return f"Odom pos({self.position}), rot({self.orientation})"
reactivex.operators as ops -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.odometry import Odometry from dimos.utils.testing import SensorReplay _EXPECTED_TOTAL_RAD = -4.05212 diff --git a/dimos/robot/unitree_webrtc/type/test_timeseries.py b/dimos/robot/unitree/type/test_timeseries.py similarity index 95% rename from dimos/robot/unitree_webrtc/type/test_timeseries.py rename to dimos/robot/unitree/type/test_timeseries.py index 2c7606d9f2..5164d91a94 100644 --- a/dimos/robot/unitree_webrtc/type/test_timeseries.py +++ b/dimos/robot/unitree/type/test_timeseries.py @@ -14,7 +14,7 @@ from datetime import datetime, timedelta -from dimos.robot.unitree_webrtc.type.timeseries import TEvent, TList +from dimos.robot.unitree.type.timeseries import TEvent, TList fixed_date = datetime(2025, 5, 13, 15, 2, 5).astimezone() start_event = TEvent(fixed_date, 1) diff --git a/dimos/robot/unitree/type/timeseries.py b/dimos/robot/unitree/type/timeseries.py new file mode 100644 index 0000000000..b75a41b932 --- /dev/null +++ b/dimos/robot/unitree/type/timeseries.py @@ -0,0 +1,149 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from __future__ import annotations + +from abc import ABC, abstractmethod +from datetime import datetime, timedelta, timezone +from typing import TYPE_CHECKING, Generic, TypedDict, TypeVar, Union + +if TYPE_CHECKING: + from collections.abc import Iterable + +PAYLOAD = TypeVar("PAYLOAD") + + +class RosStamp(TypedDict): + sec: int + nanosec: int + + +EpochLike = Union[int, float, datetime, RosStamp] + + +def from_ros_stamp(stamp: dict[str, int], tz: timezone | None = None) -> datetime: + """Convert ROS-style timestamp {'sec': int, 'nanosec': int} to datetime.""" + return datetime.fromtimestamp(stamp["sec"] + stamp["nanosec"] / 1e9, tz=tz) + + +def to_human_readable(ts: EpochLike) -> str: + dt = to_datetime(ts) + return dt.strftime("%Y-%m-%d %H:%M:%S") + + +def to_datetime(ts: EpochLike, tz: timezone | None = None) -> datetime: + if isinstance(ts, datetime): + # if ts.tzinfo is None: + # ts = ts.astimezone(tz) + return ts + if isinstance(ts, int | float): + return datetime.fromtimestamp(ts, tz=tz) + if isinstance(ts, dict) and "sec" in ts and "nanosec" in ts: + return datetime.fromtimestamp(ts["sec"] + ts["nanosec"] / 1e9, tz=tz) + raise TypeError("unsupported timestamp type") + + +class Timestamped(ABC): + """Abstract class for an event with a timestamp.""" + + ts: datetime + + def __init__(self, ts: EpochLike) -> None: + self.ts = to_datetime(ts) + + +class TEvent(Timestamped, Generic[PAYLOAD]): + """Concrete class for an event with a timestamp and data.""" + + def __init__(self, timestamp: EpochLike, data: PAYLOAD) -> None: + super().__init__(timestamp) + self.data = data + + def __eq__(self, other: object) -> bool: + if not isinstance(other, TEvent): + return NotImplemented + return self.ts == other.ts and self.data == other.data + + def __repr__(self) -> str: + return f"TEvent(ts={self.ts}, data={self.data})" + + +EVENT = TypeVar("EVENT", bound=Timestamped) # any object that is a subclass of Timestamped + + +class Timeseries(ABC, Generic[EVENT]): + """Abstract 
class for an iterable of events with timestamps.""" + + @abstractmethod + def __iter__(self) -> Iterable[EVENT]: ... + + @property + def start_time(self) -> datetime: + """Return the timestamp of the earliest event, assuming the data is sorted.""" + return next(iter(self)).ts # type: ignore[call-overload, no-any-return, type-var] + + @property + def end_time(self) -> datetime: + """Return the timestamp of the latest event, assuming the data is sorted.""" + return next(reversed(list(self))).ts # type: ignore[call-overload, no-any-return] + + @property + def frequency(self) -> float: + """Calculate the frequency of events in Hz.""" + return len(list(self)) / (self.duration().total_seconds() or 1) # type: ignore[call-overload] + + def time_range(self) -> tuple[datetime, datetime]: + """Return (earliest_ts, latest_ts). Empty input ⇒ ValueError.""" + return self.start_time, self.end_time + + def duration(self) -> timedelta: + """Total time spanned by the iterable (Δ = last - first).""" + return self.end_time - self.start_time + + def closest_to(self, timestamp: EpochLike) -> EVENT: + """Return the event closest to the given timestamp. 
Assumes timeseries is sorted.""" + print("closest to", timestamp) + target = to_datetime(timestamp) + print("converted to", target) + target_ts = target.timestamp() + + closest = None + min_dist = float("inf") + + for event in self: # type: ignore[attr-defined] + dist = abs(event.ts - target_ts) + if dist > min_dist: + break + + min_dist = dist + closest = event + + print(f"closest: {closest}") + return closest # type: ignore[return-value] + + def __repr__(self) -> str: + """Return a string representation of the Timeseries.""" + return f"Timeseries(date={self.start_time.strftime('%Y-%m-%d')}, start={self.start_time.strftime('%H:%M:%S')}, end={self.end_time.strftime('%H:%M:%S')}, duration={self.duration()}, events={len(list(self))}, freq={self.frequency:.2f}Hz)" # type: ignore[call-overload] + + def __str__(self) -> str: + """Return a string representation of the Timeseries.""" + return self.__repr__() + + +class TList(list[EVENT], Timeseries[EVENT]): + """A test class that inherits from both list and Timeseries.""" + + def __repr__(self) -> str: + """Return a string representation of the TList using Timeseries repr method.""" + return Timeseries.__repr__(self) diff --git a/dimos/robot/unitree/type/vector.py b/dimos/robot/unitree/type/vector.py new file mode 100644 index 0000000000..58438c0a98 --- /dev/null +++ b/dimos/robot/unitree/type/vector.py @@ -0,0 +1,442 @@ +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
import builtins
from collections.abc import Iterable, Iterator
from typing import Any, Protocol, TypeVar, Union, runtime_checkable

import numpy as np
from numpy.typing import NDArray

T = TypeVar("T", bound="Vector")


class Vector:
    """A wrapper around numpy arrays for vector operations with intuitive syntax."""

    def __init__(self, *args: Any) -> None:
        """Initialize a vector from components, an iterable, or an x/y/z object.

        Examples:
            Vector(1, 2)                  # 2D vector
            Vector(1, 2, 3)               # 3D vector
            Vector([1, 2, 3])             # From list
            Vector(np.array([1, 2, 3]))   # From numpy array
        """
        if len(args) == 1 and hasattr(args[0], "__iter__"):
            self._data = np.array(args[0], dtype=float)
        elif len(args) == 1:
            # Single non-iterable argument: assumed to expose .x/.y/.z
            # attributes (e.g. a geometry message). NOTE(review): a plain
            # scalar raises AttributeError here — confirm callers never pass one.
            self._data = np.array([args[0].x, args[0].y, args[0].z], dtype=float)
        else:
            self._data = np.array(args, dtype=float)

    @staticmethod
    def _coerce(value: Union["Vector", Iterable[float]]) -> NDArray[np.float64]:
        """Normalize a Vector or iterable to a float numpy array."""
        return value._data if isinstance(value, Vector) else np.array(value, dtype=float)

    @property
    def yaw(self) -> float:
        # Alias for the first component when the vector encodes angles.
        return self.x

    @property
    def tuple(self) -> builtins.tuple[float, ...]:
        """Tuple representation of the vector."""
        return builtins.tuple(self._data)

    @property
    def x(self) -> float:
        """X component (0.0 if the vector is empty)."""
        return self._data[0] if len(self._data) > 0 else 0.0

    @property
    def y(self) -> float:
        """Y component (0.0 if the vector has fewer than 2 components)."""
        return self._data[1] if len(self._data) > 1 else 0.0

    @property
    def z(self) -> float:
        """Z component (0.0 if the vector has fewer than 3 components)."""
        return self._data[2] if len(self._data) > 2 else 0.0

    @property
    def dim(self) -> int:
        """Dimensionality of the vector."""
        return len(self._data)

    @property
    def data(self) -> NDArray[np.float64]:
        """The underlying numpy array."""
        return self._data

    def __len__(self) -> int:
        return len(self._data)

    def __getitem__(self, idx: int) -> float:
        return float(self._data[idx])

    def __iter__(self) -> Iterator[float]:
        # Annotated as Iterator (what iter() actually returns); the previous
        # Iterable annotation needed a type: ignore to pass checking.
        return iter(self._data)

    def __repr__(self) -> str:
        return "(" + ",".join(f"{c:.6g}" for c in self._data) + ")"

    def __str__(self) -> str:
        if self.dim < 2:
            return self.__repr__()

        def _arrow() -> str:
            # Renamed from getArrow; the old body also shadowed builtin repr.
            arrows = ["←", "↖", "↑", "↗", "→", "↘", "↓", "↙"]
            if self.y == 0 and self.x == 0:
                return "·"
            # Map the heading angle (-pi, pi] onto the 8 compass arrows.
            angle = np.arctan2(self.y, self.x)
            return arrows[int(((angle + np.pi) * 4 / np.pi) % 8)]

        return f"{_arrow()} Vector {self.__repr__()}"

    def serialize(self) -> dict:  # type: ignore[type-arg]
        """Serialize the vector to a dictionary."""
        return {"type": "vector", "c": self._data.tolist()}

    def __eq__(self, other: Any) -> bool:
        if isinstance(other, Vector):
            return np.array_equal(self._data, other._data)
        return np.array_equal(self._data, np.array(other, dtype=float))

    def __add__(self: T, other: Union["Vector", Iterable[float]]) -> T:
        return self.__class__(self._data + self._coerce(other))

    def __sub__(self: T, other: Union["Vector", Iterable[float]]) -> T:
        return self.__class__(self._data - self._coerce(other))

    def __mul__(self: T, scalar: float) -> T:
        return self.__class__(self._data * scalar)

    def __rmul__(self: T, scalar: float) -> T:
        return self.__mul__(scalar)

    def __truediv__(self: T, scalar: float) -> T:
        return self.__class__(self._data / scalar)

    def __neg__(self: T) -> T:
        return self.__class__(-self._data)

    def dot(self, other: Union["Vector", Iterable[float]]) -> float:
        """Compute dot product."""
        return float(np.dot(self._data, self._coerce(other)))

    def cross(self: T, other: Union["Vector", Iterable[float]]) -> T:
        """Compute cross product (3D vectors only).

        Raises:
            ValueError: if either operand is not 3-dimensional.
        """
        if self.dim != 3:
            raise ValueError("Cross product is only defined for 3D vectors")

        other_data = self._coerce(other)
        if len(other_data) != 3:
            raise ValueError("Cross product requires two 3D vectors")

        return self.__class__(np.cross(self._data, other_data))

    def length(self) -> float:
        """Compute the Euclidean length (magnitude) of the vector."""
        return float(np.linalg.norm(self._data))

    def length_squared(self) -> float:
        """Compute the squared length of the vector (faster than length())."""
        return float(np.sum(self._data * self._data))

    def normalize(self: T) -> T:
        """Return a normalized unit vector in the same direction."""
        length = self.length()
        if length < 1e-10:  # Avoid division by near-zero
            return self.__class__(np.zeros_like(self._data))
        return self.__class__(self._data / length)

    def to_2d(self: T) -> T:
        """Convert a vector to a 2D vector by taking only the x and y components."""
        return self.__class__(self._data[:2])

    def distance(self, other: Union["Vector", Iterable[float]]) -> float:
        """Compute Euclidean distance to another vector."""
        return float(np.linalg.norm(self._data - self._coerce(other)))

    def distance_squared(self, other: Union["Vector", Iterable[float]]) -> float:
        """Compute squared Euclidean distance to another vector (faster than distance())."""
        diff = self._data - self._coerce(other)
        return float(np.sum(diff * diff))

    def angle(self, other: Union["Vector", Iterable[float]]) -> float:
        """Compute the angle (in radians) between this vector and another."""
        if self.length() < 1e-10 or (isinstance(other, Vector) and other.length() < 1e-10):
            return 0.0

        other_data = self._coerce(other)
        cos_angle = np.clip(
            np.dot(self._data, other_data)
            / (np.linalg.norm(self._data) * np.linalg.norm(other_data)),
            -1.0,
            1.0,
        )
        return float(np.arccos(cos_angle))

    def project(self: T, onto: Union["Vector", Iterable[float]]) -> T:
        """Project this vector onto another vector (zero vector if `onto` is ~0)."""
        onto_data = self._coerce(onto)

        onto_length_sq = np.sum(onto_data * onto_data)
        if onto_length_sq < 1e-10:
            return self.__class__(np.zeros_like(self._data))

        scalar_projection = np.dot(self._data, onto_data) / onto_length_sq
        return self.__class__(scalar_projection * onto_data)

    @classmethod
    def zeros(cls: type[T], dim: int) -> T:
        """Create a zero vector of given dimension."""
        return cls(np.zeros(dim))

    @classmethod
    def ones(cls: type[T], dim: int) -> T:
        """Create a vector of ones with given dimension."""
        return cls(np.ones(dim))

    @classmethod
    def unit_x(cls: type[T], dim: int = 3) -> T:
        """Create a unit vector in the x direction."""
        v = np.zeros(dim)
        v[0] = 1.0
        return cls(v)

    @classmethod
    def unit_y(cls: type[T], dim: int = 3) -> T:
        """Create a unit vector in the y direction."""
        v = np.zeros(dim)
        v[1] = 1.0
        return cls(v)

    @classmethod
    def unit_z(cls: type[T], dim: int = 3) -> T:
        """Create a unit vector in the z direction (zero vector when dim < 3)."""
        v = np.zeros(dim)
        if dim > 2:
            v[2] = 1.0
        return cls(v)

    def to_list(self) -> list[float]:
        """Convert the vector to a list of Python floats."""
        return [float(c) for c in self._data]

    def to_tuple(self) -> builtins.tuple[float, ...]:
        """Convert the vector to a tuple."""
        return builtins.tuple(self._data)

    def to_numpy(self) -> NDArray[np.float64]:
        """Convert the vector to a numpy array."""
        return self._data


# Protocol approach for static type checking
@runtime_checkable
class VectorLike(Protocol):
    """Protocol for types that can be treated as vectors."""

    def __getitem__(self, key: int) -> float: ...
    def __len__(self) -> int: ...
    def __iter__(self) -> Iterable[float]: ...


def to_numpy(value: VectorLike) -> NDArray[np.float64]:
    """Convert a vector-compatible value to a numpy array."""
    if isinstance(value, Vector):
        return value.data
    if isinstance(value, np.ndarray):
        return value
    return np.array(value, dtype=float)


def to_vector(value: VectorLike) -> Vector:
    """Convert a vector-compatible value to a Vector object."""
    return value if isinstance(value, Vector) else Vector(value)


def to_tuple(value: VectorLike) -> tuple[float, ...]:
    """Convert a vector-compatible value to a tuple of floats."""
    if isinstance(value, Vector):
        return tuple(float(c) for c in value.data)
    if isinstance(value, (np.ndarray, tuple)):
        return tuple(float(c) for c in value)
    # Fall back to index access for other sequence-like values.
    return tuple(float(value[i]) for i in range(len(value)))


def to_list(value: VectorLike) -> list[float]:
    """Convert a vector-compatible value to a list of floats."""
    if isinstance(value, Vector):
        return [float(c) for c in value.data]
    if isinstance(value, (np.ndarray, list)):
        return [float(c) for c in value]
    # Fall back to index access for other sequence-like values.
    return [float(value[i]) for i in range(len(value))]


def is_2d(value: VectorLike) -> bool:
    """True if a vector-compatible value has exactly two components."""
    if isinstance(value, np.ndarray):
        # Either the trailing axis has length 2 or there are 2 elements total.
        return value.shape[-1] == 2 or value.size == 2
    return len(value) == 2


def is_3d(value: VectorLike) -> bool:
    """True if a vector-compatible value has exactly three components."""
    if isinstance(value, np.ndarray):
        return value.shape[-1] == 3 or value.size == 3
    return len(value) == 3


def x(value: VectorLike) -> float:
    """X component of a vector-compatible value."""
    if isinstance(value, Vector):
        return value.x
    return float(to_numpy(value)[0])


def y(value: VectorLike) -> float:
    """Y component of a vector-compatible value (0.0 if absent)."""
    if isinstance(value, Vector):
        return value.y
    arr = to_numpy(value)
    return float(arr[1]) if len(arr) > 1 else 0.0


def z(value: VectorLike) -> float:
    """Z component of a vector-compatible value (0.0 if absent)."""
    if isinstance(value, Vector):
        return value.z
    arr = to_numpy(value)
    return float(arr[2]) if len(arr) > 2 else 0.0
files). diff --git a/dimos/robot/unitree_webrtc/__init__.py b/dimos/robot/unitree_webrtc/__init__.py index e69de29bb2..b8660ff6fd 100644 --- a/dimos/robot/unitree_webrtc/__init__.py +++ b/dimos/robot/unitree_webrtc/__init__.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Compatibility package for legacy dimos.robot.unitree_webrtc imports.""" + +from importlib import import_module +import sys + +_ALIAS_MODULES = { + "demo_error_on_name_conflicts": "dimos.robot.unitree.demo_error_on_name_conflicts", + "depth_module": "dimos.robot.unitree.depth_module", + "keyboard_teleop": "dimos.robot.unitree.keyboard_teleop", + "mujoco_connection": "dimos.robot.unitree.mujoco_connection", + "type": "dimos.robot.unitree.type", + "unitree_g1_skill_container": "dimos.robot.unitree.g1.skill_container", + "unitree_skill_container": "dimos.robot.unitree.unitree_skill_container", + "unitree_skills": "dimos.robot.unitree.unitree_skills", +} + +for alias, target in _ALIAS_MODULES.items(): + sys.modules[f"{__name__}.{alias}"] = import_module(target) diff --git a/dimos/robot/unitree_webrtc/type/__init__.py b/dimos/robot/unitree_webrtc/type/__init__.py index e69de29bb2..03ff4f4563 100644 --- a/dimos/robot/unitree_webrtc/type/__init__.py +++ b/dimos/robot/unitree_webrtc/type/__init__.py @@ -0,0 +1,33 @@ +#!/usr/bin/env python3 +# Copyright 2025-2026 Dimensional Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Compatibility re-exports for legacy dimos.robot.unitree_webrtc.type.* imports.""" + +import importlib + +__all__ = [] + + +def __getattr__(name: str): # type: ignore[no-untyped-def] + module = importlib.import_module("dimos.robot.unitree.type") + try: + return getattr(module, name) + except AttributeError as exc: + raise AttributeError(f"No {__name__} attribute {name}") from exc + + +def __dir__() -> list[str]: + module = importlib.import_module("dimos.robot.unitree.type") + return [name for name in dir(module) if not name.startswith("_")] diff --git a/dimos/robot/unitree_webrtc/type/lidar.py b/dimos/robot/unitree_webrtc/type/lidar.py index df2909dc38..d8dbe98fd2 100644 --- a/dimos/robot/unitree_webrtc/type/lidar.py +++ b/dimos/robot/unitree_webrtc/type/lidar.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 # Copyright 2025-2026 Dimensional Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,63 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-"""Unitree WebRTC lidar message parsing utilities.""" +"""Compatibility re-export for dimos.robot.unitree_webrtc.type.${name}.""" -import time -from typing import TypedDict - -import numpy as np -import open3d as o3d # type: ignore[import-untyped] - -from dimos.msgs.sensor_msgs import PointCloud2 - -# Backwards compatibility alias for pickled data -LidarMessage = PointCloud2 - - -class RawLidarPoints(TypedDict): - points: np.ndarray # type: ignore[type-arg] # Shape (N, 3) array of 3D points [x, y, z] - - -class RawLidarData(TypedDict): - """Data portion of the LIDAR message""" - - frame_id: str - origin: list[float] - resolution: float - src_size: int - stamp: float - width: list[int] - data: RawLidarPoints - - -class RawLidarMsg(TypedDict): - """Static type definition for raw LIDAR message from Unitree WebRTC.""" - - type: str - topic: str - data: RawLidarData - - -def pointcloud2_from_webrtc_lidar(raw_message: RawLidarMsg, ts: float | None = None) -> PointCloud2: - """Convert a raw Unitree WebRTC lidar message to PointCloud2. - - Args: - raw_message: Raw lidar message from Unitree WebRTC API - ts: Optional timestamp override. If None, uses current time. - - Returns: - PointCloud2 message with the lidar points - """ - data = raw_message["data"] - points = data["data"]["points"] - - pointcloud = o3d.geometry.PointCloud() - pointcloud.points = o3d.utility.Vector3dVector(points) - - return PointCloud2( - pointcloud=pointcloud, - # webrtc stamp is broken (e.g., "stamp": 1.758148e+09), use current time - ts=ts if ts is not None else time.time(), - frame_id="world", - ) +from dimos.robot.unitree.type.lidar import * # noqa: F403 diff --git a/dimos/robot/unitree_webrtc/type/lowstate.py b/dimos/robot/unitree_webrtc/type/lowstate.py index 3e7926424a..d92ee4d5b1 100644 --- a/dimos/robot/unitree_webrtc/type/lowstate.py +++ b/dimos/robot/unitree_webrtc/type/lowstate.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 # Copyright 2025-2026 Dimensional Inc. 
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Compatibility re-export for dimos.robot.unitree_webrtc.type.lowstate."""

# The real module now lives at dimos.robot.unitree.type.lowstate; re-export it
# so legacy imports keep working. (The docstring above previously contained
# the unexpanded template placeholder "${name}".)
from dimos.robot.unitree.type.lowstate import *  # noqa: F403
"""Compatibility re-export for dimos.robot.unitree_webrtc.type.map."""

# The real module now lives at dimos.robot.unitree.type.map; re-export it
# so legacy imports keep working. (The docstring above previously contained
# the unexpanded template placeholder "${name}".)
from dimos.robot.unitree.type.map import *  # noqa: F403
b/dimos/robot/unitree_webrtc/type/odometry.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 # Copyright 2025-2026 Dimensional Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -11,92 +12,7 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -from typing import Literal, TypedDict -from dimos.msgs.geometry_msgs import PoseStamped, Quaternion, Vector3 -from dimos.robot.unitree_webrtc.type.timeseries import ( - Timestamped, -) -from dimos.types.timestamped import to_timestamp +"""Compatibility re-export for dimos.robot.unitree_webrtc.type.${name}.""" -raw_odometry_msg_sample = { - "type": "msg", - "topic": "rt/utlidar/robot_pose", - "data": { - "header": {"stamp": {"sec": 1746565669, "nanosec": 448350564}, "frame_id": "odom"}, - "pose": { - "position": {"x": 5.961965, "y": -2.916958, "z": 0.319509}, - "orientation": {"x": 0.002787, "y": -0.000902, "z": -0.970244, "w": -0.242112}, - }, - }, -} - - -class TimeStamp(TypedDict): - sec: int - nanosec: int - - -class Header(TypedDict): - stamp: TimeStamp - frame_id: str - - -class RawPosition(TypedDict): - x: float - y: float - z: float - - -class Orientation(TypedDict): - x: float - y: float - z: float - w: float - - -class PoseData(TypedDict): - position: RawPosition - orientation: Orientation - - -class OdometryData(TypedDict): - header: Header - pose: PoseData - - -class RawOdometryMessage(TypedDict): - type: Literal["msg"] - topic: str - data: OdometryData - - -class Odometry(PoseStamped, Timestamped): # type: ignore[misc] - name = "geometry_msgs.PoseStamped" - - def __init__(self, frame_id: str = "base_link", *args, **kwargs) -> None: # type: ignore[no-untyped-def] - super().__init__(frame_id=frame_id, *args, **kwargs) # type: ignore[misc] - - @classmethod - def from_msg(cls, msg: RawOdometryMessage) -> "Odometry": - pose = msg["data"]["pose"] - - # Extract position - pos 
= Vector3( - pose["position"].get("x"), - pose["position"].get("y"), - pose["position"].get("z"), - ) - - rot = Quaternion( - pose["orientation"].get("x"), - pose["orientation"].get("y"), - pose["orientation"].get("z"), - pose["orientation"].get("w"), - ) - - ts = to_timestamp(msg["data"]["header"]["stamp"]) - return Odometry(position=pos, orientation=rot, ts=ts, frame_id="world") - - def __repr__(self) -> str: - return f"Odom pos({self.position}), rot({self.orientation})" +from dimos.robot.unitree.type.odometry import * # noqa: F403 diff --git a/dimos/robot/unitree_webrtc/type/timeseries.py b/dimos/robot/unitree_webrtc/type/timeseries.py index b75a41b932..34f9587ade 100644 --- a/dimos/robot/unitree_webrtc/type/timeseries.py +++ b/dimos/robot/unitree_webrtc/type/timeseries.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 # Copyright 2025-2026 Dimensional Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,138 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import annotations +"""Compatibility re-export for dimos.robot.unitree_webrtc.type.${name}.""" -from abc import ABC, abstractmethod -from datetime import datetime, timedelta, timezone -from typing import TYPE_CHECKING, Generic, TypedDict, TypeVar, Union - -if TYPE_CHECKING: - from collections.abc import Iterable - -PAYLOAD = TypeVar("PAYLOAD") - - -class RosStamp(TypedDict): - sec: int - nanosec: int - - -EpochLike = Union[int, float, datetime, RosStamp] - - -def from_ros_stamp(stamp: dict[str, int], tz: timezone | None = None) -> datetime: - """Convert ROS-style timestamp {'sec': int, 'nanosec': int} to datetime.""" - return datetime.fromtimestamp(stamp["sec"] + stamp["nanosec"] / 1e9, tz=tz) - - -def to_human_readable(ts: EpochLike) -> str: - dt = to_datetime(ts) - return dt.strftime("%Y-%m-%d %H:%M:%S") - - -def to_datetime(ts: EpochLike, tz: timezone | None = None) -> datetime: - if isinstance(ts, datetime): - # if ts.tzinfo is None: - # ts = ts.astimezone(tz) - return ts - if isinstance(ts, int | float): - return datetime.fromtimestamp(ts, tz=tz) - if isinstance(ts, dict) and "sec" in ts and "nanosec" in ts: - return datetime.fromtimestamp(ts["sec"] + ts["nanosec"] / 1e9, tz=tz) - raise TypeError("unsupported timestamp type") - - -class Timestamped(ABC): - """Abstract class for an event with a timestamp.""" - - ts: datetime - - def __init__(self, ts: EpochLike) -> None: - self.ts = to_datetime(ts) - - -class TEvent(Timestamped, Generic[PAYLOAD]): - """Concrete class for an event with a timestamp and data.""" - - def __init__(self, timestamp: EpochLike, data: PAYLOAD) -> None: - super().__init__(timestamp) - self.data = data - - def __eq__(self, other: object) -> bool: - if not isinstance(other, TEvent): - return NotImplemented - return self.ts == other.ts and self.data == other.data - - def __repr__(self) -> str: - return f"TEvent(ts={self.ts}, data={self.data})" - - -EVENT = TypeVar("EVENT", bound=Timestamped) # any object that is a subclass 
of Timestamped - - -class Timeseries(ABC, Generic[EVENT]): - """Abstract class for an iterable of events with timestamps.""" - - @abstractmethod - def __iter__(self) -> Iterable[EVENT]: ... - - @property - def start_time(self) -> datetime: - """Return the timestamp of the earliest event, assuming the data is sorted.""" - return next(iter(self)).ts # type: ignore[call-overload, no-any-return, type-var] - - @property - def end_time(self) -> datetime: - """Return the timestamp of the latest event, assuming the data is sorted.""" - return next(reversed(list(self))).ts # type: ignore[call-overload, no-any-return] - - @property - def frequency(self) -> float: - """Calculate the frequency of events in Hz.""" - return len(list(self)) / (self.duration().total_seconds() or 1) # type: ignore[call-overload] - - def time_range(self) -> tuple[datetime, datetime]: - """Return (earliest_ts, latest_ts). Empty input ⇒ ValueError.""" - return self.start_time, self.end_time - - def duration(self) -> timedelta: - """Total time spanned by the iterable (Δ = last - first).""" - return self.end_time - self.start_time - - def closest_to(self, timestamp: EpochLike) -> EVENT: - """Return the event closest to the given timestamp. 
Assumes timeseries is sorted.""" - print("closest to", timestamp) - target = to_datetime(timestamp) - print("converted to", target) - target_ts = target.timestamp() - - closest = None - min_dist = float("inf") - - for event in self: # type: ignore[attr-defined] - dist = abs(event.ts - target_ts) - if dist > min_dist: - break - - min_dist = dist - closest = event - - print(f"closest: {closest}") - return closest # type: ignore[return-value] - - def __repr__(self) -> str: - """Return a string representation of the Timeseries.""" - return f"Timeseries(date={self.start_time.strftime('%Y-%m-%d')}, start={self.start_time.strftime('%H:%M:%S')}, end={self.end_time.strftime('%H:%M:%S')}, duration={self.duration()}, events={len(list(self))}, freq={self.frequency:.2f}Hz)" # type: ignore[call-overload] - - def __str__(self) -> str: - """Return a string representation of the Timeseries.""" - return self.__repr__() - - -class TList(list[EVENT], Timeseries[EVENT]): - """A test class that inherits from both list and Timeseries.""" - - def __repr__(self) -> str: - """Return a string representation of the TList using Timeseries repr method.""" - return Timeseries.__repr__(self) +from dimos.robot.unitree.type.timeseries import * # noqa: F403 diff --git a/dimos/robot/unitree_webrtc/type/vector.py b/dimos/robot/unitree_webrtc/type/vector.py index 58438c0a98..20d07c76e8 100644 --- a/dimos/robot/unitree_webrtc/type/vector.py +++ b/dimos/robot/unitree_webrtc/type/vector.py @@ -1,3 +1,4 @@ +#!/usr/bin/env python3 # Copyright 2025-2026 Dimensional Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); @@ -12,431 +13,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-import builtins -from collections.abc import Iterable -from typing import ( - Any, - Protocol, - TypeVar, - Union, - runtime_checkable, -) +"""Compatibility re-export for dimos.robot.unitree_webrtc.type.${name}.""" -import numpy as np -from numpy.typing import NDArray - -T = TypeVar("T", bound="Vector") - - -class Vector: - """A wrapper around numpy arrays for vector operations with intuitive syntax.""" - - def __init__(self, *args: Any) -> None: - """Initialize a vector from components or another iterable. - - Examples: - Vector(1, 2) # 2D vector - Vector(1, 2, 3) # 3D vector - Vector([1, 2, 3]) # From list - Vector(np.array([1, 2, 3])) # From numpy array - """ - if len(args) == 1 and hasattr(args[0], "__iter__"): - self._data = np.array(args[0], dtype=float) - elif len(args) == 1: - self._data = np.array([args[0].x, args[0].y, args[0].z], dtype=float) - - else: - self._data = np.array(args, dtype=float) - - @property - def yaw(self) -> float: - return self.x - - @property - def tuple(self) -> tuple[float, ...]: - """Tuple representation of the vector.""" - return tuple(self._data) - - @property - def x(self) -> float: - """X component of the vector.""" - return self._data[0] if len(self._data) > 0 else 0.0 - - @property - def y(self) -> float: - """Y component of the vector.""" - return self._data[1] if len(self._data) > 1 else 0.0 - - @property - def z(self) -> float: - """Z component of the vector.""" - return self._data[2] if len(self._data) > 2 else 0.0 - - @property - def dim(self) -> int: - """Dimensionality of the vector.""" - return len(self._data) - - @property - def data(self) -> NDArray[np.float64]: - """Get the underlying numpy array.""" - return self._data - - def __len__(self) -> int: - return len(self._data) - - def __getitem__(self, idx: int) -> float: - return float(self._data[idx]) - - def __iter__(self) -> Iterable[float]: - return iter(self._data) # type: ignore[no-any-return] - - def __repr__(self) -> str: - components = ",".join(f"{x:.6g}" 
for x in self._data) - return f"({components})" - - def __str__(self) -> str: - if self.dim < 2: - return self.__repr__() - - def getArrow() -> str: - repr = ["←", "↖", "↑", "↗", "→", "↘", "↓", "↙"] - - if self.y == 0 and self.x == 0: - return "·" - - # Calculate angle in radians and convert to directional index - angle = np.arctan2(self.y, self.x) - # Map angle to 0-7 index (8 directions) with proper orientation - dir_index = int(((angle + np.pi) * 4 / np.pi) % 8) - # Get directional arrow symbol - return repr[dir_index] - - return f"{getArrow()} Vector {self.__repr__()}" - - def serialize(self) -> dict: # type: ignore[type-arg] - """Serialize the vector to a dictionary.""" - return {"type": "vector", "c": self._data.tolist()} - - def __eq__(self, other: Any) -> bool: - if isinstance(other, Vector): - return np.array_equal(self._data, other._data) - return np.array_equal(self._data, np.array(other, dtype=float)) - - def __add__(self: T, other: Union["Vector", Iterable[float]]) -> T: - if isinstance(other, Vector): - return self.__class__(self._data + other._data) - return self.__class__(self._data + np.array(other, dtype=float)) - - def __sub__(self: T, other: Union["Vector", Iterable[float]]) -> T: - if isinstance(other, Vector): - return self.__class__(self._data - other._data) - return self.__class__(self._data - np.array(other, dtype=float)) - - def __mul__(self: T, scalar: float) -> T: - return self.__class__(self._data * scalar) - - def __rmul__(self: T, scalar: float) -> T: - return self.__mul__(scalar) - - def __truediv__(self: T, scalar: float) -> T: - return self.__class__(self._data / scalar) - - def __neg__(self: T) -> T: - return self.__class__(-self._data) - - def dot(self, other: Union["Vector", Iterable[float]]) -> float: - """Compute dot product.""" - if isinstance(other, Vector): - return float(np.dot(self._data, other._data)) - return float(np.dot(self._data, np.array(other, dtype=float))) - - def cross(self: T, other: Union["Vector", 
Iterable[float]]) -> T: - """Compute cross product (3D vectors only).""" - if self.dim != 3: - raise ValueError("Cross product is only defined for 3D vectors") - - if isinstance(other, Vector): - other_data = other._data - else: - other_data = np.array(other, dtype=float) - - if len(other_data) != 3: - raise ValueError("Cross product requires two 3D vectors") - - return self.__class__(np.cross(self._data, other_data)) - - def length(self) -> float: - """Compute the Euclidean length (magnitude) of the vector.""" - return float(np.linalg.norm(self._data)) - - def length_squared(self) -> float: - """Compute the squared length of the vector (faster than length()).""" - return float(np.sum(self._data * self._data)) - - def normalize(self: T) -> T: - """Return a normalized unit vector in the same direction.""" - length = self.length() - if length < 1e-10: # Avoid division by near-zero - return self.__class__(np.zeros_like(self._data)) - return self.__class__(self._data / length) - - def to_2d(self: T) -> T: - """Convert a vector to a 2D vector by taking only the x and y components.""" - return self.__class__(self._data[:2]) - - def distance(self, other: Union["Vector", Iterable[float]]) -> float: - """Compute Euclidean distance to another vector.""" - if isinstance(other, Vector): - return float(np.linalg.norm(self._data - other._data)) - return float(np.linalg.norm(self._data - np.array(other, dtype=float))) - - def distance_squared(self, other: Union["Vector", Iterable[float]]) -> float: - """Compute squared Euclidean distance to another vector (faster than distance()).""" - if isinstance(other, Vector): - diff = self._data - other._data - else: - diff = self._data - np.array(other, dtype=float) - return float(np.sum(diff * diff)) - - def angle(self, other: Union["Vector", Iterable[float]]) -> float: - """Compute the angle (in radians) between this vector and another.""" - if self.length() < 1e-10 or (isinstance(other, Vector) and other.length() < 1e-10): - return 0.0 
- - if isinstance(other, Vector): - other_data = other._data - else: - other_data = np.array(other, dtype=float) - - cos_angle = np.clip( - np.dot(self._data, other_data) - / (np.linalg.norm(self._data) * np.linalg.norm(other_data)), - -1.0, - 1.0, - ) - return float(np.arccos(cos_angle)) - - def project(self: T, onto: Union["Vector", Iterable[float]]) -> T: - """Project this vector onto another vector.""" - if isinstance(onto, Vector): - onto_data = onto._data - else: - onto_data = np.array(onto, dtype=float) - - onto_length_sq = np.sum(onto_data * onto_data) - if onto_length_sq < 1e-10: - return self.__class__(np.zeros_like(self._data)) - - scalar_projection = np.dot(self._data, onto_data) / onto_length_sq - return self.__class__(scalar_projection * onto_data) - - @classmethod - def zeros(cls: type[T], dim: int) -> T: - """Create a zero vector of given dimension.""" - return cls(np.zeros(dim)) - - @classmethod - def ones(cls: type[T], dim: int) -> T: - """Create a vector of ones with given dimension.""" - return cls(np.ones(dim)) - - @classmethod - def unit_x(cls: type[T], dim: int = 3) -> T: - """Create a unit vector in the x direction.""" - v = np.zeros(dim) - v[0] = 1.0 - return cls(v) - - @classmethod - def unit_y(cls: type[T], dim: int = 3) -> T: - """Create a unit vector in the y direction.""" - v = np.zeros(dim) - v[1] = 1.0 - return cls(v) - - @classmethod - def unit_z(cls: type[T], dim: int = 3) -> T: - """Create a unit vector in the z direction.""" - v = np.zeros(dim) - if dim > 2: - v[2] = 1.0 - return cls(v) - - def to_list(self) -> list[float]: - """Convert the vector to a list.""" - return [float(x) for x in self._data] - - def to_tuple(self) -> builtins.tuple[float, ...]: - """Convert the vector to a tuple.""" - return tuple(self._data) - - def to_numpy(self) -> NDArray[np.float64]: - """Convert the vector to a numpy array.""" - return self._data - - -# Protocol approach for static type checking -@runtime_checkable -class VectorLike(Protocol): - 
"""Protocol for types that can be treated as vectors.""" - - def __getitem__(self, key: int) -> float: ... - def __len__(self) -> int: ... - def __iter__(self) -> Iterable[float]: ... - - -def to_numpy(value: VectorLike) -> NDArray[np.float64]: - """Convert a vector-compatible value to a numpy array. - - Args: - value: Any vector-like object (Vector, numpy array, tuple, list) - - Returns: - Numpy array representation - """ - if isinstance(value, Vector): - return value.data - elif isinstance(value, np.ndarray): - return value - else: - return np.array(value, dtype=float) - - -def to_vector(value: VectorLike) -> Vector: - """Convert a vector-compatible value to a Vector object. - - Args: - value: Any vector-like object (Vector, numpy array, tuple, list) - - Returns: - Vector object - """ - if isinstance(value, Vector): - return value - else: - return Vector(value) - - -def to_tuple(value: VectorLike) -> tuple[float, ...]: - """Convert a vector-compatible value to a tuple. - - Args: - value: Any vector-like object (Vector, numpy array, tuple, list) - - Returns: - Tuple of floats - """ - if isinstance(value, Vector): - return tuple(float(x) for x in value.data) - elif isinstance(value, np.ndarray): - return tuple(float(x) for x in value) - elif isinstance(value, tuple): - return tuple(float(x) for x in value) - else: - # Convert to list first to ensure we have an indexable sequence - data = [value[i] for i in range(len(value))] - return tuple(float(x) for x in data) - - -def to_list(value: VectorLike) -> list[float]: - """Convert a vector-compatible value to a list. 
- - Args: - value: Any vector-like object (Vector, numpy array, tuple, list) - - Returns: - List of floats - """ - if isinstance(value, Vector): - return [float(x) for x in value.data] - elif isinstance(value, np.ndarray): - return [float(x) for x in value] - elif isinstance(value, list): - return [float(x) for x in value] - else: - # Convert to list using indexing - return [float(value[i]) for i in range(len(value))] - - -# Helper functions to check dimensionality -def is_2d(value: VectorLike) -> bool: - """Check if a vector-compatible value is 2D. - - Args: - value: Any vector-like object (Vector, numpy array, tuple, list) - - Returns: - True if the value is 2D - """ - if isinstance(value, Vector): - return len(value) == 2 - elif isinstance(value, np.ndarray): - return value.shape[-1] == 2 or value.size == 2 - else: - return len(value) == 2 - - -def is_3d(value: VectorLike) -> bool: - """Check if a vector-compatible value is 3D. - - Args: - value: Any vector-like object (Vector, numpy array, tuple, list) - - Returns: - True if the value is 3D - """ - if isinstance(value, Vector): - return len(value) == 3 - elif isinstance(value, np.ndarray): - return value.shape[-1] == 3 or value.size == 3 - else: - return len(value) == 3 - - -# Extraction functions for XYZ components -def x(value: VectorLike) -> float: - """Get the X component of a vector-compatible value. - - Args: - value: Any vector-like object (Vector, numpy array, tuple, list) - - Returns: - X component as a float - """ - if isinstance(value, Vector): - return value.x - else: - return float(to_numpy(value)[0]) - - -def y(value: VectorLike) -> float: - """Get the Y component of a vector-compatible value. 
- - Args: - value: Any vector-like object (Vector, numpy array, tuple, list) - - Returns: - Y component as a float - """ - if isinstance(value, Vector): - return value.y - else: - arr = to_numpy(value) - return float(arr[1]) if len(arr) > 1 else 0.0 - - -def z(value: VectorLike) -> float: - """Get the Z component of a vector-compatible value. - - Args: - value: Any vector-like object (Vector, numpy array, tuple, list) - - Returns: - Z component as a float - """ - if isinstance(value, Vector): - return value.z - else: - arr = to_numpy(value) - return float(arr[2]) if len(arr) > 2 else 0.0 +from dimos.robot.unitree.type.vector import * # noqa: F403 diff --git a/dimos/robot/unitree_webrtc/unitree_g1_blueprints.py b/dimos/robot/unitree_webrtc/unitree_g1_blueprints.py deleted file mode 100644 index 562b65838f..0000000000 --- a/dimos/robot/unitree_webrtc/unitree_g1_blueprints.py +++ /dev/null @@ -1,268 +0,0 @@ -#!/usr/bin/env python3 -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Blueprint configurations for Unitree G1 humanoid robot. - -This module provides pre-configured blueprints for various G1 robot setups, -from basic teleoperation to full autonomous agent configurations. 
-""" - -from dimos_lcm.foxglove_msgs import SceneUpdate -from dimos_lcm.foxglove_msgs.ImageAnnotations import ( - ImageAnnotations, -) -from dimos_lcm.sensor_msgs import CameraInfo - -from dimos.agents.agent import llm_agent -from dimos.agents.cli.human import human_input -from dimos.agents.skills.navigation import navigation_skill -from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE -from dimos.core.blueprints import autoconnect -from dimos.core.transport import LCMTransport, pSHMTransport -from dimos.hardware.sensors.camera import zed -from dimos.hardware.sensors.camera.module import camera_module # type: ignore[attr-defined] -from dimos.hardware.sensors.camera.webcam import Webcam -from dimos.mapping.costmapper import cost_mapper -from dimos.mapping.voxels import voxel_mapper -from dimos.msgs.geometry_msgs import ( - PoseStamped, - Quaternion, - Transform, - Twist, - Vector3, -) -from dimos.msgs.nav_msgs import Odometry, Path -from dimos.msgs.sensor_msgs import Image, PointCloud2 -from dimos.msgs.std_msgs import Bool -from dimos.msgs.vision_msgs import Detection2DArray -from dimos.navigation.frontier_exploration import wavefront_frontier_explorer -from dimos.navigation.replanning_a_star.module import replanning_a_star_planner -from dimos.navigation.rosnav import ros_nav -from dimos.perception.detection.detectors.person.yolo import YoloPersonDetector -from dimos.perception.detection.module3D import Detection3DModule, detection3d_module -from dimos.perception.detection.moduleDB import ObjectDBModule, detection_db_module -from dimos.perception.detection.person_tracker import PersonTracker, person_tracker_module -from dimos.perception.object_tracker import object_tracking -from dimos.perception.spatial_perception import spatial_memory -from dimos.robot.foxglove_bridge import foxglove_bridge -from dimos.robot.unitree.connection.g1 import g1_connection -from dimos.robot.unitree.connection.g1sim import g1_sim_connection -from 
dimos.robot.unitree_webrtc.keyboard_teleop import keyboard_teleop -from dimos.robot.unitree_webrtc.unitree_g1_skill_container import g1_skills -from dimos.utils.monitoring import utilization -from dimos.web.websocket_vis.websocket_vis_module import websocket_vis - -_basic_no_nav = ( - autoconnect( - camera_module( - transform=Transform( - translation=Vector3(0.05, 0.0, 0.6), # height of camera on G1 robot - rotation=Quaternion.from_euler(Vector3(0.0, 0.2, 0.0)), - frame_id="sensor", - child_frame_id="camera_link", - ), - hardware=lambda: Webcam( - camera_index=0, - fps=15, - stereo_slice="left", - camera_info=zed.CameraInfo.SingleWebcam, - ), - ), - voxel_mapper(voxel_size=0.1), - cost_mapper(), - wavefront_frontier_explorer(), - # Visualization - websocket_vis(), - foxglove_bridge(), - ) - .global_config(n_dask_workers=4, robot_model="unitree_g1") - .transports( - { - # G1 uses Twist for movement commands - ("cmd_vel", Twist): LCMTransport("/cmd_vel", Twist), - # State estimation from ROS - ("state_estimation", Odometry): LCMTransport("/state_estimation", Odometry), - # Odometry output from ROSNavigationModule - ("odom", PoseStamped): LCMTransport("/odom", PoseStamped), - # Navigation module topics from nav_bot - ("goal_req", PoseStamped): LCMTransport("/goal_req", PoseStamped), - ("goal_active", PoseStamped): LCMTransport("/goal_active", PoseStamped), - ("path_active", Path): LCMTransport("/path_active", Path), - ("pointcloud", PointCloud2): LCMTransport("/lidar", PointCloud2), - ("global_pointcloud", PointCloud2): LCMTransport("/map", PointCloud2), - # Original navigation topics for backwards compatibility - ("goal_pose", PoseStamped): LCMTransport("/goal_pose", PoseStamped), - ("goal_reached", Bool): LCMTransport("/goal_reached", Bool), - ("cancel_goal", Bool): LCMTransport("/cancel_goal", Bool), - # Camera topics (if camera module is added) - ("color_image", Image): LCMTransport("/g1/color_image", Image), - ("camera_info", CameraInfo): 
LCMTransport("/g1/camera_info", CameraInfo), - } - ) -) - -unitree_g1_basic = autoconnect( - _basic_no_nav, - g1_connection(), - ros_nav(), -) - -unitree_g1_basic_sim = autoconnect( - _basic_no_nav, - g1_sim_connection(), - replanning_a_star_planner(), -) - -_perception_and_memory = autoconnect( - spatial_memory(), - object_tracking(frame_id="camera_link"), - utilization(), -) - -unitree_g1 = autoconnect( - unitree_g1_basic, - _perception_and_memory, -).global_config(n_dask_workers=8) - -unitree_g1_sim = autoconnect( - unitree_g1_basic_sim, - _perception_and_memory, -).global_config(n_dask_workers=8) - -# Optimized configuration using shared memory for images -unitree_g1_shm = autoconnect( - unitree_g1.transports( - { - ("color_image", Image): pSHMTransport( - "/g1/color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE - ), - } - ), - foxglove_bridge( - shm_channels=[ - "/g1/color_image#sensor_msgs.Image", - ] - ), -) - -_agentic_skills = autoconnect( - llm_agent(), - human_input(), - navigation_skill(), - g1_skills(), -) - -# Full agentic configuration with LLM and skills -unitree_g1_agentic = autoconnect( - unitree_g1, - _agentic_skills, -) - -unitree_g1_agentic_sim = autoconnect( - unitree_g1_sim, - _agentic_skills, -) - -# Configuration with joystick control for teleoperation -unitree_g1_joystick = autoconnect( - unitree_g1_basic, - keyboard_teleop(), # Pygame-based joystick control -) - -# Detection configuration with person tracking and 3D detection -unitree_g1_detection = ( - autoconnect( - unitree_g1_basic, - # Person detection modules with YOLO - detection3d_module( - camera_info=zed.CameraInfo.SingleWebcam, - detector=YoloPersonDetector, - ), - detection_db_module( - camera_info=zed.CameraInfo.SingleWebcam, - filter=lambda det: det.class_id == 0, # Filter for person class only - ), - person_tracker_module( - cameraInfo=zed.CameraInfo.SingleWebcam, - ), - ) - .global_config(n_dask_workers=8) - .remappings( - [ - # Connect detection modules to camera 
and lidar - (Detection3DModule, "image", "color_image"), - (Detection3DModule, "pointcloud", "pointcloud"), - (ObjectDBModule, "image", "color_image"), - (ObjectDBModule, "pointcloud", "pointcloud"), - (PersonTracker, "image", "color_image"), - (PersonTracker, "detections", "detections_2d"), - ] - ) - .transports( - { - # Detection 3D module outputs - ("detections", Detection3DModule): LCMTransport( - "/detector3d/detections", Detection2DArray - ), - ("annotations", Detection3DModule): LCMTransport( - "/detector3d/annotations", ImageAnnotations - ), - ("scene_update", Detection3DModule): LCMTransport( - "/detector3d/scene_update", SceneUpdate - ), - ("detected_pointcloud_0", Detection3DModule): LCMTransport( - "/detector3d/pointcloud/0", PointCloud2 - ), - ("detected_pointcloud_1", Detection3DModule): LCMTransport( - "/detector3d/pointcloud/1", PointCloud2 - ), - ("detected_pointcloud_2", Detection3DModule): LCMTransport( - "/detector3d/pointcloud/2", PointCloud2 - ), - ("detected_image_0", Detection3DModule): LCMTransport("/detector3d/image/0", Image), - ("detected_image_1", Detection3DModule): LCMTransport("/detector3d/image/1", Image), - ("detected_image_2", Detection3DModule): LCMTransport("/detector3d/image/2", Image), - # Detection DB module outputs - ("detections", ObjectDBModule): LCMTransport( - "/detectorDB/detections", Detection2DArray - ), - ("annotations", ObjectDBModule): LCMTransport( - "/detectorDB/annotations", ImageAnnotations - ), - ("scene_update", ObjectDBModule): LCMTransport("/detectorDB/scene_update", SceneUpdate), - ("detected_pointcloud_0", ObjectDBModule): LCMTransport( - "/detectorDB/pointcloud/0", PointCloud2 - ), - ("detected_pointcloud_1", ObjectDBModule): LCMTransport( - "/detectorDB/pointcloud/1", PointCloud2 - ), - ("detected_pointcloud_2", ObjectDBModule): LCMTransport( - "/detectorDB/pointcloud/2", PointCloud2 - ), - ("detected_image_0", ObjectDBModule): LCMTransport("/detectorDB/image/0", Image), - ("detected_image_1", 
ObjectDBModule): LCMTransport("/detectorDB/image/1", Image), - ("detected_image_2", ObjectDBModule): LCMTransport("/detectorDB/image/2", Image), - # Person tracker outputs - ("target", PersonTracker): LCMTransport("/person_tracker/target", PoseStamped), - } - ) -) - -# Full featured configuration with everything -unitree_g1_full = autoconnect( - unitree_g1_shm, - _agentic_skills, - keyboard_teleop(), -) diff --git a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py b/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py deleted file mode 100644 index 56a3710dcf..0000000000 --- a/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py +++ /dev/null @@ -1,259 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2025-2026 Dimensional Inc. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
- - -import platform - -from dimos_lcm.foxglove_msgs.ImageAnnotations import ( - ImageAnnotations, # type: ignore[import-untyped] -) -from dimos_lcm.foxglove_msgs.SceneUpdate import SceneUpdate # type: ignore[import-untyped] - -from dimos.agents.agent import llm_agent -from dimos.agents.cli.human import human_input -from dimos.agents.cli.web import web_input -from dimos.agents.ollama_agent import ollama_installed -from dimos.agents.skills.navigation import navigation_skill -from dimos.agents.skills.person_follow import person_follow_skill -from dimos.agents.skills.speak_skill import speak_skill -from dimos.agents.spec import Provider -from dimos.agents.vlm_agent import vlm_agent -from dimos.agents.vlm_stream_tester import vlm_stream_tester -from dimos.constants import DEFAULT_CAPACITY_COLOR_IMAGE -from dimos.core.blueprints import autoconnect -from dimos.core.global_config import global_config -from dimos.core.transport import ( - JpegLcmTransport, - LCMTransport, - ROSTransport, - pSHMTransport, -) -from dimos.mapping.costmapper import cost_mapper -from dimos.mapping.voxels import voxel_mapper -from dimos.msgs.geometry_msgs import PoseStamped -from dimos.msgs.sensor_msgs import Image, PointCloud2 -from dimos.msgs.vision_msgs import Detection2DArray -from dimos.navigation.frontier_exploration import ( - wavefront_frontier_explorer, -) -from dimos.navigation.replanning_a_star.module import ( - replanning_a_star_planner, -) -from dimos.perception.detection.module3D import Detection3DModule, detection3d_module -from dimos.perception.experimental.temporal_memory import temporal_memory -from dimos.perception.spatial_perception import spatial_memory -from dimos.protocol.mcp.mcp import MCPModule -from dimos.protocol.pubsub.impl.lcmpubsub import LCM -from dimos.robot.unitree.connection.go2 import GO2Connection, go2_connection -from dimos.robot.unitree_webrtc.unitree_skill_container import unitree_skills -from dimos.utils.monitoring import utilization -from 
dimos.web.websocket_vis.websocket_vis_module import websocket_vis - -# Mac has some issue with high bandwidth UDP, so we use pSHMTransport for color_image -# actually we can use pSHMTransport for all platforms, and for all streams -# TODO need a global transport toggle on blueprints/global config -mac_transports: dict[tuple[str, type], pSHMTransport[Image]] = { - ("color_image", Image): pSHMTransport( - "color_image", default_capacity=DEFAULT_CAPACITY_COLOR_IMAGE - ), -} - -base = autoconnect() if platform.system() == "Linux" else autoconnect().transports(mac_transports) - - -rerun_config = { - # any pubsub that supports subscribe_all and topic that supports str(topic) - # is acceptable here - "pubsubs": [LCM(autoconf=True)], - # Custom converters for specific rerun entity paths - # Normally all these would be specified in their respectative modules - # Until this is implemented we have central overrides here - # - # This is unsustainable once we move to multi robot etc - "visual_override": { - "world/camera_info": lambda camera_info: camera_info.to_rerun( - image_topic="/world/color_image", - optical_frame="camera_optical", - ), - "world/global_map": lambda grid: grid.to_rerun(voxel_size=0.1), - "world/navigation_costmap": lambda grid: grid.to_rerun( - colormap="Accent", - z_offset=0.015, - opacity=0.2, - background="#484981", - ), - }, - # slapping a go2 shaped box on top of tf/base_link - "static": { - "world/tf/base_link": lambda rr: [ - rr.Boxes3D( - half_sizes=[0.35, 0.155, 0.2], - colors=[(0, 255, 127)], - fill_mode="wireframe", - ), - rr.Transform3D(parent_frame="tf#/base_link"), - ] - }, -} - - -match global_config.viewer_backend: - case "foxglove": - from dimos.robot.foxglove_bridge import foxglove_bridge - - with_vis = autoconnect( - base, - foxglove_bridge(shm_channels=["/color_image#sensor_msgs.Image"]), - ) - case "rerun": - from dimos.visualization.rerun.bridge import rerun_bridge - - with_vis = autoconnect(base, rerun_bridge(**rerun_config)) - case 
"rerun-web": - from dimos.visualization.rerun.bridge import rerun_bridge - - with_vis = autoconnect(base, rerun_bridge(viewer_mode="web", **rerun_config)) - case _: - with_vis = base - - -unitree_go2_basic = autoconnect( - with_vis, - go2_connection(), - websocket_vis(), -).global_config(n_dask_workers=4, robot_model="unitree_go2") - -unitree_go2 = autoconnect( - unitree_go2_basic, - voxel_mapper(voxel_size=0.1), - cost_mapper(), - replanning_a_star_planner(), - wavefront_frontier_explorer(), -).global_config(n_dask_workers=6, robot_model="unitree_go2") - - -unitree_go2_ros = unitree_go2.transports( - { - ("lidar", PointCloud2): ROSTransport("lidar", PointCloud2), - ("global_map", PointCloud2): ROSTransport("global_map", PointCloud2), - ("odom", PoseStamped): ROSTransport("odom", PoseStamped), - ("color_image", Image): ROSTransport("color_image", Image), - } -) - -unitree_go2_detection = ( - autoconnect( - unitree_go2, - detection3d_module( - camera_info=GO2Connection.camera_info_static, - ), - ) - .remappings( - [ - (Detection3DModule, "pointcloud", "global_map"), - ] - ) - .transports( - { - # Detection 3D module outputs - ("detections", Detection3DModule): LCMTransport( - "/detector3d/detections", Detection2DArray - ), - ("annotations", Detection3DModule): LCMTransport( - "/detector3d/annotations", ImageAnnotations - ), - ("scene_update", Detection3DModule): LCMTransport( - "/detector3d/scene_update", SceneUpdate - ), - ("detected_pointcloud_0", Detection3DModule): LCMTransport( - "/detector3d/pointcloud/0", PointCloud2 - ), - ("detected_pointcloud_1", Detection3DModule): LCMTransport( - "/detector3d/pointcloud/1", PointCloud2 - ), - ("detected_pointcloud_2", Detection3DModule): LCMTransport( - "/detector3d/pointcloud/2", PointCloud2 - ), - ("detected_image_0", Detection3DModule): LCMTransport("/detector3d/image/0", Image), - ("detected_image_1", Detection3DModule): LCMTransport("/detector3d/image/1", Image), - ("detected_image_2", Detection3DModule): 
LCMTransport("/detector3d/image/2", Image), - } - ) -) - - -unitree_go2_spatial = autoconnect( - unitree_go2, - spatial_memory(), - utilization(), -).global_config(n_dask_workers=8) - -_with_jpeglcm = unitree_go2.transports( - { - ("color_image", Image): JpegLcmTransport("/color_image", Image), - } -) - -_common_agentic = autoconnect( - human_input(), - navigation_skill(), - person_follow_skill(camera_info=GO2Connection.camera_info_static), - unitree_skills(), - web_input(), - speak_skill(), -) - -unitree_go2_agentic = autoconnect( - unitree_go2_spatial, - llm_agent(), - _common_agentic, -) - -unitree_go2_agentic_mcp = autoconnect( - unitree_go2_agentic, - MCPModule.blueprint(), -) - -unitree_go2_agentic_ollama = autoconnect( - unitree_go2_spatial, - llm_agent( - model="qwen3:8b", - provider=Provider.OLLAMA, # type: ignore[attr-defined] - ), - _common_agentic, -).requirements( - ollama_installed, -) - -unitree_go2_agentic_huggingface = autoconnect( - unitree_go2_spatial, - llm_agent( - model="Qwen/Qwen2.5-1.5B-Instruct", - provider=Provider.HUGGINGFACE, # type: ignore[attr-defined] - ), - _common_agentic, -) - -unitree_go2_vlm_stream_test = autoconnect( - unitree_go2_basic, - vlm_agent(), - vlm_stream_tester(), -) - -unitree_go2_temporal_memory = autoconnect( - unitree_go2_agentic, - temporal_memory(), -) diff --git a/dimos/simulation/mujoco/mujoco_process.py b/dimos/simulation/mujoco/mujoco_process.py index f3e6eba279..8529de976b 100755 --- a/dimos/simulation/mujoco/mujoco_process.py +++ b/dimos/simulation/mujoco/mujoco_process.py @@ -63,7 +63,8 @@ def get_command(self) -> NDArray[Any]: self._command[0] = linear[0] # forward/backward self._command[1] = linear[1] # left/right self._command[2] = angular[2] # rotation - return self._command.copy() + result: NDArray[Any] = self._command.copy() + return result def stop(self) -> None: """Stop method to satisfy InputController protocol.""" diff --git a/dimos/stream/video_operators.py b/dimos/stream/video_operators.py 
index 548bba7598..a94b6fa3a1 100644 --- a/dimos/stream/video_operators.py +++ b/dimos/stream/video_operators.py @@ -231,7 +231,7 @@ def _encode_image(image: np.ndarray) -> tuple[str, tuple[int, int]]: # type: ig _, buffer = cv2.imencode(".jpg", image) if buffer is None: raise ValueError("Failed to encode image") - base64_image = base64.b64encode(buffer).decode("utf-8") + base64_image = base64.b64encode(buffer.tobytes()).decode("utf-8") return base64_image, (width, height) except Exception as e: raise e diff --git a/dimos/utils/testing/__init__.py b/dimos/utils/testing/__init__.py index ffb640de39..568cd3604f 100644 --- a/dimos/utils/testing/__init__.py +++ b/dimos/utils/testing/__init__.py @@ -1,11 +1,9 @@ -from dimos.utils.testing.moment import Moment, OutputMoment, SensorMoment -from dimos.utils.testing.replay import SensorReplay, TimedSensorReplay, TimedSensorStorage +import lazy_loader as lazy -__all__ = [ - "Moment", - "OutputMoment", - "SensorMoment", - "SensorReplay", - "TimedSensorReplay", - "TimedSensorStorage", -] +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "moment": ["Moment", "OutputMoment", "SensorMoment"], + "replay": ["SensorReplay", "TimedSensorReplay", "TimedSensorStorage"], + }, +) diff --git a/dimos/utils/testing/test_moment.py b/dimos/utils/testing/test_moment.py index 92b71e59ac..6764610d0e 100644 --- a/dimos/utils/testing/test_moment.py +++ b/dimos/utils/testing/test_moment.py @@ -17,7 +17,7 @@ from dimos.msgs.geometry_msgs import PoseStamped, Transform from dimos.msgs.sensor_msgs import CameraInfo, Image, PointCloud2 from dimos.protocol.tf import TF -from dimos.robot.unitree.connection import go2 +from dimos.robot.unitree.go2 import connection from dimos.utils.data import get_data from dimos.utils.testing.moment import Moment, SensorMoment @@ -43,14 +43,14 @@ def transforms(self) -> list[Transform]: # back and forth through time and foxglove doesn't get confused odom = self.odom.value odom.ts = time.time() - 
return go2.GO2Connection._odom_to_tf(odom) + return connection.GO2Connection._odom_to_tf(odom) def publish(self) -> None: t = TF() t.publish(*self.transforms) t.stop() - camera_info = go2._camera_info_static() + camera_info = connection._camera_info_static() camera_info.ts = time.time() camera_info_transport: LCMTransport[CameraInfo] = LCMTransport("/camera_info", CameraInfo) camera_info_transport.publish(camera_info) diff --git a/dimos/utils/testing/test_replay.py b/dimos/utils/testing/test_replay.py index 640fe92979..6b16525148 100644 --- a/dimos/utils/testing/test_replay.py +++ b/dimos/utils/testing/test_replay.py @@ -17,8 +17,8 @@ from reactivex import operators as ops from dimos.msgs.sensor_msgs import PointCloud2 -from dimos.robot.unitree_webrtc.type.lidar import pointcloud2_from_webrtc_lidar -from dimos.robot.unitree_webrtc.type.odometry import Odometry +from dimos.robot.unitree.type.lidar import pointcloud2_from_webrtc_lidar +from dimos.robot.unitree.type.odometry import Odometry from dimos.utils.data import get_data from dimos.utils.testing import replay diff --git a/dimos/web/dimos_interface/__init__.py b/dimos/web/dimos_interface/__init__.py index 5ca28b30e5..3bdc622cee 100644 --- a/dimos/web/dimos_interface/__init__.py +++ b/dimos/web/dimos_interface/__init__.py @@ -2,6 +2,11 @@ Dimensional Interface package """ -from .api.server import FastAPIServer +import lazy_loader as lazy -__all__ = ["FastAPIServer"] +__getattr__, __dir__, __all__ = lazy.attach( + __name__, + submod_attrs={ + "api.server": ["FastAPIServer"], + }, +) diff --git a/docs/api/sensor_streams/storage_replay.md b/docs/api/sensor_streams/storage_replay.md index 1a31591736..c5cbe306a8 100644 --- a/docs/api/sensor_streams/storage_replay.md +++ b/docs/api/sensor_streams/storage_replay.md @@ -159,7 +159,7 @@ replay.stream( ## Usage: Stub Connections for Testing -A common pattern is creating replay-based connection stubs for testing without hardware. 
From [`robot/unitree/connection/go2.py`](/dimos/robot/unitree/connection/go2.py#L83): +A common pattern is creating replay-based connection stubs for testing without hardware. From [`robot/unitree/go2/connection.py`](/dimos/robot/unitree/go2/connection.py#L83): This is a bit primitive. We'd like to write a higher-order API for recording full module I/O for any module, but this is a work in progress at the moment. diff --git a/docs/api/visualization.md b/docs/api/visualization.md index 9259e7e061..dccdef4a9d 100644 --- a/docs/api/visualization.md +++ b/docs/api/visualization.md @@ -71,7 +71,7 @@ This happens on lower-end hardware (NUC, older laptops) with large maps. ### Increase Voxel Size -Edit [`dimos/robot/unitree_webrtc/unitree_go2_blueprints.py`](/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py) line 82: +Edit [`dimos/robot/unitree/go2/blueprints/__init__.py`](/dimos/robot/unitree/go2/blueprints/__init__.py) line 82: ```python # Before (high detail, slower on large maps) diff --git a/docs/concepts/transports.md b/docs/concepts/transports.md index faac9a2ec5..e4b62b01ce 100644 --- a/docs/concepts/transports.md +++ b/docs/concepts/transports.md @@ -81,7 +81,7 @@ We’ll go through these layers top-down. See [Blueprints](blueprints.md) for the blueprint API. -From [`unitree_go2_blueprints.py`](/dimos/robot/unitree_webrtc/unitree_go2_blueprints.py). +From [`unitree/go2/blueprints/__init__.py`](/dimos/robot/unitree/go2/blueprints/__init__.py). Example: rebind a few streams from the default `LCMTransport` to `ROSTransport` (defined at [`transport.py`](/dimos/core/transport.py#L226)) so you can visualize in **rviz2**. 
diff --git a/lazy_loader/__init__.pyi b/lazy_loader/__init__.pyi new file mode 100644 index 0000000000..b3c4be8608 --- /dev/null +++ b/lazy_loader/__init__.pyi @@ -0,0 +1,11 @@ +from __future__ import annotations + +from collections.abc import Callable, Mapping, Sequence +from typing import Any + +def attach( + package_name: str, + *, + submodules: Sequence[str] | None = None, + submod_attrs: Mapping[str, Sequence[str]] | None = None, +) -> tuple[Callable[[str], Any], Callable[[], list[str]], list[str]]: ... diff --git a/pyproject.toml b/pyproject.toml index e6b542a65e..3f5ce9bbd0 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -42,6 +42,7 @@ dependencies = [ "pydantic", "python-dotenv", "annotation-protocol>=1.4.0", + "lazy_loader", # Multiprocess "dask[complete]==2025.5.1", diff --git a/uv.lock b/uv.lock index 75f839303f..0e98f5dcc9 100644 --- a/uv.lock +++ b/uv.lock @@ -1735,6 +1735,7 @@ dependencies = [ { name = "colorlog" }, { name = "dask", extra = ["complete"] }, { name = "dimos-lcm" }, + { name = "lazy-loader" }, { name = "llvmlite" }, { name = "numba" }, { name = "numpy", version = "2.2.6", source = { registry = "https://pypi.org/simple" }, marker = "python_full_version < '3.11'" }, @@ -2029,6 +2030,7 @@ requires-dist = [ { name = "langchain-text-splitters", marker = "extra == 'agents'", specifier = ">=1,<2" }, { name = "lap", marker = "extra == 'perception'", specifier = ">=0.5.12" }, { name = "lark", marker = "extra == 'misc'" }, + { name = "lazy-loader" }, { name = "lcm", marker = "extra == 'docker'" }, { name = "llvmlite", specifier = ">=0.42.0" }, { name = "lxml-stubs", marker = "extra == 'dev'", specifier = ">=0.5.1,<1" }, @@ -4180,6 +4182,18 @@ wheels = [ { url = "https://files.pythonhosted.org/packages/82/3d/14ce75ef66813643812f3093ab17e46d3a206942ce7376d31ec2d36229e7/lark-1.3.1-py3-none-any.whl", hash = "sha256:c629b661023a014c37da873b4ff58a817398d12635d3bbb2c5a03be7fe5d1e12", size = 113151, upload-time = "2025-10-27T18:25:54.882Z" }, ] 
+[[package]] +name = "lazy-loader" +version = "0.4" +source = { registry = "https://pypi.org/simple" } +dependencies = [ + { name = "packaging" }, +] +sdist = { url = "https://files.pythonhosted.org/packages/6f/6b/c875b30a1ba490860c93da4cabf479e03f584eba06fe5963f6f6644653d8/lazy_loader-0.4.tar.gz", hash = "sha256:47c75182589b91a4e1a85a136c074285a5ad4d9f39c63e0d7fb76391c4574cd1", size = 15431, upload-time = "2024-04-05T13:03:12.261Z" } +wheels = [ + { url = "https://files.pythonhosted.org/packages/83/60/d497a310bde3f01cb805196ac61b7ad6dc5dcf8dce66634dc34364b20b4f/lazy_loader-0.4-py3-none-any.whl", hash = "sha256:342aa8e14d543a154047afb4ba8ef17f5563baad3fc610d7b15b213b0f119efc", size = 12097, upload-time = "2024-04-05T13:03:10.514Z" }, +] + [[package]] name = "lcm" version = "1.5.2"