Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 5 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -99,10 +99,13 @@ pip install torch==2.0.1 torchvision torchaudio --index-url https://download.pyt
#### Install dependencies
```bash
# CPU only (recommended to attempt first)
pip install -e '.[cpu,dev]'
uv sync --extra cpu --extra dev

# CUDA install
pip install -e '.[cuda,dev]'
uv sync --extra cuda --extra dev

# Jetson Jetpack 6.2 with CUDA 12.6 (Python >=3.10)
uv sync --extra jetson-jp6-cuda126 --extra dev

# Copy and configure environment variables
cp default.env .env
Expand Down
30 changes: 19 additions & 11 deletions pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -198,11 +198,12 @@ cpu = [
cuda = [
"cupy-cuda12x==13.6.0",
"nvidia-nvimgcodec-cu12[all]",
"onnxruntime-gpu>=1.17.1", # Only versions supporting both cuda11 and cuda12
# Exclude on aarch64 Linux where jetson-jp6-cuda126 extra provides Jetson-specific wheels
"onnxruntime-gpu>=1.17.1 ; platform_machine != 'aarch64' or sys_platform != 'linux'",
"ctransformers[cuda]==0.2.27",
"mmengine>=0.10.3",
"mmcv>=2.1.0",
"xformers>=0.0.20",
"xformers>=0.0.20 ; not (platform_machine == 'aarch64' and sys_platform == 'linux')",

# Detic GPU stack
"mss",
Expand Down Expand Up @@ -257,15 +258,13 @@ sim = [
"playground>=0.0.5",
]

# NOTE: jetson-jp6-cuda126 extra is disabled due to 404 errors from wheel URLs
# The pypi.jetson-ai-lab.io URLs are currently unavailable. Update with working URLs when available.
# jetson-jp6-cuda126 = [
# # Jetson Jetpack 6.2 with CUDA 12.6 specific wheels (aarch64 Linux only)
# "torch @ https://pypi.jetson-ai-lab.io/jp6/cu126/+f/.../torch-2.8.0-cp310-cp310-linux_aarch64.whl ; platform_machine == 'aarch64' and sys_platform == 'linux'",
# "torchvision @ https://pypi.jetson-ai-lab.io/jp6/cu126/+f/.../torchvision-0.23.0-cp310-cp310-linux_aarch64.whl ; platform_machine == 'aarch64' and sys_platform == 'linux'",
# "onnxruntime-gpu @ https://pypi.jetson-ai-lab.io/jp6/cu126/+f/.../onnxruntime_gpu-1.23.0-cp310-cp310-linux_aarch64.whl ; platform_machine == 'aarch64' and sys_platform == 'linux'",
# "xformers @ https://pypi.jetson-ai-lab.io/jp6/cu126/+f/.../xformers-0.0.33-cp39-abi3-linux_aarch64.whl ; platform_machine == 'aarch64' and sys_platform == 'linux'",
# ]
jetson-jp6-cuda126 = [
# Jetson JetPack 6.2 with CUDA 12.6 specific wheels (Tegra 239 / aarch64 Linux only).
# The environment markers keep these pins inert on every other platform; on
# aarch64 Linux, resolution for these four packages is redirected to the
# "jetson-jp6-cuda126" index declared in [[tool.uv.index]] via the
# [tool.uv.sources] overrides below.
# NOTE(review): the overrides apply to ALL aarch64 Linux hosts, not just when
# this extra is requested — non-Jetson ARM machines (e.g. Graviton) will also
# resolve these packages from the Jetson index. Confirm this is intended.
"torch==2.8.0 ; platform_machine == 'aarch64' and sys_platform == 'linux'",
"torchvision==0.23.0 ; platform_machine == 'aarch64' and sys_platform == 'linux'",
"onnxruntime-gpu==1.23.0 ; platform_machine == 'aarch64' and sys_platform == 'linux'",
"xformers==0.0.32 ; platform_machine == 'aarch64' and sys_platform == 'linux'",
]

drone = [
"pymavlink"
Expand Down Expand Up @@ -382,3 +381,12 @@ default-groups = []
clip = { git = "https://github.com/openai/CLIP.git" }
contact-graspnet-pytorch = { git = "https://github.com/dimensionalOS/contact_graspnet_pytorch.git" }
detectron2 = { git = "https://github.com/facebookresearch/detectron2.git", tag = "v0.6" }
torch = { index = "jetson-jp6-cuda126", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }
torchvision = { index = "jetson-jp6-cuda126", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }
onnxruntime-gpu = { index = "jetson-jp6-cuda126", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }
xformers = { index = "jetson-jp6-cuda126", marker = "platform_machine == 'aarch64' and sys_platform == 'linux'" }

# Extra package index serving Jetson JetPack 6 / CUDA 12.6 wheels.
# `explicit = true` means uv consults this index only for packages explicitly
# pinned to it (via [tool.uv.sources]); it is never searched during general
# dependency resolution.
[[tool.uv.index]]
name = "jetson-jp6-cuda126"
url = "https://pypi.jetson-ai-lab.io/jp6/cu126"
explicit = true
Comment on lines +384 to +392
Copy link

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The source overrides apply globally to ALL aarch64 Linux installations, not just when the jetson-jp6-cuda126 extra is used. This means:

  1. If a user on aarch64 Linux installs without the jetson extra (e.g., uv sync --extra cpu), they won't get torch/torchvision, but if they manually install torch from PyPI, uv will try to use the Jetson index due to these overrides
  2. These packages will ALWAYS come from the Jetson index on aarch64 Linux, even for non-Jetson ARM devices

This could cause issues for users on non-Jetson aarch64 Linux systems (like Apple Silicon running Linux VMs, AWS Graviton, or other ARM servers) who would unexpectedly get Jetson-optimized wheels that may not work on their hardware.

Consider whether the marker should be more restrictive to only apply when the jetson extra is actually requested, or document this behavior clearly. The current design means the Jetson index becomes the default source for these packages on ALL aarch64 Linux systems.

Loading
Loading