diff --git a/activate_venv.sh b/activate_venv.sh
deleted file mode 100755
index 7b3f2b2..0000000
--- a/activate_venv.sh
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/bin/bash
-# UAV-UGV Simulation - Environment Activation
-# Usage: source activate_venv.sh
-
-SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
-
-# ROS 2 Setup
-if [ -f "/opt/ros/jazzy/setup.bash" ]; then
- source /opt/ros/jazzy/setup.bash
- ROS_VER="jazzy"
-elif [ -f "/opt/ros/humble/setup.bash" ]; then
- source /opt/ros/humble/setup.bash
- ROS_VER="humble"
-else
- echo "[WARN] ROS 2 not found in /opt/ros/"
- ROS_VER="none"
-fi
-
-# ArduPilot Environment
-export ARDUPILOT_HOME="$HOME/ardupilot"
-export PATH="$ARDUPILOT_HOME/Tools/autotest:$PATH"
-export PATH="$HOME/.local/bin:$PATH"
-
-# Deactivate existing venv
-if [ -n "$VIRTUAL_ENV" ]; then
- deactivate 2>/dev/null || true
-fi
-
-# Activate project venv
-if [ -f "$SCRIPT_DIR/venv/bin/activate" ]; then
- source "$SCRIPT_DIR/venv/bin/activate"
-fi
-
-# Gazebo paths (new Gazebo - Ignition/Harmonic)
-export GZ_SIM_RESOURCE_PATH="$SCRIPT_DIR/models:$SCRIPT_DIR/worlds:${GZ_SIM_RESOURCE_PATH:-}"
-
-# ArduPilot Gazebo plugin
-if [ -d "$HOME/ardupilot_gazebo/build" ]; then
- export GZ_SIM_SYSTEM_PLUGIN_PATH="$HOME/ardupilot_gazebo/build:${GZ_SIM_SYSTEM_PLUGIN_PATH:-}"
- export GZ_SIM_RESOURCE_PATH="$HOME/ardupilot_gazebo/models:$HOME/ardupilot_gazebo/worlds:$GZ_SIM_RESOURCE_PATH"
-fi
-
-# WSL environment
-if grep -qEi "(microsoft|wsl)" /proc/version 2>/dev/null; then
- if [ -d "/mnt/wslg" ]; then
- export DISPLAY=:0
- else
- export DISPLAY=$(cat /etc/resolv.conf 2>/dev/null | grep nameserver | awk '{print $2}'):0
- fi
- export LIBGL_ALWAYS_INDIRECT=0
- export MESA_GL_VERSION_OVERRIDE=3.3
-fi
-
-echo -e "\033[0;32mEnvironment activated (ROS 2 $ROS_VER)\033[0m"
-echo ""
-echo "Run simulation: bash scripts/run_simulation.sh"
diff --git a/config/ardupilot_gps_denied.parm b/config/ardupilot_gps_denied.parm
index eb75a2a..c96095e 100644
--- a/config/ardupilot_gps_denied.parm
+++ b/config/ardupilot_gps_denied.parm
@@ -70,3 +70,9 @@ FS_GCS_ENABLE 0
# ====================
LOG_BITMASK 176126
LOG_DISARMED 0
+
+# ====================
+# Landing Speed
+# ====================
+LAND_SPEED 150 # Final descent cm/s (default 50)
+LAND_SPEED_HIGH 250 # Initial descent cm/s (default 0=WPNAV_SPEED_DN)
diff --git a/config/geofence_params.yaml b/config/geofence_params.yaml
deleted file mode 100644
index c1ccb70..0000000
--- a/config/geofence_params.yaml
+++ /dev/null
@@ -1,76 +0,0 @@
-# Geofence Configuration
-# GPS is ONLY used for safety boundaries, NOT navigation
-
-/**:
- ros__parameters:
-
- geofence:
- enabled: true
-
- # Fence type
- type: "polygon" # Options: "polygon", "circle", "cylinder"
-
- # Polygon fence (GPS coordinates for safety boundary)
- polygon_fence:
- points:
- - latitude: 47.397742
- longitude: 8.545594
- - latitude: 47.398242
- longitude: 8.545594
- - latitude: 47.398242
- longitude: 8.546094
- - latitude: 47.397742
- longitude: 8.546094
-
- # Close the polygon automatically
- auto_close: true
-
- # Circular fence (alternative to polygon)
- circle_fence:
- center_latitude: 47.397742
- center_longitude: 8.545594
- radius_meters: 100
-
- # Cylinder fence (circle + altitude limits)
- cylinder_fence:
- center_latitude: 47.397742
- center_longitude: 8.545594
- radius_meters: 100
- min_altitude: 0 # meters AGL
- max_altitude: 50 # meters AGL
-
- # Altitude limits (applies to all fence types)
- altitude_limits:
- min_altitude: 0.0 # meters AGL
- max_altitude: 50.0 # meters AGL
- enable_floor: true
- enable_ceiling: true
-
- # Fence actions
- actions:
- on_breach: "RTL" # Options: "RTL", "LAND", "HOLD", "ALERT_ONLY"
- warning_distance: 10.0 # meters from fence
- warning_action: "ALERT" # Options: "ALERT", "SLOW_DOWN"
-
- # Monitoring
- check_rate: 10 # Hz
- consecutive_breaches_required: 3 # Require 3 consecutive breaches before action
-
- # Visualization
- publish_markers: true
- marker_topic: "/geofence/markers"
- marker_color:
- safe: {r: 0.0, g: 1.0, b: 0.0, a: 0.5}
- warning: {r: 1.0, g: 1.0, b: 0.0, a: 0.7}
- breach: {r: 1.0, g: 0.0, b: 0.0, a: 0.9}
-
- # Multi-vehicle support
- vehicles:
- uav:
- enabled: true
- namespace: "/uav"
- gps_topic: "/uav/mavros/global_position/global"
- ugv:
- enabled: true
- namespace: "/ugv"
- gps_topic: "/ugv/gps/fix"
diff --git a/config/gui.config b/config/gui.config
new file mode 100644
index 0000000..6a769b1
--- /dev/null
+++ b/config/gui.config
@@ -0,0 +1,19 @@
+
+
+
+
+ Gimbal Camera
+ docked
+
+ /world/uav_ugv_search/model/iris_with_gimbal/model/gimbal/link/pitch_link/sensor/camera/image
+ true
+
+
+
+
+ Downward Camera
+ docked
+
+ /uav/camera/downward
+ true
+
diff --git a/config/mavros_params.yaml b/config/mavros_params.yaml
deleted file mode 100644
index d5a79dc..0000000
--- a/config/mavros_params.yaml
+++ /dev/null
@@ -1,72 +0,0 @@
-# MAVROS Configuration for GPS-Denied Navigation
-# GPS is DISABLED for navigation, ENABLED only for geofencing
-
-/**:
- ros__parameters:
- use_sim_time: true
-
- # Connection
- fcu_protocol: "v2.0"
-
- # System
- system_id: 1
- component_id: 1
-
- # Time sync
- time:
- time_ref_source: "fcu"
- timesync_mode: MAVLINK
- timesync_avg_alpha: 0.6
-
- # GPS Settings - ONLY for geofencing
- gps:
- # Disable GPS for navigation
- use_gps_for_navigation: false
-
- # Enable GPS only for geofence
- enable_gps_geofence: true
-
- # Local position (PRIMARY navigation source)
- local_position:
- frame_id: "map"
- tf:
- send: true
- frame_id: "map"
- child_frame_id: "base_link"
-
- # Use local odometry, NOT GPS
- use_vision: true # Will use our visual odometry
-
- # Vision pose input (from visual odometry)
- vision_pose:
- tf:
- listen: false
- frame_id: "odom"
- child_frame_id: "base_link"
-
- # Vision speed input (from optical flow)
- vision_speed:
- listen_twist: true
-
- # Setpoint configuration
- setpoint_raw:
- thrust_scaling: 1.0
-
- setpoint_position:
- tf:
- listen: false
- # Use LOCAL_NED frame (relative positioning)
- mav_frame: 1 # MAV_FRAME_LOCAL_NED
-
- setpoint_velocity:
- mav_frame: 8 # MAV_FRAME_BODY_NED (body-relative)
-
- # Disable GPS-based modes
- # Only allow: MANUAL, STABILIZE, ALT_HOLD, GUIDED (local), LOITER (local)
- allowed_modes:
- - "MANUAL"
- - "STABILIZE"
- - "ALT_HOLD"
- - "GUIDED"
- - "LOITER"
- - "RTL" # Returns to LOCAL origin, not GPS home
diff --git a/config/missions/hover.yaml b/config/missions/hover.yaml
new file mode 100644
index 0000000..4415752
--- /dev/null
+++ b/config/missions/hover.yaml
@@ -0,0 +1,12 @@
+name: hover
+description: Take off and hold position
+
+altitude: 5.0
+duration: 10.0
+
+steps:
+ - action: takeoff
+ altitude: 5.0
+ - action: hover
+ duration: 10.0
+ - action: land
diff --git a/config/missions/search.yaml b/config/missions/search.yaml
new file mode 100644
index 0000000..eae8654
--- /dev/null
+++ b/config/missions/search.yaml
@@ -0,0 +1,21 @@
+name: search
+description: Spiral search pattern to find and land on UGV
+
+altitude: 5.0
+
+search:
+ pattern: spiral
+ initial_leg: 3.0
+ leg_increment: 2.0
+ max_legs: 12
+ detection_radius: 2.0
+
+steps:
+ - action: takeoff
+ altitude: 5.0
+ - action: search_spiral
+ initial_leg: 3.0
+ leg_increment: 2.0
+ max_legs: 12
+ - action: land_on_ugv
+ detection_radius: 2.0
diff --git a/config/missions/square.yaml b/config/missions/square.yaml
new file mode 100644
index 0000000..d5573a1
--- /dev/null
+++ b/config/missions/square.yaml
@@ -0,0 +1,26 @@
+name: square
+description: Fly a square pattern
+
+altitude: 5.0
+side_length: 5.0
+
+steps:
+ - action: takeoff
+ altitude: 5.0
+ - action: move_rel
+ x: 5.0
+ y: 0.0
+ z: 0.0
+ - action: move_rel
+ x: 0.0
+ y: 5.0
+ z: 0.0
+ - action: move_rel
+ x: -5.0
+ y: 0.0
+ z: 0.0
+ - action: move_rel
+ x: 0.0
+ y: -5.0
+ z: 0.0
+ - action: land
diff --git a/config/properties.yaml b/config/properties.yaml
new file mode 100644
index 0000000..3aaea73
--- /dev/null
+++ b/config/properties.yaml
@@ -0,0 +1,36 @@
+simulation:
+ world: uav_ugv_search.sdf
+ software_render: auto
+
+physics:
+ max_step_size: 0.002
+ real_time_factor: 1.0
+
+sensor_noise:
+ enabled: true
+ position_stddev: 0.05
+ velocity_stddev: 0.1
+ gyro_stddev: 0.0002
+ accel_stddev: 0.017
+
+gps:
+ enabled: true
+ update_rate: 5
+ noise_stddev: 2.0
+ purpose: geofence_only
+
+initial_positions:
+ uav:
+ x: 0.0
+ y: 0.0
+ z: 0.195
+ yaw: 90.0
+ ugv:
+ x: 5.0
+ y: 5.0
+ z: 0.0
+ yaw: 0.0
+
+logging:
+ enabled: true
+ log_directory: logs
diff --git a/config/simulation_config.yaml b/config/simulation_config.yaml
deleted file mode 100644
index 1c5da43..0000000
--- a/config/simulation_config.yaml
+++ /dev/null
@@ -1,86 +0,0 @@
-# General Simulation Configuration
-
-/**:
- ros__parameters:
-
- simulation:
- # Use Gazebo ground truth for "perfect" position estimation
- # In real world, this would come from visual odometry
- use_gazebo_ground_truth: true # Set false for realistic VO errors
-
- # Add noise to simulate real sensors
- add_sensor_noise: true
-
- # Noise parameters
- noise:
- position_stddev: 0.05 # meters
- velocity_stddev: 0.1 # m/s
- imu_gyro_stddev: 0.0002 # rad/s
- imu_accel_stddev: 0.017 # m/s^2
-
- # GPS simulation (for geofence only)
- gps:
- enabled: true # Only for geofencing
- update_rate: 5 # Hz
- noise_stddev: 2.0 # meters (realistic GPS error)
-
- # Real-time factor (1.0 = real-time)
- real_time_factor: 1.0
-
- # Physics settings
- physics:
- engine: "ode"
- max_step_size: 0.001
- real_time_update_rate: 1000
-
- # World configuration
- world:
- name: "empty_custom"
- gravity: 9.81
-
- # Lighting (affects vision)
- lighting:
- ambient_level: 0.4
- shadows: true
- sun_angle: 45.0 # degrees
-
- # Ground plane
- ground:
- size: [100.0, 100.0] # meters
- texture: "ground_plane"
- friction: 1.0
-
- # Initial positions (LOCAL coordinates)
- initial_positions:
- uav:
- x: 0.0
- y: 0.0
- z: 0.2 # Start slightly above ground
- roll: 0.0
- pitch: 0.0
- yaw: 0.0
-
- ugv:
- x: 5.0
- y: 0.0
- z: 0.0
- roll: 0.0
- pitch: 0.0
- yaw: 1.57 # 90 degrees (facing +Y)
-
- # Visualization
- visualization:
- show_trajectory: true
- trajectory_length: 1000 # points
- show_local_frame: true
- show_velocity_vector: true
-
- # Data logging
- logging:
- enabled: true
- log_directory: "~/.ros/uav_ugv_logs"
- log_topics:
- - "/uav/visual_odometry/pose"
- - "/uav/mavros/local_position/pose"
- - "/geofence/status"
- bag_recording: false
diff --git a/config/uav.yaml b/config/uav.yaml
new file mode 100644
index 0000000..66a6360
--- /dev/null
+++ b/config/uav.yaml
@@ -0,0 +1,74 @@
+connection:
+ sim: "tcp:127.0.0.1:5760"
+ real: "/dev/ttyAMA0"
+ baud: 57600
+
+flight:
+ takeoff_altitude: 5.0
+ max_altitude: 15.0
+ min_altitude: 3.0
+ max_velocity: 2.0
+ max_acceleration: 1.0
+ max_climb_rate: 1.0
+ max_yaw_rate: 45.0
+
+navigation:
+ frame: LOCAL_NED
+ waypoint_radius: 0.5
+ position_hold_radius: 0.2
+ home_mode: local
+
+vision:
+ forward_camera:
+ enabled: true
+ frame_rate: 30
+ resolution: [640, 480]
+ downward_camera:
+ enabled: true
+ frame_rate: 30
+ resolution: [320, 240]
+ visual_odometry:
+ enabled: true
+ method: ORB
+ min_features: 100
+ max_features: 500
+ optical_flow:
+ enabled: true
+ method: Lucas-Kanade
+ window_size: 15
+ min_altitude: 0.3
+ max_altitude: 10.0
+ landmark_detection:
+ enabled: true
+ method: ArUco
+ marker_size: 0.15
+
+position_estimation:
+ method: EKF
+ update_rate: 50
+ weights:
+ visual_odometry: 0.6
+ optical_flow: 0.3
+ imu: 0.1
+ process_noise:
+ position: 0.1
+ velocity: 0.5
+ measurement_noise:
+ visual_odom: 0.05
+ optical_flow: 0.1
+ imu: 0.2
+
+obstacle_avoidance:
+ enabled: true
+ detection_range: 5.0
+ safety_margin: 1.0
+
+safety:
+ geofence:
+ enabled: true
+ action_on_breach: RTL
+ max_altitude: 10
+ radius: 20
+ failsafe:
+ vision_loss_timeout: 5.0
+ action_on_vision_loss: HOLD
diff --git a/config/uav_params.yaml b/config/uav_params.yaml
deleted file mode 100644
index 60abd84..0000000
--- a/config/uav_params.yaml
+++ /dev/null
@@ -1,105 +0,0 @@
-# UAV Configuration - GPS-Denied Navigation
-/**:
- ros__parameters:
-
- # Vision parameters for navigation
- vision:
- # Forward camera (for visual odometry)
- forward_camera:
- enabled: true
- topic: "/uav/camera/forward/image_raw"
- info_topic: "/uav/camera/forward/camera_info"
- frame_rate: 30
-
- # Downward camera (for optical flow)
- downward_camera:
- enabled: true
- topic: "/uav/camera/downward/image_raw"
- info_topic: "/uav/camera/downward/camera_info"
- frame_rate: 30
-
- # Visual odometry settings
- visual_odometry:
- enabled: true
- method: "ORB" # Options: ORB, SIFT, SURF
- min_features: 100
- max_features: 500
- feature_quality: 0.01
- min_distance: 10
-
- # Optical flow settings
- optical_flow:
- enabled: true
- method: "Lucas-Kanade"
- window_size: 15
- max_level: 3
- min_altitude: 0.3 # meters
- max_altitude: 10.0 # meters
-
- # Landmark detection
- landmarks:
- enabled: true
- detection_method: "ArUco" # Options: ArUco, AprilTag, ORB
- marker_size: 0.15 # meters
-
- # Position estimation (sensor fusion)
- position_estimator:
- fusion_method: "EKF" # Extended Kalman Filter
-
- # Sensor weights (trust levels)
- weights:
- visual_odometry: 0.6
- optical_flow: 0.3
- imu: 0.1
-
- # Update rates
- update_rate: 50 # Hz
-
- # Covariance matrices
- process_noise:
- position: 0.1
- velocity: 0.5
-
- measurement_noise:
- visual_odom: 0.05
- optical_flow: 0.1
- imu: 0.2
-
- # Navigation parameters (RELATIVE coordinates only)
- navigation:
- frame: "LOCAL_NED" # Never use GPS frame
-
- # Control parameters
- max_velocity: 2.0 # m/s
- max_acceleration: 1.0 # m/s^2
- max_climb_rate: 1.0 # m/s
-
- # Waypoint following
- waypoint_radius: 0.5 # meters
- position_hold_radius: 0.2 # meters
-
- # Obstacle avoidance (vision-based)
- obstacle_avoidance:
- enabled: true
- detection_range: 5.0 # meters
- safety_margin: 1.0 # meters
-
- # Mission parameters
- mission:
- takeoff_altitude: 5.0 # meters (relative)
- loiter_radius: 2.0 # meters
- rtl_altitude: 10.0 # meters (relative)
-
- # Home position is LOCAL (0,0,0), not GPS
- home_mode: "local" # Never "gps"
-
- # Safety (geofencing uses GPS, but navigation doesn't)
- safety:
- geofence:
- enabled: true
- use_gps: true # ONLY for geofence
- action_on_breach: "RTL" # Return to LOCAL origin
-
- failsafe:
- vision_loss_timeout: 5.0 # seconds
- action_on_vision_loss: "HOLD" # or "RTL" or "LAND"
diff --git a/config/ugv.yaml b/config/ugv.yaml
new file mode 100644
index 0000000..1beebea
--- /dev/null
+++ b/config/ugv.yaml
@@ -0,0 +1,46 @@
+vehicle:
+ wheelbase: 0.3
+ track_width: 0.25
+ wheel_radius: 0.05
+
+connection:
+ sim: null
+ real: null
+
+position:
+ x: 5.0
+ y: 5.0
+ z: 0.0
+ yaw: 0.0
+
+navigation:
+ max_linear_velocity: 1.0
+ max_angular_velocity: 1.5
+ linear_acceleration: 0.5
+ angular_acceleration: 1.0
+ waypoint_radius: 0.3
+ position_tolerance: 0.3
+
+landing_pad:
+ marker_type: ArUco
+ marker_id: 0
+ marker_size: 0.3
+ pad_diameter: 1.0
+ color: [255, 255, 0]
+
+vision:
+ detection_method: ArUco
+ camera:
+ enabled: true
+ frame_rate: 30
+ resolution: [320, 240]
+
+safety:
+ geofence:
+ enabled: true
+ action_on_breach: STOP
+ failsafe:
+ vision_loss_timeout: 3.0
+ action_on_vision_loss: STOP
+ collision_distance: 0.2
+ action_on_collision: STOP
diff --git a/config/ugv_params.yaml b/config/ugv_params.yaml
deleted file mode 100644
index dd274f5..0000000
--- a/config/ugv_params.yaml
+++ /dev/null
@@ -1,110 +0,0 @@
-# UGV Configuration - GPS-Denied Navigation
-/**:
- ros__parameters:
-
- # Vehicle physical parameters
- vehicle:
- wheelbase: 0.3 # meters
- track_width: 0.25 # meters
- wheel_radius: 0.05 # meters
- max_speed: 1.0 # m/s
- max_turn_rate: 1.5 # rad/s
-
- # Vision parameters for navigation
- vision:
- # Forward camera (for visual odometry and obstacle detection)
- forward_camera:
- enabled: true
- topic: "/ugv/camera/forward/image_raw"
- info_topic: "/ugv/camera/forward/camera_info"
- frame_rate: 30
- fov_horizontal: 90 # degrees
- fov_vertical: 60 # degrees
-
- # Visual odometry settings
- visual_odometry:
- enabled: true
- method: "ORB" # Options: ORB, SIFT
- min_features: 150
- max_features: 600
- feature_quality: 0.02
- min_distance: 8
-
- # Wheel odometry (primary odometry for ground vehicle)
- wheel_odometry:
- enabled: true
- topic: "/ugv/odom"
- covariance_scale: 0.05
-
- # Obstacle detection
- obstacle_detection:
- enabled: true
- method: "depth_camera" # or "stereo"
- min_distance: 0.3 # meters
- max_distance: 10.0 # meters
-
- # Position estimation (sensor fusion)
- position_estimator:
- fusion_method: "EKF"
-
- # Sensor weights (trust levels)
- weights:
- visual_odometry: 0.3
- wheel_odometry: 0.5
- imu: 0.2
-
- # Update rates
- update_rate: 50 # Hz
-
- # Covariance matrices
- process_noise:
- position: 0.05
- velocity: 0.2
- heading: 0.1
-
- measurement_noise:
- visual_odom: 0.08
- wheel_odom: 0.03
- imu: 0.15
-
- # Navigation parameters (RELATIVE coordinates only)
- navigation:
- frame: "LOCAL_NED" # Never use GPS frame
-
- # Control parameters
- max_linear_velocity: 1.0 # m/s
- max_angular_velocity: 1.5 # rad/s
- linear_acceleration: 0.5 # m/s^2
- angular_acceleration: 1.0 # rad/s^2
-
- # Path following
- lookahead_distance: 0.5 # meters
- waypoint_radius: 0.3 # meters
-
- # Obstacle avoidance
- obstacle_avoidance:
- enabled: true
- detection_range: 3.0 # meters
- safety_margin: 0.5 # meters
- avoidance_method: "VFH" # Vector Field Histogram
-
- # Mission parameters
- mission:
- default_speed: 0.5 # m/s
- turn_in_place_threshold: 0.5 # radians
-
- # Home position is LOCAL (0,0,0), not GPS
- home_mode: "local"
-
- # Safety
- safety:
- geofence:
- enabled: true
- use_gps: true # ONLY for geofence
- action_on_breach: "STOP"
-
- failsafe:
- vision_loss_timeout: 3.0 # seconds
- action_on_vision_loss: "STOP"
- collision_distance: 0.2 # meters
- action_on_collision: "STOP"
diff --git a/models/iris_with_gimbal/model.config b/models/iris_with_gimbal/model.config
new file mode 100755
index 0000000..b7fdff3
--- /dev/null
+++ b/models/iris_with_gimbal/model.config
@@ -0,0 +1,45 @@
+
+
+
+ Iris with Gimbal
+ 2.0
+ model.sdf
+
+
+ Fadri Furrer
+ fadri.furrer@mavt.ethz.ch
+
+
+ Michael Burri
+
+
+ Mina Kamel
+
+
+ Janosch Nikolic
+
+
+ Markus Achtelik
+
+
+
+ Rhys Mainwaring
+
+
+
+ Starting with iris_with_standoffs
+ add LiftDragPlugin
+ add ArduCopterPlugin
+ add gimbal_small_2d
+
+
+
+ model://gimbal_small_2d
+ 2.0
+
+
+ model://iris_with_standoffs
+ 2.0
+
+
+
diff --git a/models/iris_with_gimbal/model.sdf b/models/iris_with_gimbal/model.sdf
new file mode 100755
index 0000000..2c0cc68
--- /dev/null
+++ b/models/iris_with_gimbal/model.sdf
@@ -0,0 +1,383 @@
+
+
+
+
+ model://iris_with_standoffs
+
+
+
+ model://gimbal_small_3d
+ gimbal
+ 0 -0.01 -0.124923 90 0 90
+
+
+
+ iris_with_standoffs::base_link
+ gimbal::base_link
+
+
+ 0
+ 0
+
+ 0 0 1
+
+
+
+
+
+ 0 0 -0.05 0 0 0
+
+ 0.01
+
+ 0.00001
+ 0
+ 0
+ 0.00001
+ 0
+ 0.00001
+
+
+
+ 0 0 0 0 1.5708 0
+ 1
+ 10
+ 1
+ /uav/camera/downward
+
+ 1.3962634
+
+ 640
+ 480
+ R8G8B8
+
+
+ 0.1
+ 100
+
+
+
+
+
+ iris_with_standoffs::base_link
+ downward_cam_link
+
+
+
+
+
+
+ 0.3
+ 1.4
+ 4.2500
+ 0.10
+ 0.0
+ -0.025
+ 0.0
+ 0.0
+ 0.002
+ 1.2041
+ 0.084 0 0
+ 0 1 0
+ 0 0 1
+ iris_with_standoffs::rotor_0
+
+
+ 0.3
+ 1.4
+ 4.2500
+ 0.10
+ 0.0
+ -0.025
+ 0.0
+ 0.0
+ 0.002
+ 1.2041
+ -0.084 0 0
+ 0 -1 0
+ 0 0 1
+ iris_with_standoffs::rotor_0
+
+
+
+ 0.3
+ 1.4
+ 4.2500
+ 0.10
+ 0.0
+ -0.025
+ 0.0
+ 0.0
+ 0.002
+ 1.2041
+ 0.084 0 0
+ 0 1 0
+ 0 0 1
+ iris_with_standoffs::rotor_1
+
+
+ 0.3
+ 1.4
+ 4.2500
+ 0.10
+ 0.0
+ -0.025
+ 0.0
+ 0.0
+ 0.002
+ 1.2041
+ -0.084 0 0
+ 0 -1 0
+ 0 0 1
+ iris_with_standoffs::rotor_1
+
+
+
+ 0.3
+ 1.4
+ 4.2500
+ 0.10
+ 0.0
+ -0.025
+ 0.0
+ 0.0
+ 0.002
+ 1.2041
+ 0.084 0 0
+ 0 -1 0
+ 0 0 1
+ iris_with_standoffs::rotor_2
+
+
+ 0.3
+ 1.4
+ 4.2500
+ 0.10
+ 0.0
+ -0.025
+ 0.0
+ 0.0
+ 0.002
+ 1.2041
+ -0.084 0 0
+ 0 1 0
+ 0 0 1
+ iris_with_standoffs::rotor_2
+
+
+
+ 0.3
+ 1.4
+ 4.2500
+ 0.10
+ 0.0
+ -0.025
+ 0.0
+ 0.0
+ 0.002
+ 1.2041
+ 0.084 0 0
+ 0 -1 0
+ 0 0 1
+ iris_with_standoffs::rotor_3
+
+
+ 0.3
+ 1.4
+ 4.2500
+ 0.10
+ 0.0
+ -0.025
+ 0.0
+ 0.0
+ 0.002
+ 1.2041
+ -0.084 0 0
+ 0 1 0
+ 0 0 1
+ iris_with_standoffs::rotor_3
+
+
+
+ iris_with_standoffs::rotor_0_joint
+
+
+ iris_with_standoffs::rotor_1_joint
+
+
+ iris_with_standoffs::rotor_2_joint
+
+
+ iris_with_standoffs::rotor_3_joint
+
+
+
+
+ 127.0.0.1
+ 9002
+ 5
+ 1
+
+
+ 0 0 0 180 0 0
+ 0 0 0 180 0 90
+
+
+ iris_with_standoffs::imu_link::imu_sensor
+
+
+
+ iris_with_standoffs::rotor_0_joint
+ 1
+ 838
+ 0
+ 1100
+ 1900
+ VELOCITY
+ 0.20
+ 0
+ 0
+ 0
+ 0
+ 2.5
+ -2.5
+ 1
+
+
+
+ iris_with_standoffs::rotor_1_joint
+ 1
+ 838
+ 0
+ 1100
+ 1900
+ VELOCITY
+ 0.20
+ 0
+ 0
+ 0
+ 0
+ 2.5
+ -2.5
+ 1
+
+
+
+ iris_with_standoffs::rotor_2_joint
+ 1
+ -838
+ 0
+ 1100
+ 1900
+ VELOCITY
+ 0.20
+ 0
+ 0
+ 0
+ 0
+ 2.5
+ -2.5
+ 1
+
+
+
+ iris_with_standoffs::rotor_3_joint
+ 1
+ -838
+ 0
+ 1100
+ 1900
+ VELOCITY
+ 0.20
+ 0
+ 0
+ 0
+ 0
+ 2.5
+ -2.5
+ 1
+
+
+
+
+ gimbal::roll_joint
+ 1.047197551196
+ -0.5
+ 1100
+ 1900
+ COMMAND
+ /gimbal/cmd_roll
+ 2
+
+
+
+
+ gimbal::pitch_joint
+ -3.14159265
+ -0.75
+ 1100
+ 1900
+ COMMAND
+ /gimbal/cmd_pitch
+ 2
+
+
+
+
+ gimbal::yaw_joint
+ -5.5850536
+ -0.5
+ 1100
+ 1900
+ COMMAND
+ /gimbal/cmd_yaw
+ 2
+
+
+
+
+
+ gimbal::roll_joint
+ /gimbal/cmd_roll
+ 2
+
+
+ gimbal::pitch_joint
+ /gimbal/cmd_pitch
+ 2
+
+
+ gimbal::yaw_joint
+ /gimbal/cmd_yaw
+ 2
+
+
+
+
diff --git a/scripts/run_autonomous.sh b/scripts/run_autonomous.sh
index 3fabf26..116cffc 100755
--- a/scripts/run_autonomous.sh
+++ b/scripts/run_autonomous.sh
@@ -18,8 +18,8 @@ print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
SOFTWARE_RENDER=auto
WORLD="uav_ugv_search.sdf"
MISSION="hover"
-ALTITUDE=5.0
-DURATION=30.0
+ALTITUDE=""
+DURATION=""
while [[ $# -gt 0 ]]; do
case $1 in
@@ -35,8 +35,8 @@ while [[ $# -gt 0 ]]; do
echo " --no-software-render Disable software rendering"
echo " --world World file (default: uav_ugv_search.sdf)"
echo " --mission hover, square, search (default: hover)"
- echo " --altitude Takeoff altitude (default: 5.0)"
- echo " --duration Hover duration (default: 30.0)"
+ echo " --altitude Override altitude from config"
+ echo " --duration Override duration from config"
exit 0
;;
*) shift ;;
@@ -45,6 +45,8 @@ done
cleanup_all() {
print_info "Cleaning up ..."
+ pkill -f "camera_viewer.py" 2>/dev/null || true
+ pkill -f "camera_processor" 2>/dev/null || true
pkill -f "gz sim" 2>/dev/null || true
pkill -f "arducopter" 2>/dev/null || true
pkill -f "sim_vehicle.py" 2>/dev/null || true
@@ -64,6 +66,13 @@ export PATH=$PATH:$HOME/ardupilot/Tools/autotest:$HOME/.local/bin
export GZ_SIM_RESOURCE_PATH="$PROJECT_DIR/models:$PROJECT_DIR/worlds:$HOME/ardupilot_gazebo/models:$HOME/ardupilot_gazebo/worlds"
export GZ_SIM_SYSTEM_PLUGIN_PATH=$HOME/ardupilot_gazebo/build
+print_info "Model path: $GZ_SIM_RESOURCE_PATH"
+if [ -f "$PROJECT_DIR/models/iris_with_gimbal/model.sdf" ]; then
+ print_info "Using LOCAL iris_with_gimbal (with downward camera)"
+else
+ print_info "Using ardupilot_gazebo iris_with_gimbal (NO downward camera)"
+fi
+
if [ "$SOFTWARE_RENDER" = "auto" ]; then
if grep -qi microsoft /proc/version 2>/dev/null; then
print_info "WSL detected -> software rendering"
@@ -93,15 +102,44 @@ else
exit 1
fi
+UGV_CONFIG="$PROJECT_DIR/config/ugv.yaml"
+if [ -f "$UGV_CONFIG" ] && [ -f "$WORLD_FILE" ]; then
+ python3 -c "
+import yaml, re
+cfg = yaml.safe_load(open('$UGV_CONFIG'))
+pos = cfg.get('position', {})
+x, y = pos.get('x', 5.0), pos.get('y', 5.0)
+with open('$WORLD_FILE', 'r') as f:
+ sdf = f.read()
+sdf = re.sub(
+ r'(\s*true\s*)[^<]*',
+ rf'\1{x} {y} 0 0 0 0',
+ sdf, count=1)
+with open('$WORLD_FILE', 'w') as f:
+ f.write(sdf)
+print(f'[INFO] UGV position synced from config: ({x}, {y})')
+" 2>/dev/null || print_info "UGV position sync skipped"
+fi
+
print_info "==================================="
print_info " UAV-UGV Simulation"
print_info "==================================="
print_info "World: $WORLD_FILE"
print_info "Mission: $MISSION"
-print_info "Altitude: ${ALTITUDE}m"
echo ""
-print_info "[1/3] Starting Gazebo ..."
+GZ_DEFAULT_GUI="/usr/share/gz/gz-sim8/gui/gui.config"
+GZ_USER_GUI="$HOME/.gz/sim/8/gui.config"
+CAMERA_PLUGINS="$PROJECT_DIR/config/gui.config"
+
+rm -rf /tmp/gz-* /tmp/gazebo-* 2>/dev/null
+
+mkdir -p "$HOME/.gz/sim/8"
+cp "$GZ_DEFAULT_GUI" "$GZ_USER_GUI"
+sed -i 's|true|false|' "$GZ_USER_GUI"
+sed -i 's|show_again="true"|show_again="false"|' "$GZ_USER_GUI"
+
+print_info "[1/4] Starting Gazebo ..."
gz sim -v4 -r "$WORLD_FILE" &
GZ_PID=$!
sleep 10
@@ -112,7 +150,7 @@ if ! kill -0 $GZ_PID 2>/dev/null; then
fi
print_success "Gazebo running (PID: $GZ_PID)"
-print_info "[2/3] Starting ArduPilot SITL ..."
+print_info "[2/4] Starting ArduPilot SITL ..."
cd ~/ardupilot
SITL_ARGS="-v ArduCopter -f gazebo-iris --model JSON -I0"
@@ -136,19 +174,23 @@ if ! pgrep -f "arducopter" > /dev/null 2>&1; then
fi
print_success "ArduPilot SITL running (TCP 5760)"
-print_info "[3/3] Starting main.py ..."
+print_info "[3/4] Starting main.py ..."
cd "$PROJECT_DIR"
sleep 3
-python3 src/main.py \
- --device sim \
- --connection "tcp:127.0.0.1:5760" \
- --mission "$MISSION" \
- --altitude "$ALTITUDE" \
- --duration "$DURATION" &
+MAIN_ARGS="--device sim --connection tcp:127.0.0.1:5760 --mission $MISSION"
+[ -n "$ALTITUDE" ] && MAIN_ARGS="$MAIN_ARGS --altitude $ALTITUDE"
+[ -n "$DURATION" ] && MAIN_ARGS="$MAIN_ARGS --duration $DURATION"
+
+python3 src/main.py $MAIN_ARGS &
MAIN_PID=$!
print_success "main.py running (PID: $MAIN_PID)"
+print_info "[4/4] Starting camera viewer ..."
+python3 -W ignore:RuntimeWarning -m src.vision.camera_processor down &
+CAM_PID=$!
+print_success "Camera viewer running (PID: $CAM_PID)"
+
print_info ""
print_info "==================================="
print_info " Simulation Running"
@@ -156,8 +198,10 @@ print_info "==================================="
print_info " Gazebo -> ArduPilot SITL (TCP:5760)"
print_info " |"
print_info " main.py (pymavlink)"
+print_info " |"
+print_info " camera_viewer.py (OpenCV)"
print_info ""
-print_info " Mission: $MISSION Alt: ${ALTITUDE}m"
+print_info " Mission: $MISSION (config from YAML)"
print_info " Press Ctrl+C to stop"
print_info "==================================="
diff --git a/setup.sh b/setup.sh
index 23cccef..ecc994f 100755
--- a/setup.sh
+++ b/setup.sh
@@ -247,6 +247,11 @@ if sudo apt-get install -y gz-harmonic 2>/dev/null; then
libgz-msgs10-dev \
rapidjson-dev \
2>/dev/null || print_warning "Some Gazebo dev packages may be missing"
+ # Install Gazebo Python bindings (needed for camera/vision scripts)
+ sudo apt-get install -y \
+ python3-gz-transport13 \
+ python3-gz-msgs10 \
+ 2>/dev/null || print_warning "Gazebo Python bindings not available via apt"
elif sudo apt-get install -y gz-garden 2>/dev/null; then
GZ_VERSION="garden"
# Install Garden development packages
@@ -262,6 +267,11 @@ elif sudo apt-get install -y gz-garden 2>/dev/null; then
libgz-msgs9-dev \
rapidjson-dev \
2>/dev/null || print_warning "Some Gazebo dev packages may be missing"
+ # Install Gazebo Python bindings for Garden
+ sudo apt-get install -y \
+ python3-gz-transport12 \
+ python3-gz-msgs9 \
+ 2>/dev/null || print_warning "Gazebo Python bindings not available via apt"
else
print_warning "Could not install Gazebo Harmonic/Garden"
GZ_VERSION="none"
@@ -283,7 +293,10 @@ if [ -d "$SCRIPT_DIR/venv" ]; then
rm -rf "$SCRIPT_DIR/venv"
fi
-python3 -m venv "$SCRIPT_DIR/venv"
+# --system-site-packages is required so the venv can access
+# Gazebo Python bindings (gz.transport13, gz.msgs10) which are
+# installed as system packages and cannot be pip-installed.
+python3 -m venv --system-site-packages "$SCRIPT_DIR/venv"
source "$SCRIPT_DIR/venv/bin/activate"
pip install --upgrade pip
@@ -357,7 +370,7 @@ fi
echo -e "\033[0;32mEnvironment activated (ROS 2 $ROS_VER)\033[0m"
echo ""
-echo "Run simulation: bash scripts/run_simulation.sh"
+echo "Run simulation: bash scripts/run_autonomous.sh --mission hover"
ACTIVATE_EOF
chmod +x "$SCRIPT_DIR/activate_venv.sh"
@@ -478,6 +491,18 @@ if [ "$INSTALL_ARDUPILOT" = true ]; then
command -v gz &> /dev/null && echo "[OK] Gazebo (gz)" || echo "[WARN] Gazebo not found"
command -v mavproxy.py &> /dev/null && echo "[OK] MAVProxy" || echo "[WARN] MAVProxy not in PATH"
[ -f "$ARDUPILOT_GZ/build/libArduPilotPlugin.so" ] && echo "[OK] ArduPilot Gazebo plugin" || echo "[WARN] Plugin not built"
+
+ # Verify Gazebo Python bindings are accessible from venv
+ source "$SCRIPT_DIR/venv/bin/activate"
+ python3 -c "from gz.transport13 import Node; print('[OK] gz.transport13 Python bindings')" 2>/dev/null || \
+ echo "[WARN] gz.transport13 Python bindings not found"
+ python3 -c "from gz.msgs10.image_pb2 import Image; print('[OK] gz.msgs10 Python bindings')" 2>/dev/null || \
+ echo "[WARN] gz.msgs10 Python bindings not found"
+ python3 -c "import pymavlink; print('[OK] pymavlink')" 2>/dev/null || \
+ echo "[WARN] pymavlink not found in venv"
+ python3 -c "import cv2; print(f'[OK] OpenCV {cv2.__version__}')" 2>/dev/null || \
+ echo "[WARN] OpenCV not found in venv"
+ deactivate
((STEP++))
fi
@@ -513,6 +538,6 @@ echo -e "${CYAN}To run the simulation:${NC}"
echo ""
echo " cd $SCRIPT_DIR"
echo " source activate_venv.sh"
-echo " bash scripts/run_simulation.sh"
+echo " bash scripts/run_autonomous.sh --mission hover"
echo ""
echo -e "${GREEN}==========================================${NC}"
diff --git a/src/control/uav_controller.py b/src/control/uav_controller.py
index 2173c2e..12d10ea 100644
--- a/src/control/uav_controller.py
+++ b/src/control/uav_controller.py
@@ -123,22 +123,40 @@ class Controller:
self._drain_messages()
print("[UAV] Mode -> GUIDED_NOGPS")
- def arm(self, retries: int = 10):
+ def arm(self, retries: int = 30):
self.set_mode_guided()
- for attempt in range(1, retries + 1):
- print(f"[UAV] Arm attempt {attempt}/{retries}...")
- self.conn.arducopter_arm()
+ for i in range(retries):
+ print(f"[UAV] Arm attempt {i+1}/{retries}...")
- t0 = time.time()
- while time.time() - t0 < 5:
+ # Force arm: param2=21196 is ArduPilot's force-arm magic number and
+ # bypasses all pre-arm checks (MAVProxy "arm throttle force") — SITL only
+ self.conn.mav.command_long_send(
+ self.conn.target_system,
+ self.conn.target_component,
+ mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
+ 0,
+ 1, # param1: 1 = arm
+ 21196, # param2: force arm magic number
+ 0, 0, 0, 0, 0)
+
+ # Poll for armed status (20 x 0.2s = up to 4s) so a failed arm can be
+ # retried, instead of blocking indefinitely in motors_armed_wait()
+ armed = False
+ for _ in range(20):
self._drain_messages()
- if self.armed:
- print("[UAV] Armed")
- return True
- sleep(0.2)
+ if self.conn.motors_armed():
+ armed = True
+ break
+ time.sleep(0.2)
- self._drain_messages()
+ if armed:
+ self.armed = True
+ print("[UAV] Armed")
+ return True
+
+ print(f"[UAV] Arm attempt {i+1} failed, retrying...")
+ time.sleep(2)
print("[UAV] FAILED to arm after all retries")
return False
@@ -156,8 +174,12 @@ class Controller:
print(f"[UAV] Takeoff -> {altitude}m")
def land(self):
- self.conn.set_mode_rtl()
- print("[UAV] Landing (RTL)")
+ self.conn.mav.command_long_send(
+ self.conn.target_system,
+ self.conn.target_component,
+ MAV_CMD_DO_SET_MODE,
+ 0, 89, 9, 0, 0, 0, 0, 0)
+ print("[UAV] Landing (LAND mode)")
def land_at(self, lat: int, lon: int):
self.conn.mav.command_long_send(
diff --git a/src/main.py b/src/main.py
index a9dee5d..fb2db81 100644
--- a/src/main.py
+++ b/src/main.py
@@ -4,28 +4,56 @@ import sys
import os
import time
import argparse
+import yaml
from time import sleep
+from pathlib import Path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
-from control.uav_controller import Controller, HOLD_ALT
+from control.uav_controller import Controller
from control.ugv_controller import UGVController
from utils.helpers import clamp, distance_2d, PIDController, LowPassFilter
from utils.transforms import normalize_angle, body_to_world, world_to_body
+PROJECT_DIR = Path(__file__).resolve().parent.parent
+CONFIG_DIR = PROJECT_DIR / "config"
-def mission_hover(ctrl: Controller, altitude: float = HOLD_ALT,
- duration: float = 30.0):
- print("\n" + "=" * 50)
- print(f" HOVER MISSION: {altitude}m for {duration}s")
- print("=" * 50 + "\n")
+def load_config(name: str) -> dict:
+ path = CONFIG_DIR / name
+ if not path.exists():
+ print(f"[WARN] Config not found: {path}")
+ return {}
+ with open(path, 'r') as f:
+ return yaml.safe_load(f) or {}
+
+
+def load_mission(name: str) -> dict:
+ path = CONFIG_DIR / "missions" / f"{name}.yaml"
+ if not path.exists():
+ print(f"[WARN] Mission not found: {path}")
+ return {}
+ with open(path, 'r') as f:
+ return yaml.safe_load(f) or {}
+
+
+def setup_ardupilot(ctrl: Controller):
ctrl.set_param('ARMING_CHECK', 0)
ctrl.set_param('SCHED_LOOP_RATE', 200)
ctrl.set_param('FS_THR_ENABLE', 0)
ctrl.set_param('FS_GCS_ENABLE', 0)
sleep(2)
+
+def mission_hover(ctrl: Controller, uav_cfg: dict, mission_cfg: dict):
+ altitude = mission_cfg.get('altitude', uav_cfg['flight']['takeoff_altitude'])
+ duration = mission_cfg.get('duration', 30.0)
+
+ print("\n" + "=" * 50)
+ print(f" HOVER MISSION: {altitude}m for {duration}s")
+ print("=" * 50 + "\n")
+
+ setup_ardupilot(ctrl)
ctrl.wait_for_gps()
if not ctrl.arm():
@@ -46,29 +74,18 @@ def mission_hover(ctrl: Controller, altitude: float = HOLD_ALT,
print("\n[UAV] Hover complete")
ctrl.land()
-
- print("[UAV] Waiting for landing ...")
- t0 = time.time()
- while time.time() - t0 < 30:
- ctrl.update_state()
- if ctrl.altitude < 0.3:
- break
- sleep(0.5)
- print("[UAV] Landed!")
+ wait_for_landing(ctrl)
-def mission_square(ctrl: Controller, altitude: float = HOLD_ALT,
- side: float = 5.0):
+def mission_square(ctrl: Controller, uav_cfg: dict, mission_cfg: dict):
+ altitude = mission_cfg.get('altitude', uav_cfg['flight']['takeoff_altitude'])
+ side = mission_cfg.get('side_length', 5.0)
+
print("\n" + "=" * 50)
print(f" SQUARE MISSION: {side}m sides at {altitude}m")
print("=" * 50 + "\n")
- ctrl.set_param('ARMING_CHECK', 0)
- ctrl.set_param('SCHED_LOOP_RATE', 200)
- ctrl.set_param('FS_THR_ENABLE', 0)
- ctrl.set_param('FS_GCS_ENABLE', 0)
- sleep(2)
-
+ setup_ardupilot(ctrl)
ctrl.wait_for_gps()
if not ctrl.arm():
@@ -92,29 +109,23 @@ def mission_square(ctrl: Controller, altitude: float = HOLD_ALT,
print("[UAV] Square complete")
ctrl.land()
-
- print("[UAV] Waiting for landing ...")
- t0 = time.time()
- while time.time() - t0 < 30:
- ctrl.update_state()
- if ctrl.altitude < 0.3:
- break
- sleep(0.5)
- print("[UAV] Landed!")
+ wait_for_landing(ctrl)
def mission_search(ctrl: Controller, ugv: UGVController,
- altitude: float = HOLD_ALT):
+ uav_cfg: dict, mission_cfg: dict):
+ altitude = mission_cfg.get('altitude', uav_cfg['flight']['takeoff_altitude'])
+ search_cfg = mission_cfg.get('search', {})
+ initial_leg = search_cfg.get('initial_leg', 3.0)
+ leg_increment = search_cfg.get('leg_increment', 2.0)
+ max_legs = search_cfg.get('max_legs', 12)
+ detection_radius = search_cfg.get('detection_radius', 2.0)
+
print("\n" + "=" * 50)
print(f" SEARCH MISSION at {altitude}m")
print("=" * 50 + "\n")
- ctrl.set_param('ARMING_CHECK', 0)
- ctrl.set_param('SCHED_LOOP_RATE', 200)
- ctrl.set_param('FS_THR_ENABLE', 0)
- ctrl.set_param('FS_GCS_ENABLE', 0)
- sleep(2)
-
+ setup_ardupilot(ctrl)
ctrl.wait_for_gps()
if not ctrl.arm():
@@ -127,30 +138,23 @@ def mission_search(ctrl: Controller, ugv: UGVController,
ugv_pos = ugv.get_position()
print(f"[UAV] UGV target at ({ugv_pos['x']:.1f}, {ugv_pos['y']:.1f})")
- distance_step = 3.0
- increment = 2.0
+ distance_step = initial_leg
travel_x = True
direction = 1
- MAX_LEGS = 12
- for leg in range(MAX_LEGS):
+ for leg in range(max_legs):
ctrl.update_state()
uav_pos = ctrl.get_local_position()
dist_to_ugv = distance_2d(
(uav_pos['x'], uav_pos['y']),
(ugv_pos['x'], ugv_pos['y'])
)
- print(f"[UAV] Spiral leg {leg+1}/{MAX_LEGS} dist_to_ugv: {dist_to_ugv:.1f}m")
+ print(f"[UAV] Spiral leg {leg+1}/{max_legs} dist_to_ugv: {dist_to_ugv:.1f}m")
- if dist_to_ugv < 2.0:
- print("[UAV] UGV found! Landing nearby.")
+ if dist_to_ugv < detection_radius:
+ print("[UAV] UGV found! Landing.")
ctrl.land()
- t0 = time.time()
- while time.time() - t0 < 30:
- ctrl.update_state()
- if ctrl.altitude < 0.3:
- break
- sleep(0.5)
+ wait_for_landing(ctrl)
print("[UAV] Landed on UGV!")
return
@@ -159,55 +163,82 @@ def mission_search(ctrl: Controller, ugv: UGVController,
else:
ctrl.move_pos_rel(0, distance_step * direction, 0)
direction *= -1
- distance_step += increment
+ distance_step += leg_increment
travel_x = not travel_x
sleep(4)
print("[UAV] Search complete - UGV not found, landing")
ctrl.land()
+ wait_for_landing(ctrl)
+
+def wait_for_landing(ctrl: Controller, timeout: float = 60.0):
+ print("[UAV] Waiting for landing ...")
t0 = time.time()
- while time.time() - t0 < 30:
+ while time.time() - t0 < timeout:
ctrl.update_state()
+ elapsed = int(time.time() - t0)
+ print(f"\r[UAV] Descending: {ctrl.altitude:.1f}m ({elapsed}s) ",
+ end='', flush=True)
+ if ctrl.altitude < 0.5 and not ctrl.armed:
+ break
if ctrl.altitude < 0.3:
break
sleep(0.5)
- print("[UAV] Landed!")
+ print(f"\n[UAV] Landed! (alt: {ctrl.altitude:.2f}m)")
def main():
parser = argparse.ArgumentParser(description='UAV-UGV Simulation')
parser.add_argument('--device', default='sim', choices=['sim', 'real'])
parser.add_argument('--connection', default=None)
- parser.add_argument('--mission', default='hover', choices=['hover', 'square', 'search'])
- parser.add_argument('--altitude', type=float, default=HOLD_ALT)
- parser.add_argument('--duration', type=float, default=30.0)
- parser.add_argument('--ugv-x', type=float, default=10.0)
- parser.add_argument('--ugv-y', type=float, default=5.0)
- parser.add_argument('--ugv-connection', default=None)
+ parser.add_argument('--mission', default='hover')
+ parser.add_argument('--altitude', type=float, default=None)
+ parser.add_argument('--duration', type=float, default=None)
args = parser.parse_args()
+ uav_cfg = load_config('uav.yaml')
+ ugv_cfg = load_config('ugv.yaml')
+ props = load_config('properties.yaml')
+ mission_cfg = load_mission(args.mission)
+
+ if args.altitude is not None:
+ mission_cfg['altitude'] = args.altitude
+ if args.duration is not None:
+ mission_cfg['duration'] = args.duration
+
if args.connection:
conn_str = args.connection
elif args.device == 'real':
- conn_str = '/dev/ttyAMA0'
+ conn_str = uav_cfg.get('connection', {}).get('real', '/dev/ttyAMA0')
else:
- conn_str = 'tcp:127.0.0.1:5760'
+ conn_str = uav_cfg.get('connection', {}).get('sim', 'tcp:127.0.0.1:5760')
+ ugv_pos = ugv_cfg.get('position', {})
ugv = UGVController(
- connection_string=args.ugv_connection,
- static_pos=(args.ugv_x, args.ugv_y),
+ connection_string=ugv_cfg.get('connection', {}).get('sim'),
+ static_pos=(ugv_pos.get('x', 10.0), ugv_pos.get('y', 5.0)),
)
ctrl = Controller(conn_str)
- if args.mission == 'hover':
- mission_hover(ctrl, altitude=args.altitude, duration=args.duration)
- elif args.mission == 'square':
- mission_square(ctrl, altitude=args.altitude)
- elif args.mission == 'search':
- mission_search(ctrl, ugv, altitude=args.altitude)
+ print(f"[MAIN] Config loaded from {CONFIG_DIR}")
+ print(f"[MAIN] Mission: {args.mission}")
+
+ missions = {
+ 'hover': lambda: mission_hover(ctrl, uav_cfg, mission_cfg),
+ 'square': lambda: mission_square(ctrl, uav_cfg, mission_cfg),
+ 'search': lambda: mission_search(ctrl, ugv, uav_cfg, mission_cfg),
+ }
+
+ runner = missions.get(args.mission)
+ if runner:
+ runner()
+ else:
+ print(f"[MAIN] Unknown mission: {args.mission}")
+ print(f"[MAIN] Available: {list(missions.keys())}")
+ return
print("[MAIN] Mission finished.")
diff --git a/src/vision/camera_processor.py b/src/vision/camera_processor.py
index 669ec93..1deedca 100644
--- a/src/vision/camera_processor.py
+++ b/src/vision/camera_processor.py
@@ -1,227 +1,152 @@
#!/usr/bin/env python3
"""
-Camera Processor Node
-Handles camera feed processing for GPS-denied navigation
+Camera Processor - Handles Gazebo camera feeds for GPS-denied navigation.
+Subscribes to gz-transport topics, processes frames, and displays via OpenCV.
+Downstream modules (ArUco detector, optical flow) register as callbacks.
"""
-import rclpy
-from rclpy.node import Node
-from sensor_msgs.msg import Image, CameraInfo
-from std_msgs.msg import Header
-from cv_bridge import CvBridge
-import cv2
+import sys
+import signal
+import time
import numpy as np
+import cv2
+from gz.transport13 import Node
+from gz.msgs10.image_pb2 import Image
+
+# PixelFormatType enum values (from gz.msgs10 Image proto)
+PF_RGB_INT8 = 3
+PF_BGR_INT8 = 8
+PF_R_FLOAT32 = 13
-class CameraProcessor(Node):
- """
- Processes camera feeds for visual odometry and obstacle detection.
- Outputs processed images for downstream nodes.
- """
-
- def __init__(self):
- super().__init__('camera_processor')
-
- self.bridge = CvBridge()
-
- # Camera parameters
- self.camera_matrix = None
- self.dist_coeffs = None
- self.image_size = None
-
- # Image processing parameters
- self.declare_parameter('undistort', True)
- self.declare_parameter('grayscale_output', True)
- self.declare_parameter('histogram_equalization', True)
- self.declare_parameter('resize_factor', 1.0)
-
- self.undistort = self.get_parameter('undistort').value
- self.grayscale_output = self.get_parameter('grayscale_output').value
- self.histogram_eq = self.get_parameter('histogram_equalization').value
- self.resize_factor = self.get_parameter('resize_factor').value
-
- # Undistort maps (computed once)
- self.map1 = None
- self.map2 = None
-
- # Subscribers - Forward camera
- self.forward_image_sub = self.create_subscription(
- Image,
- '/uav/camera/forward/image_raw',
- self.forward_image_callback,
- 10
- )
-
- self.forward_info_sub = self.create_subscription(
- CameraInfo,
- '/uav/camera/forward/camera_info',
- self.camera_info_callback,
- 10
- )
-
- # Subscribers - Downward camera
- self.downward_image_sub = self.create_subscription(
- Image,
- '/uav/camera/downward/image_raw',
- self.downward_image_callback,
- 10
- )
-
- # Publishers - Processed images
- self.forward_processed_pub = self.create_publisher(
- Image,
- '/uav/camera/forward/image_processed',
- 10
- )
-
- self.downward_processed_pub = self.create_publisher(
- Image,
- '/uav/camera/downward/image_processed',
- 10
- )
-
- # Debug visualization
- self.debug_pub = self.create_publisher(
- Image,
- '/uav/camera/debug',
- 10
- )
-
- self.get_logger().info('Camera Processor Node Started')
-
- def camera_info_callback(self, msg: CameraInfo):
- """Extract and store camera calibration parameters."""
- if self.camera_matrix is None:
- self.camera_matrix = np.array(msg.k).reshape(3, 3)
- self.dist_coeffs = np.array(msg.d)
- self.image_size = (msg.width, msg.height)
-
- # Compute undistortion maps
- if self.undistort and self.dist_coeffs is not None:
- new_camera_matrix, _ = cv2.getOptimalNewCameraMatrix(
- self.camera_matrix,
- self.dist_coeffs,
- self.image_size,
- alpha=0
+class CameraProcessor:
+ def __init__(self, topics=None, show_gui=True):
+ self.node = Node()
+ self.show_gui = show_gui
+ self.frames = {}
+ self.callbacks = {}
+ self.running = True
+
+ if topics is None:
+ topics = {
+ "downward": "/uav/camera/downward",
+ "gimbal": "/world/uav_ugv_search/model/iris_with_gimbal/model/gimbal/link/pitch_link/sensor/camera/image",
+ }
+
+ self.topics = topics
+
+ for name, topic in self.topics.items():
+ self.callbacks[name] = []
+ ok = self.node.subscribe(Image, topic, self._make_gz_callback(name))
+ if ok:
+ print(f"[CAM] Subscribed: {name} -> {topic}")
+ else:
+ print(f"[CAM] Failed: {topic}")
+
+ signal.signal(signal.SIGINT, lambda s, f: self.stop())
+
+ def _make_gz_callback(self, name):
+ def cb(msg):
+ fmt = msg.pixel_format_type
+ if fmt == PF_RGB_INT8:
+ arr = np.frombuffer(msg.data, dtype=np.uint8).reshape(
+ msg.height, msg.width, 3
)
- self.map1, self.map2 = cv2.initUndistortRectifyMap(
- self.camera_matrix,
- self.dist_coeffs,
- None,
- new_camera_matrix,
- self.image_size,
- cv2.CV_16SC2
+ bgr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
+ elif fmt == PF_BGR_INT8:
+ arr = np.frombuffer(msg.data, dtype=np.uint8).reshape(
+ msg.height, msg.width, 3
)
-
- self.get_logger().info(
- f'Camera calibration received: {self.image_size[0]}x{self.image_size[1]}'
- )
-
- def process_image(self, image: np.ndarray) -> np.ndarray:
- """
- Apply image processing pipeline.
-
- Args:
- image: Input BGR image
-
- Returns:
- Processed image
- """
+ bgr = arr
+ elif fmt == PF_R_FLOAT32:
+ arr = np.frombuffer(msg.data, dtype=np.float32).reshape(
+ msg.height, msg.width
+ )
+ normalized = cv2.normalize(arr, None, 0, 255, cv2.NORM_MINMAX)
+ bgr = cv2.cvtColor(normalized.astype(np.uint8), cv2.COLOR_GRAY2BGR)
+ else:
+ return
+
+ processed = self.process_image(bgr)
+ self.frames[name] = processed
+
+ for fn in self.callbacks.get(name, []):
+ fn(name, processed)
+
+ return cb
+
+ def process_image(self, image):
processed = image.copy()
-
- # 1. Undistort if calibration available
- if self.undistort and self.map1 is not None:
- processed = cv2.remap(processed, self.map1, self.map2, cv2.INTER_LINEAR)
-
- # 2. Resize if needed
- if self.resize_factor != 1.0:
- new_size = (
- int(processed.shape[1] * self.resize_factor),
- int(processed.shape[0] * self.resize_factor)
- )
- processed = cv2.resize(processed, new_size, interpolation=cv2.INTER_AREA)
-
- # 3. Convert to grayscale if requested
- if self.grayscale_output:
- if len(processed.shape) == 3:
- processed = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY)
-
- # 4. Histogram equalization for better feature detection
- if self.histogram_eq:
- if len(processed.shape) == 2:
- # CLAHE for better results than standard histogram equalization
- clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
- processed = clahe.apply(processed)
- else:
- # For color images, apply to L channel in LAB
- lab = cv2.cvtColor(processed, cv2.COLOR_BGR2LAB)
- clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
- lab[:, :, 0] = clahe.apply(lab[:, :, 0])
- processed = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
-
+ clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
+ lab = cv2.cvtColor(processed, cv2.COLOR_BGR2LAB)
+ lab[:, :, 0] = clahe.apply(lab[:, :, 0])
+ processed = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
return processed
-
- def forward_image_callback(self, msg: Image):
- """Process forward camera images for visual odometry."""
- try:
- # Convert ROS Image to OpenCV
- cv_image = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
-
- # Process image
- processed = self.process_image(cv_image)
-
- # Convert back to ROS Image
- if len(processed.shape) == 2:
- encoding = 'mono8'
- else:
- encoding = 'bgr8'
-
- processed_msg = self.bridge.cv2_to_imgmsg(processed, encoding)
- processed_msg.header = msg.header
-
- # Publish processed image
- self.forward_processed_pub.publish(processed_msg)
-
- except Exception as e:
- self.get_logger().error(f'Forward image processing error: {e}')
-
- def downward_image_callback(self, msg: Image):
- """Process downward camera images for optical flow."""
- try:
- # Convert ROS Image to OpenCV
- cv_image = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
-
- # Process image
- processed = self.process_image(cv_image)
-
- # Convert back to ROS Image
- if len(processed.shape) == 2:
- encoding = 'mono8'
- else:
- encoding = 'bgr8'
-
- processed_msg = self.bridge.cv2_to_imgmsg(processed, encoding)
- processed_msg.header = msg.header
-
- # Publish processed image
- self.downward_processed_pub.publish(processed_msg)
-
- except Exception as e:
- self.get_logger().error(f'Downward image processing error: {e}')
+
+ def register_callback(self, camera_name, fn):
+ if camera_name in self.callbacks:
+ self.callbacks[camera_name].append(fn)
+ print(f"[CAM] Registered callback for {camera_name}: {fn.__name__}")
+
+ def stop(self):
+ self.running = False
+
+ def spin(self):
+ print("[CAM] Running. Press 'q' to quit, 's' to screenshot.")
+ while self.running:
+ for name, frame in list(self.frames.items()):
+ cv2.imshow(name, frame)
+
+ key = cv2.waitKey(33) & 0xFF
+ if key == ord("q"):
+ break
+ elif key == ord("s"):
+ for name, frame in self.frames.items():
+ fname = f"{name}_{int(time.time())}.png"
+ cv2.imwrite(fname, frame)
+ print(f"[CAM] Saved {fname}")
+
+ cv2.destroyAllWindows()
+ print("[CAM] Stopped.")
+
+ def spin_headless(self):
+ print("[CAM] Running headless (no GUI).")
+ while self.running:
+ time.sleep(0.1)
+ print("[CAM] Stopped.")
+
+ def get_frame(self, camera_name):
+ return self.frames.get(camera_name)
-def main(args=None):
- rclpy.init(args=args)
- node = CameraProcessor()
-
- try:
- rclpy.spin(node)
- except KeyboardInterrupt:
- pass
- finally:
- node.destroy_node()
- rclpy.shutdown()
+def main():
+ cameras = "both"
+ show_gui = True
+
+ if len(sys.argv) > 1:
+ cameras = sys.argv[1].lower()
+ if "--headless" in sys.argv:
+ show_gui = False
+
+ all_topics = {
+ "downward": "/uav/camera/downward",
+ "gimbal": "/world/uav_ugv_search/model/iris_with_gimbal/model/gimbal/link/pitch_link/sensor/camera/image",
+ }
+
+ if cameras == "down":
+ topics = {"downward": all_topics["downward"]}
+ elif cameras == "gimbal":
+ topics = {"gimbal": all_topics["gimbal"]}
+ else:
+ topics = all_topics
+
+ proc = CameraProcessor(topics=topics, show_gui=show_gui)
+
+ if show_gui:
+ proc.spin()
+ else:
+ proc.spin_headless()
-if __name__ == '__main__':
+if __name__ == "__main__":
main()
diff --git a/src/vision/object_detector.py b/src/vision/object_detector.py
index 7afbd0d..64ef27a 100644
--- a/src/vision/object_detector.py
+++ b/src/vision/object_detector.py
@@ -1,132 +1,126 @@
#!/usr/bin/env python3
-"""Object Detector - Visual landmark and obstacle detection."""
+"""
+Object Detector - ArUco marker and feature detection.
+Registers as a callback on CameraProcessor to receive processed frames.
+"""
-import rclpy
-from rclpy.node import Node
-from sensor_msgs.msg import Image
-from geometry_msgs.msg import PoseArray, Pose
-from std_msgs.msg import Float32MultiArray
-from cv_bridge import CvBridge
import cv2
import numpy as np
+from scipy.spatial.transform import Rotation
-class ObjectDetector(Node):
-
- def __init__(self):
- super().__init__('object_detector')
-
- self.bridge = CvBridge()
-
- self.declare_parameter('detection_method', 'ArUco')
- self.declare_parameter('marker_size', 0.15)
- self.declare_parameter('camera_matrix', [500.0, 0.0, 320.0, 0.0, 500.0, 240.0, 0.0, 0.0, 1.0])
-
- self.detection_method = self.get_parameter('detection_method').value
- self.marker_size = self.get_parameter('marker_size').value
- camera_matrix_flat = self.get_parameter('camera_matrix').value
- self.camera_matrix = np.array(camera_matrix_flat).reshape(3, 3)
+class ObjectDetector:
+ def __init__(self, marker_size=0.15, camera_matrix=None, detection_method="ArUco"):
+ self.detection_method = detection_method
+ self.marker_size = marker_size
+
+ if camera_matrix is not None:
+ self.camera_matrix = np.array(camera_matrix).reshape(3, 3)
+ else:
+ self.camera_matrix = np.array(
+ [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]]
+ )
self.dist_coeffs = np.zeros(5)
-
- if self.detection_method == 'ArUco':
+
+ if self.detection_method == "ArUco":
self.aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
self.aruco_params = cv2.aruco.DetectorParameters()
- self.aruco_detector = cv2.aruco.ArucoDetector(self.aruco_dict, self.aruco_params)
-
- self.image_sub = self.create_subscription(
- Image, '/uav/camera/forward/image_raw', self.image_callback, 10)
-
- self.landmarks_pub = self.create_publisher(PoseArray, '/uav/landmarks/poses', 10)
- self.marker_ids_pub = self.create_publisher(Float32MultiArray, '/uav/landmarks/ids', 10)
- self.debug_image_pub = self.create_publisher(Image, '/uav/landmarks/debug', 10)
-
- self.get_logger().info(f'Object Detector Started - {self.detection_method}')
-
- def image_callback(self, msg):
- try:
- frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
-
- if self.detection_method == 'ArUco':
- self.detect_aruco(frame, msg.header)
- else:
- self.detect_orb_features(frame, msg.header)
-
- except Exception as e:
- self.get_logger().error(f'Detection error: {e}')
-
- def detect_aruco(self, frame, header):
+ self.aruco_detector = cv2.aruco.ArucoDetector(
+ self.aruco_dict, self.aruco_params
+ )
+
+ self.last_detections = []
+ self.on_detection = None
+
+ print(f"[DET] Object Detector initialized ({self.detection_method})")
+
+ def detect(self, camera_name, frame):
+ if self.detection_method == "ArUco":
+ detections, annotated = self.detect_aruco(frame)
+ else:
+ detections, annotated = self.detect_orb(frame)
+
+ self.last_detections = detections
+
+ if detections and self.on_detection:
+ self.on_detection(detections)
+
+ if annotated is not None:
+ cv2.imshow(f"{camera_name} [detections]", annotated)
+
+ return detections
+
+ def detect_aruco(self, frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rejected = self.aruco_detector.detectMarkers(gray)
-
- pose_array = PoseArray()
- pose_array.header = header
- pose_array.header.frame_id = 'camera_link'
-
- id_array = Float32MultiArray()
-
+
+ detections = []
+ annotated = frame.copy()
+
if ids is not None:
rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
- corners, self.marker_size, self.camera_matrix, self.dist_coeffs)
-
+ corners, self.marker_size, self.camera_matrix, self.dist_coeffs
+ )
+
for i, marker_id in enumerate(ids.flatten()):
- pose = Pose()
- pose.position.x = float(tvecs[i][0][0])
- pose.position.y = float(tvecs[i][0][1])
- pose.position.z = float(tvecs[i][0][2])
-
- rotation_matrix, _ = cv2.Rodrigues(rvecs[i])
- from scipy.spatial.transform import Rotation
+ tvec = tvecs[i][0]
+ rvec = rvecs[i][0]
+
+ rotation_matrix, _ = cv2.Rodrigues(rvec)
r = Rotation.from_matrix(rotation_matrix)
quat = r.as_quat()
- pose.orientation.x = quat[0]
- pose.orientation.y = quat[1]
- pose.orientation.z = quat[2]
- pose.orientation.w = quat[3]
-
- pose_array.poses.append(pose)
- id_array.data.append(float(marker_id))
-
- debug_frame = cv2.aruco.drawDetectedMarkers(frame.copy(), corners, ids)
+
+ center = np.mean(corners[i][0], axis=0)
+
+ detection = {
+ "id": int(marker_id),
+ "position": tvec.tolist(),
+ "orientation_quat": quat.tolist(),
+ "rvec": rvec.tolist(),
+ "tvec": tvec.tolist(),
+ "corners": corners[i][0].tolist(),
+ "center_px": center.tolist(),
+ "distance": float(np.linalg.norm(tvec)),
+ }
+ detections.append(detection)
+
+ cv2.aruco.drawDetectedMarkers(annotated, corners, ids)
for i in range(len(rvecs)):
- cv2.drawFrameAxes(debug_frame, self.camera_matrix, self.dist_coeffs,
- rvecs[i], tvecs[i], self.marker_size * 0.5)
-
- debug_msg = self.bridge.cv2_to_imgmsg(debug_frame, 'bgr8')
- debug_msg.header = header
- self.debug_image_pub.publish(debug_msg)
-
- self.landmarks_pub.publish(pose_array)
- self.marker_ids_pub.publish(id_array)
-
- def detect_orb_features(self, frame, header):
+ cv2.drawFrameAxes(
+ annotated,
+ self.camera_matrix,
+ self.dist_coeffs,
+ rvecs[i],
+ tvecs[i],
+ self.marker_size * 0.5,
+ )
+
+ d = detections[i]
+ label = f"ID:{d['id']} d:{d['distance']:.2f}m"
+ pos = (int(d["center_px"][0]), int(d["center_px"][1]) - 10)
+ cv2.putText(
+ annotated, label, pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2
+ )
+
+ return detections, annotated
+
+ def detect_orb(self, frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
orb = cv2.ORB_create(nfeatures=500)
keypoints, descriptors = orb.detectAndCompute(gray, None)
-
- pose_array = PoseArray()
- pose_array.header = header
-
+
+ detections = []
for kp in keypoints[:50]:
- pose = Pose()
- pose.position.x = float(kp.pt[0])
- pose.position.y = float(kp.pt[1])
- pose.position.z = 0.0
- pose_array.poses.append(pose)
-
- self.landmarks_pub.publish(pose_array)
+ detections.append(
+ {
+ "type": "feature",
+ "center_px": [kp.pt[0], kp.pt[1]],
+ "size": kp.size,
+ "response": kp.response,
+ }
+ )
-
-def main(args=None):
- rclpy.init(args=args)
- node = ObjectDetector()
- try:
- rclpy.spin(node)
- except KeyboardInterrupt:
- pass
- finally:
- node.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == '__main__':
- main()
+ annotated = cv2.drawKeypoints(
+ frame, keypoints[:50], None, color=(0, 255, 0), flags=0
+ )
+ return detections, annotated
diff --git a/src/vision/optical_flow.py b/src/vision/optical_flow.py
index 741dc38..54a66d3 100644
--- a/src/vision/optical_flow.py
+++ b/src/vision/optical_flow.py
@@ -1,128 +1,85 @@
#!/usr/bin/env python3
-"""Optical Flow - Velocity estimation from downward camera."""
+"""
+Optical Flow - Velocity estimation from downward camera.
+Lucas-Kanade sparse optical flow for GPS-denied velocity estimation.
+"""
-import rclpy
-from rclpy.node import Node
-from sensor_msgs.msg import Image, Range
-from geometry_msgs.msg import TwistStamped
-from cv_bridge import CvBridge
+import time
import cv2
import numpy as np
-class OpticalFlowNode(Node):
-
- def __init__(self):
- super().__init__('optical_flow_node')
-
- self.bridge = CvBridge()
-
+class OpticalFlowEstimator:
+ def __init__(self, focal_length=500.0, min_altitude=0.3, max_altitude=10.0):
+ self.focal_length = focal_length
+ self.min_altitude = min_altitude
+ self.max_altitude = max_altitude
+
self.prev_frame = None
self.prev_points = None
- self.current_altitude = 1.0
-
- self.declare_parameter('window_size', 15)
- self.declare_parameter('max_level', 3)
- self.declare_parameter('min_altitude', 0.3)
- self.declare_parameter('max_altitude', 10.0)
- self.declare_parameter('focal_length', 500.0)
-
- self.window_size = self.get_parameter('window_size').value
- self.max_level = self.get_parameter('max_level').value
- self.min_altitude = self.get_parameter('min_altitude').value
- self.max_altitude = self.get_parameter('max_altitude').value
- self.focal_length = self.get_parameter('focal_length').value
-
- self.lk_params = dict(
- winSize=(self.window_size, self.window_size),
- maxLevel=self.max_level,
- criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
- )
-
- self.feature_params = dict(
- maxCorners=100,
- qualityLevel=0.3,
- minDistance=7,
- blockSize=7
- )
-
- self.velocity = np.zeros(2)
self.prev_time = None
-
- self.image_sub = self.create_subscription(
- Image, '/uav/camera/downward/image_raw', self.image_callback, 10)
-
- self.altitude_sub = self.create_subscription(
- Range, '/uav/rangefinder/range', self.altitude_callback, 10)
-
- self.velocity_pub = self.create_publisher(
- TwistStamped, '/uav/optical_flow/velocity', 10)
-
- self.get_logger().info('Optical Flow Node Started')
-
- def altitude_callback(self, msg):
- if msg.range > self.min_altitude and msg.range < self.max_altitude:
- self.current_altitude = msg.range
-
- def image_callback(self, msg):
- try:
- frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
- current_time = self.get_clock().now()
-
- if self.prev_frame is not None and self.prev_points is not None:
- new_points, status, error = cv2.calcOpticalFlowPyrLK(
- self.prev_frame, gray, self.prev_points, None, **self.lk_params)
-
- if new_points is not None:
- good_new = new_points[status == 1]
- good_old = self.prev_points[status == 1]
-
- if len(good_new) > 10:
- flow = good_new - good_old
- avg_flow = np.mean(flow, axis=0)
-
- if self.prev_time is not None:
- dt = (current_time - self.prev_time).nanoseconds / 1e9
- if dt > 0:
- pixel_velocity = avg_flow / dt
-
- self.velocity[0] = pixel_velocity[0] * self.current_altitude / self.focal_length
- self.velocity[1] = pixel_velocity[1] * self.current_altitude / self.focal_length
-
- self.publish_velocity()
-
- self.prev_points = cv2.goodFeaturesToTrack(gray, **self.feature_params)
- self.prev_frame = gray
- self.prev_time = current_time
-
- except Exception as e:
- self.get_logger().error(f'Optical flow error: {e}')
-
- def publish_velocity(self):
- vel_msg = TwistStamped()
- vel_msg.header.stamp = self.get_clock().now().to_msg()
- vel_msg.header.frame_id = 'base_link'
-
- vel_msg.twist.linear.x = float(self.velocity[0])
- vel_msg.twist.linear.y = float(self.velocity[1])
- vel_msg.twist.linear.z = 0.0
-
- self.velocity_pub.publish(vel_msg)
+ self.current_altitude = 1.0
+ self.velocity = np.zeros(2)
-def main(args=None):
- rclpy.init(args=args)
- node = OpticalFlowNode()
- try:
- rclpy.spin(node)
- except KeyboardInterrupt:
- pass
- finally:
- node.destroy_node()
- rclpy.shutdown()
+ self.lk_params = dict(
+ winSize=(15, 15),
+ maxLevel=3,
+ criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
+ )
+ self.feature_params = dict(
+ maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7
+ )
-if __name__ == '__main__':
- main()
+ self.on_velocity = None
+
+ print("[OF] Optical Flow Estimator initialized")
+
+ def set_altitude(self, altitude):
+ if self.min_altitude < altitude < self.max_altitude:
+ self.current_altitude = altitude
+
+ def process_frame(self, camera_name, frame):
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+ current_time = time.time()
+
+ if self.prev_frame is not None and self.prev_points is not None:
+ new_points, status, error = cv2.calcOpticalFlowPyrLK(
+ self.prev_frame, gray, self.prev_points, None, **self.lk_params
+ )
+
+ if new_points is not None:
+ good_new = new_points[status.flatten() == 1]
+ good_old = self.prev_points[status.flatten() == 1]
+
+ if len(good_new) > 10:
+ flow = good_new - good_old
+ avg_flow = np.mean(flow, axis=0)
+
+ if self.prev_time is not None:
+ dt = current_time - self.prev_time
+ if dt > 0:
+ pixel_velocity = avg_flow / dt
+ self.velocity[0] = (
+ pixel_velocity[0]
+ * self.current_altitude
+ / self.focal_length
+ )
+ self.velocity[1] = (
+ pixel_velocity[1]
+ * self.current_altitude
+ / self.focal_length
+ )
+
+ if self.on_velocity:
+ self.on_velocity(self.velocity)
+
+ self.prev_points = cv2.goodFeaturesToTrack(gray, **self.feature_params)
+ self.prev_frame = gray
+ self.prev_time = current_time
+
+ return self.velocity
+
+ def get_velocity(self):
+ return self.velocity.copy()
diff --git a/src/vision/visual_odometry.py b/src/vision/visual_odometry.py
index 0f72cfa..db9ce95 100644
--- a/src/vision/visual_odometry.py
+++ b/src/vision/visual_odometry.py
@@ -1,173 +1,119 @@
#!/usr/bin/env python3
-"""Visual Odometry - Camera-based position estimation without GPS."""
+"""
+Visual Odometry - Camera-based position estimation without GPS.
+ORB/SIFT feature matching with essential matrix decomposition.
+"""
-import rclpy
-from rclpy.node import Node
-from sensor_msgs.msg import Image, CameraInfo
-from geometry_msgs.msg import PoseStamped, TwistStamped
-from cv_bridge import CvBridge
+import time
import cv2
import numpy as np
from scipy.spatial.transform import Rotation
-class VisualOdometryNode(Node):
-
- def __init__(self):
- super().__init__('visual_odometry_node')
-
- self.bridge = CvBridge()
-
- self.prev_frame = None
- self.prev_keypoints = None
- self.prev_descriptors = None
-
- self.camera_matrix = None
- self.dist_coeffs = None
-
- self.current_pose = np.eye(4)
- self.position = np.zeros(3)
- self.orientation = np.eye(3)
- self.prev_time = None
- self.velocity = np.zeros(3)
-
- self.detector_type = self.declare_parameter('detector_type', 'ORB').value
- self.min_features = self.declare_parameter('min_features', 100).value
- self.feature_quality = self.declare_parameter('feature_quality', 0.01).value
-
- if self.detector_type == 'ORB':
+class VisualOdometry:
+ def __init__(self, camera_matrix=None, detector_type="ORB", min_features=100):
+ self.detector_type = detector_type
+ self.min_features = min_features
+
+ if camera_matrix is not None:
+ self.camera_matrix = np.array(camera_matrix).reshape(3, 3)
+ else:
+ self.camera_matrix = np.array(
+ [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]]
+ )
+ self.dist_coeffs = np.zeros(5)
+
+ if detector_type == "ORB":
self.detector = cv2.ORB_create(nfeatures=500)
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
- elif self.detector_type == 'SIFT':
+ elif detector_type == "SIFT":
self.detector = cv2.SIFT_create(nfeatures=500)
self.matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
else:
self.detector = cv2.ORB_create(nfeatures=500)
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
-
- self.image_sub = self.create_subscription(
- Image, '/uav/camera/forward/image_raw', self.image_callback, 10)
-
- self.camera_info_sub = self.create_subscription(
- CameraInfo, '/uav/camera/forward/camera_info', self.camera_info_callback, 10)
-
- self.pose_pub = self.create_publisher(PoseStamped, '/uav/visual_odometry/pose', 10)
- self.velocity_pub = self.create_publisher(TwistStamped, '/uav/visual_odometry/velocity', 10)
-
- self.timer = self.create_timer(0.033, self.publish_pose)
-
- self.get_logger().info(f'Visual Odometry Node Started - {self.detector_type} detector')
-
- def camera_info_callback(self, msg):
- if self.camera_matrix is None:
- self.camera_matrix = np.array(msg.k).reshape(3, 3)
- self.dist_coeffs = np.array(msg.d)
- self.get_logger().info('Camera calibration received')
-
- def image_callback(self, msg):
- try:
- frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-
- keypoints, descriptors = self.detector.detectAndCompute(gray, None)
-
- current_time = self.get_clock().now()
-
- if self.prev_frame is not None and len(keypoints) >= self.min_features:
- matches = self.match_features(self.prev_descriptors, descriptors)
-
- if len(matches) >= self.min_features:
- self.estimate_motion(self.prev_keypoints, keypoints, matches, current_time)
-
- self.prev_frame = gray
- self.prev_keypoints = keypoints
- self.prev_descriptors = descriptors
- self.prev_time = current_time
-
- except Exception as e:
- self.get_logger().error(f'Visual odometry error: {e}')
-
- def match_features(self, desc1, desc2):
+
+ self.prev_frame = None
+ self.prev_keypoints = None
+ self.prev_descriptors = None
+ self.prev_time = None
+
+ self.position = np.zeros(3)
+ self.orientation = np.eye(3)
+ self.velocity = np.zeros(3)
+
+ self.on_pose = None
+
+ print(f"[VO] Visual Odometry initialized ({detector_type})")
+
+ def process_frame(self, camera_name, frame):
+ gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
+ keypoints, descriptors = self.detector.detectAndCompute(gray, None)
+ current_time = time.time()
+
+ if (
+ self.prev_frame is not None
+ and self.prev_descriptors is not None
+ and len(keypoints) >= self.min_features
+ ):
+ matches = self._match_features(self.prev_descriptors, descriptors)
+ if len(matches) >= self.min_features:
+ self._estimate_motion(
+ self.prev_keypoints, keypoints, matches, current_time
+ )
+
+ self.prev_frame = gray
+ self.prev_keypoints = keypoints
+ self.prev_descriptors = descriptors
+ self.prev_time = current_time
+
+ def _match_features(self, desc1, desc2):
if desc1 is None or desc2 is None:
return []
-
+
matches = self.matcher.knnMatch(desc1, desc2, k=2)
-
- good_matches = []
- for match_pair in matches:
- if len(match_pair) == 2:
- m, n = match_pair
+ good = []
+ for pair in matches:
+ if len(pair) == 2:
+ m, n = pair
if m.distance < 0.7 * n.distance:
- good_matches.append(m)
-
- return good_matches
-
- def estimate_motion(self, prev_kp, curr_kp, matches, current_time):
- if self.camera_matrix is None or len(matches) < 5:
+ good.append(m)
+ return good
+
+ def _estimate_motion(self, prev_kp, curr_kp, matches, current_time):
+ if len(matches) < 5:
return
-
+
pts1 = np.float32([prev_kp[m.queryIdx].pt for m in matches])
pts2 = np.float32([curr_kp[m.trainIdx].pt for m in matches])
-
+
E, mask = cv2.findEssentialMat(
- pts1, pts2, self.camera_matrix,
- method=cv2.RANSAC, prob=0.999, threshold=1.0)
-
+ pts1, pts2, self.camera_matrix, method=cv2.RANSAC, prob=0.999, threshold=1.0
+ )
+
if E is None:
return
-
+
_, R, t, mask = cv2.recoverPose(E, pts1, pts2, self.camera_matrix, mask=mask)
-
+
scale = 1.0
translation = scale * t.flatten()
-
+
if self.prev_time is not None:
- dt = (current_time - self.prev_time).nanoseconds / 1e9
+ dt = current_time - self.prev_time
if dt > 0:
self.velocity = translation / dt
-
+
self.position += self.orientation @ translation
+ self.orientation = R @ self.orientation  # NOTE(review): verify composition order (left- vs right-multiply) matches the intended world/camera frame convention
-
- def publish_pose(self):
- pose_msg = PoseStamped()
- pose_msg.header.stamp = self.get_clock().now().to_msg()
- pose_msg.header.frame_id = 'odom'
-
- pose_msg.pose.position.x = float(self.position[0])
- pose_msg.pose.position.y = float(self.position[1])
- pose_msg.pose.position.z = float(self.position[2])
-
- rotation = Rotation.from_matrix(self.orientation)
- quat = rotation.as_quat()
- pose_msg.pose.orientation.x = quat[0]
- pose_msg.pose.orientation.y = quat[1]
- pose_msg.pose.orientation.z = quat[2]
- pose_msg.pose.orientation.w = quat[3]
-
- self.pose_pub.publish(pose_msg)
-
- vel_msg = TwistStamped()
- vel_msg.header.stamp = self.get_clock().now().to_msg()
- vel_msg.header.frame_id = 'odom'
- vel_msg.twist.linear.x = float(self.velocity[0])
- vel_msg.twist.linear.y = float(self.velocity[1])
- vel_msg.twist.linear.z = float(self.velocity[2])
-
- self.velocity_pub.publish(vel_msg)
+ if self.on_pose:
+ r = Rotation.from_matrix(self.orientation)
+ self.on_pose(self.position.copy(), r.as_quat())
-def main(args=None):
- rclpy.init(args=args)
- node = VisualOdometryNode()
- try:
- rclpy.spin(node)
- except KeyboardInterrupt:
- pass
- finally:
- node.destroy_node()
- rclpy.shutdown()
+ def get_pose(self):
+ r = Rotation.from_matrix(self.orientation)
+ return self.position.copy(), r.as_quat()
-
-if __name__ == '__main__':
- main()
+ def get_velocity(self):
+ return self.velocity.copy()
diff --git a/src/vision/visual_servoing.py b/src/vision/visual_servoing.py
index 5c8a27e..b4c880d 100644
--- a/src/vision/visual_servoing.py
+++ b/src/vision/visual_servoing.py
@@ -1,135 +1,76 @@
#!/usr/bin/env python3
-"""Visual Servoing - Vision-based control for precision positioning."""
+"""
+Visual Servoing - Vision-based control for precision landing on ArUco marker.
+Consumes ObjectDetector detections and sends velocity commands via the injected controller (pymavlink-backed).
+"""
-import rclpy
-from rclpy.node import Node
-from sensor_msgs.msg import Image
-from geometry_msgs.msg import PoseStamped, TwistStamped, Pose
-from std_msgs.msg import Bool
-from cv_bridge import CvBridge
-import cv2
import numpy as np
-class VisualServoing(Node):
-
- def __init__(self):
- super().__init__('visual_servoing')
-
- self.bridge = CvBridge()
-
- self.declare_parameter('target_marker_id', 0)
- self.declare_parameter('desired_distance', 1.0)
- self.declare_parameter('kp_linear', 0.5)
- self.declare_parameter('kp_angular', 0.3)
- self.declare_parameter('max_velocity', 1.0)
-
- self.target_marker_id = self.get_parameter('target_marker_id').value
- self.desired_distance = self.get_parameter('desired_distance').value
- self.kp_linear = self.get_parameter('kp_linear').value
- self.kp_angular = self.get_parameter('kp_angular').value
- self.max_velocity = self.get_parameter('max_velocity').value
-
+class VisualServoing:
+ def __init__(self, controller, target_marker_id=0, desired_distance=1.0):
+ self.controller = controller
+ self.target_marker_id = target_marker_id
+ self.desired_distance = desired_distance
+
+ self.kp_xy = 0.5
+ self.kp_z = 0.3
+ self.max_velocity = 1.0
+
self.enabled = False
- self.target_pose = None
+ self.target_acquired = False
self.image_center = (320, 240)
-
- self.camera_matrix = np.array([
- [500.0, 0.0, 320.0],
- [0.0, 500.0, 240.0],
- [0.0, 0.0, 1.0]
- ])
- self.dist_coeffs = np.zeros(5)
-
- self.aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
- self.aruco_params = cv2.aruco.DetectorParameters()
- self.aruco_detector = cv2.aruco.ArucoDetector(self.aruco_dict, self.aruco_params)
-
- self.image_sub = self.create_subscription(
- Image, '/uav/camera/forward/image_raw', self.image_callback, 10)
-
- self.enable_sub = self.create_subscription(
- Bool, '/visual_servoing/enable', self.enable_callback, 10)
-
- self.target_sub = self.create_subscription(
- Pose, '/visual_servoing/target', self.target_callback, 10)
-
- self.velocity_pub = self.create_publisher(
- TwistStamped, '/uav/visual_servoing/cmd_vel', 10)
-
- self.status_pub = self.create_publisher(
- Bool, '/visual_servoing/target_acquired', 10)
-
- self.get_logger().info('Visual Servoing Node Started')
-
- def enable_callback(self, msg):
- self.enabled = msg.data
- self.get_logger().info(f'Visual servoing {"enabled" if self.enabled else "disabled"}')
-
- def target_callback(self, msg):
- self.target_pose = msg
-
- def image_callback(self, msg):
+
+ self.last_detection = None
+
+ print(f"[VS] Visual Servoing initialized (target marker: {target_marker_id})")
+
+ def enable(self):
+ self.enabled = True
+ print("[VS] Enabled")
+
+ def disable(self):
+ self.enabled = False
+ print("[VS] Disabled")
+
+ def on_detections(self, detections):
if not self.enabled:
return
-
- try:
- frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
- self.image_center = (frame.shape[1] // 2, frame.shape[0] // 2)
-
- gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
- corners, ids, _ = self.aruco_detector.detectMarkers(gray)
-
- target_acquired = Bool()
- target_acquired.data = False
-
- if ids is not None and self.target_marker_id in ids:
- idx = np.where(ids == self.target_marker_id)[0][0]
- target_corners = corners[idx]
-
- rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
- [target_corners], 0.15, self.camera_matrix, self.dist_coeffs)
-
- target_pos = tvecs[0][0]
- self.compute_control(target_pos, target_corners)
-
- target_acquired.data = True
-
- self.status_pub.publish(target_acquired)
-
- except Exception as e:
- self.get_logger().error(f'Visual servoing error: {e}')
-
- def compute_control(self, target_pos, corners):
- marker_center = np.mean(corners[0], axis=0)
-
- error_x = self.image_center[0] - marker_center[0]
- error_y = self.image_center[1] - marker_center[1]
- error_z = target_pos[2] - self.desired_distance
-
- vel_msg = TwistStamped()
- vel_msg.header.stamp = self.get_clock().now().to_msg()
- vel_msg.header.frame_id = 'base_link'
-
- vel_msg.twist.linear.x = np.clip(self.kp_linear * error_z, -self.max_velocity, self.max_velocity)
- vel_msg.twist.linear.y = np.clip(self.kp_linear * error_x / 100.0, -self.max_velocity, self.max_velocity)
- vel_msg.twist.linear.z = np.clip(self.kp_linear * error_y / 100.0, -self.max_velocity, self.max_velocity)
- vel_msg.twist.angular.z = np.clip(self.kp_angular * error_x / 100.0, -1.0, 1.0)
-
- self.velocity_pub.publish(vel_msg)
+ target = None
+ for d in detections:
+ if d.get("id") == self.target_marker_id:
+ target = d
+ break
-def main(args=None):
- rclpy.init(args=args)
- node = VisualServoing()
- try:
- rclpy.spin(node)
- except KeyboardInterrupt:
- pass
- finally:
- node.destroy_node()
- rclpy.shutdown()
+ if target is None:
+ self.target_acquired = False
+ return
+ self.target_acquired = True
+ self.last_detection = target
+ self.compute_control(target)
-if __name__ == '__main__':
- main()
+ def compute_control(self, detection):
+ center_px = detection["center_px"]
+ distance = detection["distance"]
+ position = detection["position"]  # NOTE(review): currently unused in compute_control — remove or use for pose-based control
+
+ error_x = self.image_center[0] - center_px[0]
+ error_y = self.image_center[1] - center_px[1]
+ error_z = distance - self.desired_distance
+
+ vx = np.clip(self.kp_xy * error_y / 100.0, -self.max_velocity, self.max_velocity)
+ vy = np.clip(self.kp_xy * error_x / 100.0, -self.max_velocity, self.max_velocity)
+ vz = np.clip(-self.kp_z * error_z, -self.max_velocity, self.max_velocity)
+
+ self.controller.send_velocity(vx, vy, vz)
+
+ print(
+ f"\r[VS] Target ID:{detection['id']} "
+ f"d:{distance:.2f}m "
+ f"err:({error_x:.0f},{error_y:.0f}) "
+ f"vel:({vx:.2f},{vy:.2f},{vz:.2f})",
+ end="",
+ flush=True,
+ )
diff --git a/worlds/uav_ugv_search.sdf b/worlds/uav_ugv_search.sdf
index 4959792..788889b 100644
--- a/worlds/uav_ugv_search.sdf
+++ b/worlds/uav_ugv_search.sdf
@@ -153,11 +153,12 @@
0 0 0.195 0 0 90
+
true
- 10 5 0 0 0 0
+ 5.0 5.0 0 0 0 0
diff --git a/wsl_env.sh b/wsl_env.sh
deleted file mode 100755
index d111c3e..0000000
--- a/wsl_env.sh
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-# WSL Environment for UAV-UGV Simulation
-
-if [ -d "/mnt/wslg" ]; then
- export DISPLAY=:0
-else
- export DISPLAY=$(cat /etc/resolv.conf 2>/dev/null | grep nameserver | awk '{print $2}'):0
-fi
-
-export LIBGL_ALWAYS_INDIRECT=0
-export MESA_GL_VERSION_OVERRIDE=3.3
-export MESA_GLSL_VERSION_OVERRIDE=330
-export OGRE_RTT_MODE=Copy