Camera Sim Update

This commit is contained in:
2026-02-12 14:29:32 -05:00
parent 0e427f3597
commit 92da41138b
27 changed files with 1353 additions and 1317 deletions

View File

@@ -1,56 +0,0 @@
#!/bin/bash
# UAV-UGV Simulation - Environment Activation
# Usage: source activate_venv.sh
#
# Configures ROS 2, ArduPilot paths, the project virtualenv and Gazebo
# resource/plugin paths in the CURRENT shell (must be sourced, not executed,
# so the exports persist).
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"

# --- ROS 2 setup: prefer Jazzy, fall back to Humble ---
if [ -f "/opt/ros/jazzy/setup.bash" ]; then
  source /opt/ros/jazzy/setup.bash
  ROS_VER="jazzy"
elif [ -f "/opt/ros/humble/setup.bash" ]; then
  source /opt/ros/humble/setup.bash
  ROS_VER="humble"
else
  echo "[WARN] ROS 2 not found in /opt/ros/"
  ROS_VER="none"
fi

# --- ArduPilot environment ---
export ARDUPILOT_HOME="$HOME/ardupilot"
export PATH="$ARDUPILOT_HOME/Tools/autotest:$PATH"
export PATH="$HOME/.local/bin:$PATH"

# Deactivate any venv that is already active so we do not nest venvs.
if [ -n "${VIRTUAL_ENV:-}" ]; then
  deactivate 2>/dev/null || true
fi

# Activate the project venv if present.
if [ -f "$SCRIPT_DIR/venv/bin/activate" ]; then
  source "$SCRIPT_DIR/venv/bin/activate"
fi

# --- Gazebo paths (new Gazebo - Ignition/Harmonic) ---
export GZ_SIM_RESOURCE_PATH="$SCRIPT_DIR/models:$SCRIPT_DIR/worlds:${GZ_SIM_RESOURCE_PATH:-}"

# ArduPilot Gazebo plugin (compiled plugin library plus its models/worlds).
if [ -d "$HOME/ardupilot_gazebo/build" ]; then
  export GZ_SIM_SYSTEM_PLUGIN_PATH="$HOME/ardupilot_gazebo/build:${GZ_SIM_SYSTEM_PLUGIN_PATH:-}"
  export GZ_SIM_RESOURCE_PATH="$HOME/ardupilot_gazebo/models:$HOME/ardupilot_gazebo/worlds:$GZ_SIM_RESOURCE_PATH"
fi

# --- WSL display setup ---
if grep -qEi "(microsoft|wsl)" /proc/version 2>/dev/null; then
  if [ -d "/mnt/wslg" ]; then
    # WSLg provides its own X server on :0.
    export DISPLAY=:0
  else
    # Legacy WSL2: the X server runs on the Windows host, whose IP is the
    # nameserver in /etc/resolv.conf. Take only the FIRST nameserver — the
    # previous `cat | grep | awk` pipeline printed one line per nameserver,
    # which produced an invalid multi-line DISPLAY on multi-DNS setups.
    export DISPLAY="$(awk '/^nameserver/ {print $2; exit}' /etc/resolv.conf 2>/dev/null):0"
  fi
  export LIBGL_ALWAYS_INDIRECT=0
  export MESA_GL_VERSION_OVERRIDE=3.3
fi

echo -e "\033[0;32mEnvironment activated (ROS 2 $ROS_VER)\033[0m"
echo ""
echo "Run simulation: bash scripts/run_simulation.sh"

View File

@@ -70,3 +70,9 @@ FS_GCS_ENABLE 0
# ==================== # ====================
LOG_BITMASK 176126 LOG_BITMASK 176126
LOG_DISARMED 0 LOG_DISARMED 0
# ====================
# Landing Speed
# ====================
LAND_SPEED 150 # Final descent cm/s (default 50)
LAND_SPEED_HIGH 250 # Initial descent cm/s (default 0=WPNAV_SPEED_DN)

View File

@@ -1,76 +0,0 @@
# Geofence Configuration
# GPS is ONLY used for safety boundaries, NOT navigation
/**:
ros__parameters:
geofence:
enabled: true
# Fence type
type: "polygon" # Options: "polygon", "circle", "cylinder"
# Polygon fence (GPS coordinates for safety boundary)
polygon_fence:
points:
- latitude: 47.397742
longitude: 8.545594
- latitude: 47.398242
longitude: 8.545594
- latitude: 47.398242
longitude: 8.546094
- latitude: 47.397742
longitude: 8.546094
# Close the polygon automatically
auto_close: true
# Circular fence (alternative to polygon)
circle_fence:
center_latitude: 47.397742
center_longitude: 8.545594
radius_meters: 100
# Cylinder fence (circle + altitude limits)
cylinder_fence:
center_latitude: 47.397742
center_longitude: 8.545594
radius_meters: 100
min_altitude: 0 # meters AGL
max_altitude: 50 # meters AGL
# Altitude limits (applies to all fence types)
altitude_limits:
min_altitude: 0.0 # meters AGL
max_altitude: 50.0 # meters AGL
enable_floor: true
enable_ceiling: true
# Fence actions
actions:
on_breach: "RTL" # Options: "RTL", "LAND", "HOLD", "ALERT_ONLY"
warning_distance: 10.0 # meters from fence
warning_action: "ALERT" # Options: "ALERT", "SLOW_DOWN"
# Monitoring
check_rate: 10 # Hz
consecutive_breaches_required: 3 # Require 3 consecutive breaches before action
# Visualization
publish_markers: true
marker_topic: "/geofence/markers"
marker_color:
safe: {r: 0.0, g: 1.0, b: 0.0, a: 0.5}
warning: {r: 1.0, g: 1.0, b: 0.0, a: 0.7}
breach: {r: 1.0, g: 0.0, b: 0.0, a: 0.9}
# Multi-vehicle support
vehicles:
uav:
enabled: true
namespace: "/uav"
gps_topic: "/uav/mavros/global_position/global"
ugv:
enabled: true
namespace: "/ugv"
gps_topic: "/ugv/gps/fix"

19
config/gui.config Normal file
View File

@@ -0,0 +1,19 @@
<!-- Camera feed plugins (added via --gui-config on top of defaults) -->
<!-- ImageDisplay panel showing the gimbal-mounted camera stream.
     The topic is the fully scoped Gazebo sensor topic of the gimbal
     pitch-link camera in the uav_ugv_search world. -->
<plugin filename="ImageDisplay" name="Gimbal Camera">
<gz-gui>
<title>Gimbal Camera</title>
<property type="string" key="state">docked</property>
</gz-gui>
<topic>/world/uav_ugv_search/model/iris_with_gimbal/model/gimbal/link/pitch_link/sensor/camera/image</topic>
<!-- topic_picker lets the user switch to another image topic from the GUI. -->
<topic_picker>true</topic_picker>
</plugin>
<!-- ImageDisplay panel for the fixed downward-facing camera
     (remapped topic /uav/camera/downward, see iris_with_gimbal model.sdf). -->
<plugin filename="ImageDisplay" name="Downward Camera">
<gz-gui>
<title>Downward Camera</title>
<property type="string" key="state">docked</property>
</gz-gui>
<topic>/uav/camera/downward</topic>
<topic_picker>true</topic_picker>
</plugin>

View File

@@ -1,72 +0,0 @@
# MAVROS Configuration for GPS-Denied Navigation
# GPS is DISABLED for navigation, ENABLED only for geofencing
/**:
ros__parameters:
use_sim_time: true
# Connection
fcu_protocol: "v2.0"
# System
system_id: 1
component_id: 1
# Time sync
time:
time_ref_source: "fcu"
timesync_mode: MAVLINK
timesync_avg_alpha: 0.6
# GPS Settings - ONLY for geofencing
gps:
# Disable GPS for navigation
use_gps_for_navigation: false
# Enable GPS only for geofence
enable_gps_geofence: true
# Local position (PRIMARY navigation source)
local_position:
frame_id: "map"
tf:
send: true
frame_id: "map"
child_frame_id: "base_link"
# Use local odometry, NOT GPS
use_vision: true # Will use our visual odometry
# Vision pose input (from visual odometry)
vision_pose:
tf:
listen: false
frame_id: "odom"
child_frame_id: "base_link"
# Vision speed input (from optical flow)
vision_speed:
listen_twist: true
# Setpoint configuration
setpoint_raw:
thrust_scaling: 1.0
setpoint_position:
tf:
listen: false
# Use LOCAL_NED frame (relative positioning)
mav_frame: 1 # MAV_FRAME_LOCAL_NED
setpoint_velocity:
mav_frame: 8 # MAV_FRAME_BODY_NED (body-relative)
# Disable GPS-based modes
# Only allow: MANUAL, STABILIZE, ALT_HOLD, GUIDED (local), LOITER (local)
allowed_modes:
- "MANUAL"
- "STABILIZE"
- "ALT_HOLD"
- "GUIDED"
- "LOITER"
- "RTL" # Returns to LOCAL origin, not GPS home

View File

@@ -0,0 +1,12 @@
# Mission definition: take off, hover in place, then land.
# NOTE(review): nesting reconstructed (diff rendering stripped indentation);
# verify against the repository file.
name: hover
description: Take off and hold position
altitude: 5.0   # takeoff altitude (meters, relative)
duration: 10.0  # hover duration (seconds)
steps:
  - action: takeoff
    altitude: 5.0
  - action: hover
    duration: 10.0
  - action: land

View File

@@ -0,0 +1,21 @@
# Mission definition: expanding spiral search for the UGV, then land on it.
# NOTE(review): nesting reconstructed (diff rendering stripped indentation);
# verify against the repository file.
name: search
description: Spiral search pattern to find and land on UGV
altitude: 5.0  # takeoff/search altitude (meters, relative)
search:
  pattern: spiral
  initial_leg: 3.0       # length of the first spiral leg (meters)
  leg_increment: 2.0     # growth of each successive leg (meters)
  max_legs: 12           # give up after this many legs
  detection_radius: 2.0  # distance at which the UGV counts as found (meters)
steps:
  - action: takeoff
    altitude: 5.0
  - action: search_spiral
    initial_leg: 3.0
    leg_increment: 2.0
    max_legs: 12
  - action: land_on_ugv
    detection_radius: 2.0

View File

@@ -0,0 +1,26 @@
# Mission definition: fly a closed square via four relative moves, then land.
# NOTE(review): nesting reconstructed (diff rendering stripped indentation);
# verify against the repository file.
name: square
description: Fly a square pattern
altitude: 5.0     # takeoff altitude (meters, relative)
side_length: 5.0  # square side length (meters)
steps:
  - action: takeoff
    altitude: 5.0
  # Relative moves trace the square: +x, +y, -x, -y (back to start).
  - action: move_rel
    x: 5.0
    y: 0.0
    z: 0.0
  - action: move_rel
    x: 0.0
    y: 5.0
    z: 0.0
  - action: move_rel
    x: -5.0
    y: 0.0
    z: 0.0
  - action: move_rel
    x: 0.0
    y: -5.0
    z: 0.0
  - action: land

36
config/properties.yaml Normal file
View File

@@ -0,0 +1,36 @@
# Simulation properties: world selection, physics step, sensor noise,
# GPS (geofence only) and initial vehicle poses.
# NOTE(review): the diff rendering stripped indentation; top-level grouping
# below (physics/sensor_noise/gps as siblings of simulation) is reconstructed
# from key order — confirm against the repository file.
simulation:
  world: uav_ugv_search.sdf
  software_render: auto  # auto-detects WSL and falls back to software GL
physics:
  max_step_size: 0.002   # physics step (seconds)
  real_time_factor: 1.0
sensor_noise:
  enabled: true
  position_stddev: 0.05  # meters
  velocity_stddev: 0.1   # m/s
  gyro_stddev: 0.0002    # rad/s
  accel_stddev: 0.017    # m/s^2
gps:
  enabled: true
  update_rate: 5         # Hz
  noise_stddev: 2.0      # meters (realistic GPS error)
  purpose: geofence_only # GPS is never used for navigation
initial_positions:
  uav:
    x: 0.0
    y: 0.0
    z: 0.195   # spawn just above ground
    yaw: 90.0  # presumably degrees (UGV yaw below uses 0.0) — TODO confirm
  ugv:
    x: 5.0
    y: 5.0
    z: 0.0
    yaw: 0.0
logging:
  enabled: true
  log_directory: logs

View File

@@ -1,86 +0,0 @@
# General Simulation Configuration
/**:
ros__parameters:
simulation:
# Use Gazebo ground truth for "perfect" position estimation
# In real world, this would come from visual odometry
use_gazebo_ground_truth: true # Set false for realistic VO errors
# Add noise to simulate real sensors
add_sensor_noise: true
# Noise parameters
noise:
position_stddev: 0.05 # meters
velocity_stddev: 0.1 # m/s
imu_gyro_stddev: 0.0002 # rad/s
imu_accel_stddev: 0.017 # m/s^2
# GPS simulation (for geofence only)
gps:
enabled: true # Only for geofencing
update_rate: 5 # Hz
noise_stddev: 2.0 # meters (realistic GPS error)
# Real-time factor (1.0 = real-time)
real_time_factor: 1.0
# Physics settings
physics:
engine: "ode"
max_step_size: 0.001
real_time_update_rate: 1000
# World configuration
world:
name: "empty_custom"
gravity: 9.81
# Lighting (affects vision)
lighting:
ambient_level: 0.4
shadows: true
sun_angle: 45.0 # degrees
# Ground plane
ground:
size: [100.0, 100.0] # meters
texture: "ground_plane"
friction: 1.0
# Initial positions (LOCAL coordinates)
initial_positions:
uav:
x: 0.0
y: 0.0
z: 0.2 # Start slightly above ground
roll: 0.0
pitch: 0.0
yaw: 0.0
ugv:
x: 5.0
y: 0.0
z: 0.0
roll: 0.0
pitch: 0.0
yaw: 1.57 # 90 degrees (facing +Y)
# Visualization
visualization:
show_trajectory: true
trajectory_length: 1000 # points
show_local_frame: true
show_velocity_vector: true
# Data logging
logging:
enabled: true
log_directory: "~/.ros/uav_ugv_logs"
log_topics:
- "/uav/visual_odometry/pose"
- "/uav/mavros/local_position/pose"
- "/geofence/status"
bag_recording: false

74
config/uav.yaml Normal file
View File

@@ -0,0 +1,74 @@
# UAV configuration: connection endpoints, flight envelope, vision pipeline,
# EKF position estimation and safety limits (GPS-denied navigation).
# NOTE(review): the diff rendering stripped indentation; nesting below is
# reconstructed from key grouping — confirm against the repository file
# (in particular whether position_estimation is top-level or under vision).
connection:
  sim: "tcp:127.0.0.1:5760"  # ArduPilot SITL TCP endpoint
  real: "/dev/ttyAMA0"       # serial link on real hardware
  baud: 57600
flight:
  takeoff_altitude: 5.0  # meters (relative)
  max_altitude: 15.0
  min_altitude: 3.0
  max_velocity: 2.0      # m/s
  max_acceleration: 1.0  # m/s^2
  max_climb_rate: 1.0    # m/s
  max_yaw_rate: 45.0     # presumably deg/s — TODO confirm
navigation:
  frame: LOCAL_NED          # local frame only; GPS never used for navigation
  waypoint_radius: 0.5      # meters
  position_hold_radius: 0.2 # meters
  home_mode: local          # home is local origin (0,0,0), not a GPS fix
vision:
  forward_camera:
    enabled: true
    frame_rate: 30
    resolution: [640, 480]
  downward_camera:
    enabled: true
    frame_rate: 30
    resolution: [320, 240]
  visual_odometry:
    enabled: true
    method: ORB
    min_features: 100
    max_features: 500
  optical_flow:
    enabled: true
    method: Lucas-Kanade
    window_size: 15
    min_altitude: 0.3  # meters; flow unreliable below this
    max_altitude: 10.0 # meters
  landmark_detection:
    enabled: true
    method: ArUco
    marker_size: 0.15  # meters
position_estimation:
  method: EKF
  update_rate: 50  # Hz
  weights:         # relative trust in each sensor source
    visual_odometry: 0.6
    optical_flow: 0.3
    imu: 0.1
  process_noise:
    position: 0.1
    velocity: 0.5
  measurement_noise:
    visual_odom: 0.05
    optical_flow: 0.1
    imu: 0.2
obstacle_avoidance:
  enabled: true
  detection_range: 5.0  # meters
  safety_margin: 1.0    # meters
safety:
  geofence:
    enabled: true
    action_on_breach: RTL  # return to LOCAL origin
    max_altitude: 10
    radius: 20
  failsafe:
    vision_loss_timeout: 5.0    # seconds
    action_on_vision_loss: HOLD

View File

@@ -1,105 +0,0 @@
# UAV Configuration - GPS-Denied Navigation
/**:
ros__parameters:
# Vision parameters for navigation
vision:
# Forward camera (for visual odometry)
forward_camera:
enabled: true
topic: "/uav/camera/forward/image_raw"
info_topic: "/uav/camera/forward/camera_info"
frame_rate: 30
# Downward camera (for optical flow)
downward_camera:
enabled: true
topic: "/uav/camera/downward/image_raw"
info_topic: "/uav/camera/downward/camera_info"
frame_rate: 30
# Visual odometry settings
visual_odometry:
enabled: true
method: "ORB" # Options: ORB, SIFT, SURF
min_features: 100
max_features: 500
feature_quality: 0.01
min_distance: 10
# Optical flow settings
optical_flow:
enabled: true
method: "Lucas-Kanade"
window_size: 15
max_level: 3
min_altitude: 0.3 # meters
max_altitude: 10.0 # meters
# Landmark detection
landmarks:
enabled: true
detection_method: "ArUco" # Options: ArUco, AprilTag, ORB
marker_size: 0.15 # meters
# Position estimation (sensor fusion)
position_estimator:
fusion_method: "EKF" # Extended Kalman Filter
# Sensor weights (trust levels)
weights:
visual_odometry: 0.6
optical_flow: 0.3
imu: 0.1
# Update rates
update_rate: 50 # Hz
# Covariance matrices
process_noise:
position: 0.1
velocity: 0.5
measurement_noise:
visual_odom: 0.05
optical_flow: 0.1
imu: 0.2
# Navigation parameters (RELATIVE coordinates only)
navigation:
frame: "LOCAL_NED" # Never use GPS frame
# Control parameters
max_velocity: 2.0 # m/s
max_acceleration: 1.0 # m/s^2
max_climb_rate: 1.0 # m/s
# Waypoint following
waypoint_radius: 0.5 # meters
position_hold_radius: 0.2 # meters
# Obstacle avoidance (vision-based)
obstacle_avoidance:
enabled: true
detection_range: 5.0 # meters
safety_margin: 1.0 # meters
# Mission parameters
mission:
takeoff_altitude: 5.0 # meters (relative)
loiter_radius: 2.0 # meters
rtl_altitude: 10.0 # meters (relative)
# Home position is LOCAL (0,0,0), not GPS
home_mode: "local" # Never "gps"
# Safety (geofencing uses GPS, but navigation doesn't)
safety:
geofence:
enabled: true
use_gps: true # ONLY for geofence
action_on_breach: "RTL" # Return to LOCAL origin
failsafe:
vision_loss_timeout: 5.0 # seconds
action_on_vision_loss: "HOLD" # or "RTL" or "LAND"

46
config/ugv.yaml Normal file
View File

@@ -0,0 +1,46 @@
# UGV configuration: chassis geometry, spawn position, navigation limits,
# the landing pad the UAV searches for, and safety actions.
# NOTE(review): the diff rendering stripped indentation; nesting below is
# reconstructed from key grouping — confirm against the repository file.
vehicle:
  wheelbase: 0.3     # meters
  track_width: 0.25  # meters
  wheel_radius: 0.05 # meters
connection:
  sim: null   # no flight controller link; UGV is a Gazebo model in sim
  real: null
position:
  # Spawn pose; kept in sync with the world file by run_simulation.sh.
  x: 5.0
  y: 5.0
  z: 0.0
  yaw: 0.0
navigation:
  max_linear_velocity: 1.0   # m/s
  max_angular_velocity: 1.5  # rad/s
  linear_acceleration: 0.5   # m/s^2
  angular_acceleration: 1.0  # rad/s^2
  waypoint_radius: 0.3       # meters
  position_tolerance: 0.3    # meters
landing_pad:
  # Marker the UAV's downward camera detects for precision landing.
  marker_type: ArUco
  marker_id: 0
  marker_size: 0.3    # meters
  pad_diameter: 1.0   # meters
  color: [255, 255, 0]  # presumably RGB yellow — TODO confirm channel order
vision:
  detection_method: ArUco
  camera:
    enabled: true
    frame_rate: 30
    resolution: [320, 240]
safety:
  geofence:
    enabled: true
    action_on_breach: STOP
  failsafe:
    vision_loss_timeout: 3.0  # seconds
    action_on_vision_loss: STOP
    collision_distance: 0.2   # meters
    action_on_collision: STOP

View File

@@ -1,110 +0,0 @@
# UGV Configuration - GPS-Denied Navigation
/**:
ros__parameters:
# Vehicle physical parameters
vehicle:
wheelbase: 0.3 # meters
track_width: 0.25 # meters
wheel_radius: 0.05 # meters
max_speed: 1.0 # m/s
max_turn_rate: 1.5 # rad/s
# Vision parameters for navigation
vision:
# Forward camera (for visual odometry and obstacle detection)
forward_camera:
enabled: true
topic: "/ugv/camera/forward/image_raw"
info_topic: "/ugv/camera/forward/camera_info"
frame_rate: 30
fov_horizontal: 90 # degrees
fov_vertical: 60 # degrees
# Visual odometry settings
visual_odometry:
enabled: true
method: "ORB" # Options: ORB, SIFT
min_features: 150
max_features: 600
feature_quality: 0.02
min_distance: 8
# Wheel odometry (primary odometry for ground vehicle)
wheel_odometry:
enabled: true
topic: "/ugv/odom"
covariance_scale: 0.05
# Obstacle detection
obstacle_detection:
enabled: true
method: "depth_camera" # or "stereo"
min_distance: 0.3 # meters
max_distance: 10.0 # meters
# Position estimation (sensor fusion)
position_estimator:
fusion_method: "EKF"
# Sensor weights (trust levels)
weights:
visual_odometry: 0.3
wheel_odometry: 0.5
imu: 0.2
# Update rates
update_rate: 50 # Hz
# Covariance matrices
process_noise:
position: 0.05
velocity: 0.2
heading: 0.1
measurement_noise:
visual_odom: 0.08
wheel_odom: 0.03
imu: 0.15
# Navigation parameters (RELATIVE coordinates only)
navigation:
frame: "LOCAL_NED" # Never use GPS frame
# Control parameters
max_linear_velocity: 1.0 # m/s
max_angular_velocity: 1.5 # rad/s
linear_acceleration: 0.5 # m/s^2
angular_acceleration: 1.0 # rad/s^2
# Path following
lookahead_distance: 0.5 # meters
waypoint_radius: 0.3 # meters
# Obstacle avoidance
obstacle_avoidance:
enabled: true
detection_range: 3.0 # meters
safety_margin: 0.5 # meters
avoidance_method: "VFH" # Vector Field Histogram
# Mission parameters
mission:
default_speed: 0.5 # m/s
turn_in_place_threshold: 0.5 # radians
# Home position is LOCAL (0,0,0), not GPS
home_mode: "local"
# Safety
safety:
geofence:
enabled: true
use_gps: true # ONLY for geofence
action_on_breach: "STOP"
failsafe:
vision_loss_timeout: 3.0 # seconds
action_on_vision_loss: "STOP"
collision_distance: 0.2 # meters
action_on_collision: "STOP"

View File

@@ -0,0 +1,45 @@
<?xml version="1.0"?>
<!-- Gazebo model metadata for iris_with_gimbal.
     The dependency list must match the <include> URIs in model.sdf. -->
<model>
  <name>Iris with Gimbal</name>
  <version>2.0</version>
  <sdf version="1.9">model.sdf</sdf>
  <author>
    <name>Fadri Furrer</name>
    <email>fadri.furrer@mavt.ethz.ch</email>
  </author>
  <author>
    <name>Michael Burri</name>
  </author>
  <author>
    <name>Mina Kamel</name>
  </author>
  <author>
    <name>Janosch Nikolic</name>
  </author>
  <author>
    <name>Markus Achtelik</name>
  </author>
  <author>
    <name>Rhys Mainwaring</name>
  </author>
  <description>
    Starting with iris_with_standoffs
    add LiftDragPlugin
    add ArduCopterPlugin
    add gimbal_small_3d
  </description>
  <depend>
    <model>
      <!-- FIX: model.sdf includes model://gimbal_small_3d, but this file
           previously declared gimbal_small_2d; keep them consistent. -->
      <uri>model://gimbal_small_3d</uri>
      <version>2.0</version>
    </model>
    <model>
      <uri>model://iris_with_standoffs</uri>
      <version>2.0</version>
    </model>
  </depend>
</model>

383
models/iris_with_gimbal/model.sdf Executable file
View File

@@ -0,0 +1,383 @@
<?xml version='1.0'?>
<!-- iris_with_gimbal: iris_with_standoffs quadrotor plus a 3-axis gimbal and
     a fixed downward camera, flown by ArduPilot SITL via ArduPilotPlugin. -->
<sdf version="1.9">
<model name="iris_with_gimbal">
<include>
<uri>model://iris_with_standoffs</uri>
</include>
<!-- Gimbal sub-model, mounted under the airframe. -->
<include>
<uri>model://gimbal_small_3d</uri>
<name>gimbal</name>
<pose degrees="true">0 -0.01 -0.124923 90 0 90</pose>
</include>
<!-- Attaches the gimbal base rigidly: revolute joint with lower == upper == 0,
     i.e. zero travel (effectively a fixed joint). -->
<joint name="gimbal_joint" type="revolute">
<parent>iris_with_standoffs::base_link</parent>
<child>gimbal::base_link</child>
<axis>
<limit>
<lower>0</lower>
<upper>0</upper>
</limit>
<xyz>0 0 1</xyz>
</axis>
</joint>
<!-- Downward-facing camera for ArUco / landing pad detection -->
<link name="downward_cam_link">
<pose>0 0 -0.05 0 0 0</pose>
<inertial>
<!-- Tiny mass/inertia: the link exists only to carry the sensor. -->
<mass>0.01</mass>
<inertia>
<ixx>0.00001</ixx>
<ixy>0</ixy>
<ixz>0</ixz>
<iyy>0.00001</iyy>
<iyz>0</iyz>
<izz>0.00001</izz>
</inertia>
</inertial>
<!-- Camera pitched 90 deg (1.5708 rad) so it looks straight down;
     publishes on /uav/camera/downward (see config/gui.config viewer). -->
<sensor name="downward_camera" type="camera">
<pose>0 0 0 0 1.5708 0</pose>
<always_on>1</always_on>
<update_rate>10</update_rate>
<visualize>1</visualize>
<topic>/uav/camera/downward</topic>
<camera>
<horizontal_fov>1.3962634</horizontal_fov>
<image>
<width>640</width>
<height>480</height>
<format>R8G8B8</format>
</image>
<clip>
<near>0.1</near>
<far>100</far>
</clip>
</camera>
</sensor>
</link>
<joint name="downward_cam_joint" type="fixed">
<parent>iris_with_standoffs::base_link</parent>
<child>downward_cam_link</child>
</joint>
<!-- plugins -->
<plugin filename="gz-sim-joint-state-publisher-system"
name="gz::sim::systems::JointStatePublisher">
</plugin>
<!-- Rotor aerodynamics: two LiftDrag entries per rotor, one per blade
     (centers of pressure at +/-0.084 m with opposing forward vectors).
     Spin direction per rotor follows the iris X-frame layout. -->
<plugin filename="gz-sim-lift-drag-system"
name="gz::sim::systems::LiftDrag">
<a0>0.3</a0>
<alpha_stall>1.4</alpha_stall>
<cla>4.2500</cla>
<cda>0.10</cda>
<cma>0.0</cma>
<cla_stall>-0.025</cla_stall>
<cda_stall>0.0</cda_stall>
<cma_stall>0.0</cma_stall>
<area>0.002</area>
<air_density>1.2041</air_density>
<cp>0.084 0 0</cp>
<forward>0 1 0</forward>
<upward>0 0 1</upward>
<link_name>iris_with_standoffs::rotor_0</link_name>
</plugin>
<plugin filename="gz-sim-lift-drag-system"
name="gz::sim::systems::LiftDrag">
<a0>0.3</a0>
<alpha_stall>1.4</alpha_stall>
<cla>4.2500</cla>
<cda>0.10</cda>
<cma>0.0</cma>
<cla_stall>-0.025</cla_stall>
<cda_stall>0.0</cda_stall>
<cma_stall>0.0</cma_stall>
<area>0.002</area>
<air_density>1.2041</air_density>
<cp>-0.084 0 0</cp>
<forward>0 -1 0</forward>
<upward>0 0 1</upward>
<link_name>iris_with_standoffs::rotor_0</link_name>
</plugin>
<plugin filename="gz-sim-lift-drag-system"
name="gz::sim::systems::LiftDrag">
<a0>0.3</a0>
<alpha_stall>1.4</alpha_stall>
<cla>4.2500</cla>
<cda>0.10</cda>
<cma>0.0</cma>
<cla_stall>-0.025</cla_stall>
<cda_stall>0.0</cda_stall>
<cma_stall>0.0</cma_stall>
<area>0.002</area>
<air_density>1.2041</air_density>
<cp>0.084 0 0</cp>
<forward>0 1 0</forward>
<upward>0 0 1</upward>
<link_name>iris_with_standoffs::rotor_1</link_name>
</plugin>
<plugin filename="gz-sim-lift-drag-system"
name="gz::sim::systems::LiftDrag">
<a0>0.3</a0>
<alpha_stall>1.4</alpha_stall>
<cla>4.2500</cla>
<cda>0.10</cda>
<cma>0.0</cma>
<cla_stall>-0.025</cla_stall>
<cda_stall>0.0</cda_stall>
<cma_stall>0.0</cma_stall>
<area>0.002</area>
<air_density>1.2041</air_density>
<cp>-0.084 0 0</cp>
<forward>0 -1 0</forward>
<upward>0 0 1</upward>
<link_name>iris_with_standoffs::rotor_1</link_name>
</plugin>
<plugin filename="gz-sim-lift-drag-system"
name="gz::sim::systems::LiftDrag">
<a0>0.3</a0>
<alpha_stall>1.4</alpha_stall>
<cla>4.2500</cla>
<cda>0.10</cda>
<cma>0.0</cma>
<cla_stall>-0.025</cla_stall>
<cda_stall>0.0</cda_stall>
<cma_stall>0.0</cma_stall>
<area>0.002</area>
<air_density>1.2041</air_density>
<cp>0.084 0 0</cp>
<forward>0 -1 0</forward>
<upward>0 0 1</upward>
<link_name>iris_with_standoffs::rotor_2</link_name>
</plugin>
<plugin filename="gz-sim-lift-drag-system"
name="gz::sim::systems::LiftDrag">
<a0>0.3</a0>
<alpha_stall>1.4</alpha_stall>
<cla>4.2500</cla>
<cda>0.10</cda>
<cma>0.0</cma>
<cla_stall>-0.025</cla_stall>
<cda_stall>0.0</cda_stall>
<cma_stall>0.0</cma_stall>
<area>0.002</area>
<air_density>1.2041</air_density>
<cp>-0.084 0 0</cp>
<forward>0 1 0</forward>
<upward>0 0 1</upward>
<link_name>iris_with_standoffs::rotor_2</link_name>
</plugin>
<plugin filename="gz-sim-lift-drag-system"
name="gz::sim::systems::LiftDrag">
<a0>0.3</a0>
<alpha_stall>1.4</alpha_stall>
<cla>4.2500</cla>
<cda>0.10</cda>
<cma>0.0</cma>
<cla_stall>-0.025</cla_stall>
<cda_stall>0.0</cda_stall>
<cma_stall>0.0</cma_stall>
<area>0.002</area>
<air_density>1.2041</air_density>
<cp>0.084 0 0</cp>
<forward>0 -1 0</forward>
<upward>0 0 1</upward>
<link_name>iris_with_standoffs::rotor_3</link_name>
</plugin>
<plugin filename="gz-sim-lift-drag-system"
name="gz::sim::systems::LiftDrag">
<a0>0.3</a0>
<alpha_stall>1.4</alpha_stall>
<cla>4.2500</cla>
<cda>0.10</cda>
<cma>0.0</cma>
<cla_stall>-0.025</cla_stall>
<cda_stall>0.0</cda_stall>
<cma_stall>0.0</cma_stall>
<area>0.002</area>
<air_density>1.2041</air_density>
<cp>-0.084 0 0</cp>
<forward>0 1 0</forward>
<upward>0 0 1</upward>
<link_name>iris_with_standoffs::rotor_3</link_name>
</plugin>
<!-- Allow ArduPilotPlugin's VELOCITY controllers to apply joint forces. -->
<plugin filename="gz-sim-apply-joint-force-system"
name="gz::sim::systems::ApplyJointForce">
<joint_name>iris_with_standoffs::rotor_0_joint</joint_name>
</plugin>
<plugin filename="gz-sim-apply-joint-force-system"
name="gz::sim::systems::ApplyJointForce">
<joint_name>iris_with_standoffs::rotor_1_joint</joint_name>
</plugin>
<plugin filename="gz-sim-apply-joint-force-system"
name="gz::sim::systems::ApplyJointForce">
<joint_name>iris_with_standoffs::rotor_2_joint</joint_name>
</plugin>
<plugin filename="gz-sim-apply-joint-force-system"
name="gz::sim::systems::ApplyJointForce">
<joint_name>iris_with_standoffs::rotor_3_joint</joint_name>
</plugin>
<!-- Bridge to ArduPilot SITL over the JSON/FDM UDP interface.
     Channels 0-3 drive the rotors; channels 8-10 drive the gimbal. -->
<plugin name="ArduPilotPlugin"
filename="ArduPilotPlugin">
<!-- Port settings -->
<fdm_addr>127.0.0.1</fdm_addr>
<fdm_port_in>9002</fdm_port_in>
<connectionTimeoutMaxCount>5</connectionTimeoutMaxCount>
<lock_step>1</lock_step>
<!-- Frame conventions
Require by ArduPilot: change model and gazebo from XYZ to XY-Z coordinates
-->
<modelXYZToAirplaneXForwardZDown degrees="true">0 0 0 180 0 0</modelXYZToAirplaneXForwardZDown>
<gazeboXYZToNED degrees="true">0 0 0 180 0 90</gazeboXYZToNED>
<!-- Sensors -->
<imuName>iris_with_standoffs::imu_link::imu_sensor</imuName>
<!--
incoming control command [0, 1]
so offset it by 0 to get [0, 1]
and divide max target by 1.
offset = 0
multiplier = 838 max rpm / 1 = 838
-->
<control channel="0">
<jointName>iris_with_standoffs::rotor_0_joint</jointName>
<useForce>1</useForce>
<multiplier>838</multiplier>
<offset>0</offset>
<servo_min>1100</servo_min>
<servo_max>1900</servo_max>
<type>VELOCITY</type>
<p_gain>0.20</p_gain>
<i_gain>0</i_gain>
<d_gain>0</d_gain>
<i_max>0</i_max>
<i_min>0</i_min>
<cmd_max>2.5</cmd_max>
<cmd_min>-2.5</cmd_min>
<controlVelocitySlowdownSim>1</controlVelocitySlowdownSim>
</control>
<control channel="1">
<jointName>iris_with_standoffs::rotor_1_joint</jointName>
<useForce>1</useForce>
<multiplier>838</multiplier>
<offset>0</offset>
<servo_min>1100</servo_min>
<servo_max>1900</servo_max>
<type>VELOCITY</type>
<p_gain>0.20</p_gain>
<i_gain>0</i_gain>
<d_gain>0</d_gain>
<i_max>0</i_max>
<i_min>0</i_min>
<cmd_max>2.5</cmd_max>
<cmd_min>-2.5</cmd_min>
<controlVelocitySlowdownSim>1</controlVelocitySlowdownSim>
</control>
<!-- Rotors 2/3 use a negative multiplier: opposite spin direction. -->
<control channel="2">
<jointName>iris_with_standoffs::rotor_2_joint</jointName>
<useForce>1</useForce>
<multiplier>-838</multiplier>
<offset>0</offset>
<servo_min>1100</servo_min>
<servo_max>1900</servo_max>
<type>VELOCITY</type>
<p_gain>0.20</p_gain>
<i_gain>0</i_gain>
<d_gain>0</d_gain>
<i_max>0</i_max>
<i_min>0</i_min>
<cmd_max>2.5</cmd_max>
<cmd_min>-2.5</cmd_min>
<controlVelocitySlowdownSim>1</controlVelocitySlowdownSim>
</control>
<control channel="3">
<jointName>iris_with_standoffs::rotor_3_joint</jointName>
<useForce>1</useForce>
<multiplier>-838</multiplier>
<offset>0</offset>
<servo_min>1100</servo_min>
<servo_max>1900</servo_max>
<type>VELOCITY</type>
<p_gain>0.20</p_gain>
<i_gain>0</i_gain>
<d_gain>0</d_gain>
<i_max>0</i_max>
<i_min>0</i_min>
<cmd_max>2.5</cmd_max>
<cmd_min>-2.5</cmd_min>
<controlVelocitySlowdownSim>1</controlVelocitySlowdownSim>
</control>
<!-- Gimbal channels: COMMAND type forwards the scaled servo value to a
     topic consumed by the JointPositionController plugins below. -->
<!-- roll range is -30 to +30 deg -->
<control channel="8">
<jointName>gimbal::roll_joint</jointName>
<multiplier>1.047197551196</multiplier>
<offset>-0.5</offset>
<servo_min>1100</servo_min>
<servo_max>1900</servo_max>
<type>COMMAND</type>
<cmd_topic>/gimbal/cmd_roll</cmd_topic>
<p_gain>2</p_gain>
</control>
<!-- pitch range is -135 to +45 deg -->
<control channel="9">
<jointName>gimbal::pitch_joint</jointName>
<multiplier>-3.14159265</multiplier>
<offset>-0.75</offset>
<servo_min>1100</servo_min>
<servo_max>1900</servo_max>
<type>COMMAND</type>
<cmd_topic>/gimbal/cmd_pitch</cmd_topic>
<p_gain>2</p_gain>
</control>
<!-- yaw range is -160 to +160 deg -->
<control channel="10">
<jointName>gimbal::yaw_joint</jointName>
<multiplier>-5.5850536</multiplier>
<offset>-0.5</offset>
<servo_min>1100</servo_min>
<servo_max>1900</servo_max>
<type>COMMAND</type>
<cmd_topic>/gimbal/cmd_yaw</cmd_topic>
<p_gain>2</p_gain>
</control>
</plugin>
<!-- Gazebo-side position controllers; each listens on the matching
     /gimbal/cmd_* topic published by the ArduPilotPlugin channels above. -->
<plugin
filename="gz-sim-joint-position-controller-system"
name="gz::sim::systems::JointPositionController">
<joint_name>gimbal::roll_joint</joint_name>
<topic>/gimbal/cmd_roll</topic>
<p_gain>2</p_gain>
</plugin>
<plugin
filename="gz-sim-joint-position-controller-system"
name="gz::sim::systems::JointPositionController">
<joint_name>gimbal::pitch_joint</joint_name>
<topic>/gimbal/cmd_pitch</topic>
<p_gain>2</p_gain>
</plugin>
<plugin
filename="gz-sim-joint-position-controller-system"
name="gz::sim::systems::JointPositionController">
<joint_name>gimbal::yaw_joint</joint_name>
<topic>/gimbal/cmd_yaw</topic>
<p_gain>2</p_gain>
</plugin>
</model>
</sdf>

View File

@@ -18,8 +18,8 @@ print_error() { echo -e "${RED}[ERROR]${NC} $1"; }
SOFTWARE_RENDER=auto SOFTWARE_RENDER=auto
WORLD="uav_ugv_search.sdf" WORLD="uav_ugv_search.sdf"
MISSION="hover" MISSION="hover"
ALTITUDE=5.0 ALTITUDE=""
DURATION=30.0 DURATION=""
while [[ $# -gt 0 ]]; do while [[ $# -gt 0 ]]; do
case $1 in case $1 in
@@ -35,8 +35,8 @@ while [[ $# -gt 0 ]]; do
echo " --no-software-render Disable software rendering" echo " --no-software-render Disable software rendering"
echo " --world <file> World file (default: uav_ugv_search.sdf)" echo " --world <file> World file (default: uav_ugv_search.sdf)"
echo " --mission <type> hover, square, search (default: hover)" echo " --mission <type> hover, square, search (default: hover)"
echo " --altitude <m> Takeoff altitude (default: 5.0)" echo " --altitude <m> Override altitude from config"
echo " --duration <s> Hover duration (default: 30.0)" echo " --duration <s> Override duration from config"
exit 0 exit 0
;; ;;
*) shift ;; *) shift ;;
@@ -45,6 +45,8 @@ done
cleanup_all() { cleanup_all() {
print_info "Cleaning up ..." print_info "Cleaning up ..."
pkill -f "camera_viewer.py" 2>/dev/null || true
pkill -f "camera_processor" 2>/dev/null || true
pkill -f "gz sim" 2>/dev/null || true pkill -f "gz sim" 2>/dev/null || true
pkill -f "arducopter" 2>/dev/null || true pkill -f "arducopter" 2>/dev/null || true
pkill -f "sim_vehicle.py" 2>/dev/null || true pkill -f "sim_vehicle.py" 2>/dev/null || true
@@ -64,6 +66,13 @@ export PATH=$PATH:$HOME/ardupilot/Tools/autotest:$HOME/.local/bin
export GZ_SIM_RESOURCE_PATH="$PROJECT_DIR/models:$PROJECT_DIR/worlds:$HOME/ardupilot_gazebo/models:$HOME/ardupilot_gazebo/worlds" export GZ_SIM_RESOURCE_PATH="$PROJECT_DIR/models:$PROJECT_DIR/worlds:$HOME/ardupilot_gazebo/models:$HOME/ardupilot_gazebo/worlds"
export GZ_SIM_SYSTEM_PLUGIN_PATH=$HOME/ardupilot_gazebo/build export GZ_SIM_SYSTEM_PLUGIN_PATH=$HOME/ardupilot_gazebo/build
print_info "Model path: $GZ_SIM_RESOURCE_PATH"
if [ -f "$PROJECT_DIR/models/iris_with_gimbal/model.sdf" ]; then
print_info "Using LOCAL iris_with_gimbal (with downward camera)"
else
print_info "Using ardupilot_gazebo iris_with_gimbal (NO downward camera)"
fi
if [ "$SOFTWARE_RENDER" = "auto" ]; then if [ "$SOFTWARE_RENDER" = "auto" ]; then
if grep -qi microsoft /proc/version 2>/dev/null; then if grep -qi microsoft /proc/version 2>/dev/null; then
print_info "WSL detected -> software rendering" print_info "WSL detected -> software rendering"
@@ -93,15 +102,44 @@ else
exit 1 exit 1
fi fi
UGV_CONFIG="$PROJECT_DIR/config/ugv.yaml"
if [ -f "$UGV_CONFIG" ] && [ -f "$WORLD_FILE" ]; then
python3 -c "
import yaml, re
cfg = yaml.safe_load(open('$UGV_CONFIG'))
pos = cfg.get('position', {})
x, y = pos.get('x', 5.0), pos.get('y', 5.0)
with open('$WORLD_FILE', 'r') as f:
sdf = f.read()
sdf = re.sub(
r'(<model name=\"ugv_target\">\s*<static>true</static>\s*)<pose>[^<]*</pose>',
rf'\1<pose>{x} {y} 0 0 0 0</pose>',
sdf, count=1)
with open('$WORLD_FILE', 'w') as f:
f.write(sdf)
print(f'[INFO] UGV position synced from config: ({x}, {y})')
" 2>/dev/null || print_info "UGV position sync skipped"
fi
print_info "===================================" print_info "==================================="
print_info " UAV-UGV Simulation" print_info " UAV-UGV Simulation"
print_info "===================================" print_info "==================================="
print_info "World: $WORLD_FILE" print_info "World: $WORLD_FILE"
print_info "Mission: $MISSION" print_info "Mission: $MISSION"
print_info "Altitude: ${ALTITUDE}m"
echo "" echo ""
print_info "[1/3] Starting Gazebo ..." GZ_DEFAULT_GUI="/usr/share/gz/gz-sim8/gui/gui.config"
GZ_USER_GUI="$HOME/.gz/sim/8/gui.config"
CAMERA_PLUGINS="$PROJECT_DIR/config/gui.config"
rm -rf /tmp/gz-* /tmp/gazebo-* 2>/dev/null
mkdir -p "$HOME/.gz/sim/8"
cp "$GZ_DEFAULT_GUI" "$GZ_USER_GUI"
sed -i 's|<start_paused>true</start_paused>|<start_paused>false</start_paused>|' "$GZ_USER_GUI"
sed -i 's|show_again="true"|show_again="false"|' "$GZ_USER_GUI"
print_info "[1/4] Starting Gazebo ..."
gz sim -v4 -r "$WORLD_FILE" & gz sim -v4 -r "$WORLD_FILE" &
GZ_PID=$! GZ_PID=$!
sleep 10 sleep 10
@@ -112,7 +150,7 @@ if ! kill -0 $GZ_PID 2>/dev/null; then
fi fi
print_success "Gazebo running (PID: $GZ_PID)" print_success "Gazebo running (PID: $GZ_PID)"
print_info "[2/3] Starting ArduPilot SITL ..." print_info "[2/4] Starting ArduPilot SITL ..."
cd ~/ardupilot cd ~/ardupilot
SITL_ARGS="-v ArduCopter -f gazebo-iris --model JSON -I0" SITL_ARGS="-v ArduCopter -f gazebo-iris --model JSON -I0"
@@ -136,19 +174,23 @@ if ! pgrep -f "arducopter" > /dev/null 2>&1; then
fi fi
print_success "ArduPilot SITL running (TCP 5760)" print_success "ArduPilot SITL running (TCP 5760)"
print_info "[3/3] Starting main.py ..." print_info "[3/4] Starting main.py ..."
cd "$PROJECT_DIR" cd "$PROJECT_DIR"
sleep 3 sleep 3
python3 src/main.py \ MAIN_ARGS="--device sim --connection tcp:127.0.0.1:5760 --mission $MISSION"
--device sim \ [ -n "$ALTITUDE" ] && MAIN_ARGS="$MAIN_ARGS --altitude $ALTITUDE"
--connection "tcp:127.0.0.1:5760" \ [ -n "$DURATION" ] && MAIN_ARGS="$MAIN_ARGS --duration $DURATION"
--mission "$MISSION" \
--altitude "$ALTITUDE" \ python3 src/main.py $MAIN_ARGS &
--duration "$DURATION" &
MAIN_PID=$! MAIN_PID=$!
print_success "main.py running (PID: $MAIN_PID)" print_success "main.py running (PID: $MAIN_PID)"
print_info "[4/4] Starting camera viewer ..."
python3 -W ignore:RuntimeWarning -m src.vision.camera_processor down &
CAM_PID=$!
print_success "Camera viewer running (PID: $CAM_PID)"
print_info "" print_info ""
print_info "===================================" print_info "==================================="
print_info " Simulation Running" print_info " Simulation Running"
@@ -156,8 +198,10 @@ print_info "==================================="
print_info " Gazebo -> ArduPilot SITL (TCP:5760)" print_info " Gazebo -> ArduPilot SITL (TCP:5760)"
print_info " |" print_info " |"
print_info " main.py (pymavlink)" print_info " main.py (pymavlink)"
print_info " |"
print_info " camera_viewer.py (OpenCV)"
print_info "" print_info ""
print_info " Mission: $MISSION Alt: ${ALTITUDE}m" print_info " Mission: $MISSION (config from YAML)"
print_info " Press Ctrl+C to stop" print_info " Press Ctrl+C to stop"
print_info "===================================" print_info "==================================="

View File

@@ -247,6 +247,11 @@ if sudo apt-get install -y gz-harmonic 2>/dev/null; then
libgz-msgs10-dev \ libgz-msgs10-dev \
rapidjson-dev \ rapidjson-dev \
2>/dev/null || print_warning "Some Gazebo dev packages may be missing" 2>/dev/null || print_warning "Some Gazebo dev packages may be missing"
# Install Gazebo Python bindings (needed for camera/vision scripts)
sudo apt-get install -y \
python3-gz-transport13 \
python3-gz-msgs10 \
2>/dev/null || print_warning "Gazebo Python bindings not available via apt"
elif sudo apt-get install -y gz-garden 2>/dev/null; then elif sudo apt-get install -y gz-garden 2>/dev/null; then
GZ_VERSION="garden" GZ_VERSION="garden"
# Install Garden development packages # Install Garden development packages
@@ -262,6 +267,11 @@ elif sudo apt-get install -y gz-garden 2>/dev/null; then
libgz-msgs9-dev \ libgz-msgs9-dev \
rapidjson-dev \ rapidjson-dev \
2>/dev/null || print_warning "Some Gazebo dev packages may be missing" 2>/dev/null || print_warning "Some Gazebo dev packages may be missing"
# Install Gazebo Python bindings for Garden
sudo apt-get install -y \
python3-gz-transport12 \
python3-gz-msgs9 \
2>/dev/null || print_warning "Gazebo Python bindings not available via apt"
else else
print_warning "Could not install Gazebo Harmonic/Garden" print_warning "Could not install Gazebo Harmonic/Garden"
GZ_VERSION="none" GZ_VERSION="none"
@@ -283,7 +293,10 @@ if [ -d "$SCRIPT_DIR/venv" ]; then
rm -rf "$SCRIPT_DIR/venv" rm -rf "$SCRIPT_DIR/venv"
fi fi
python3 -m venv "$SCRIPT_DIR/venv" # --system-site-packages is required so the venv can access
# Gazebo Python bindings (gz.transport13, gz.msgs10) which are
# installed as system packages and cannot be pip-installed.
python3 -m venv --system-site-packages "$SCRIPT_DIR/venv"
source "$SCRIPT_DIR/venv/bin/activate" source "$SCRIPT_DIR/venv/bin/activate"
pip install --upgrade pip pip install --upgrade pip
@@ -357,7 +370,7 @@ fi
echo -e "\033[0;32mEnvironment activated (ROS 2 $ROS_VER)\033[0m" echo -e "\033[0;32mEnvironment activated (ROS 2 $ROS_VER)\033[0m"
echo "" echo ""
echo "Run simulation: bash scripts/run_simulation.sh" echo "Run simulation: bash scripts/run_autonomous.sh --mission hover"
ACTIVATE_EOF ACTIVATE_EOF
chmod +x "$SCRIPT_DIR/activate_venv.sh" chmod +x "$SCRIPT_DIR/activate_venv.sh"
@@ -478,6 +491,18 @@ if [ "$INSTALL_ARDUPILOT" = true ]; then
command -v gz &> /dev/null && echo "[OK] Gazebo (gz)" || echo "[WARN] Gazebo not found" command -v gz &> /dev/null && echo "[OK] Gazebo (gz)" || echo "[WARN] Gazebo not found"
command -v mavproxy.py &> /dev/null && echo "[OK] MAVProxy" || echo "[WARN] MAVProxy not in PATH" command -v mavproxy.py &> /dev/null && echo "[OK] MAVProxy" || echo "[WARN] MAVProxy not in PATH"
[ -f "$ARDUPILOT_GZ/build/libArduPilotPlugin.so" ] && echo "[OK] ArduPilot Gazebo plugin" || echo "[WARN] Plugin not built" [ -f "$ARDUPILOT_GZ/build/libArduPilotPlugin.so" ] && echo "[OK] ArduPilot Gazebo plugin" || echo "[WARN] Plugin not built"
# Verify Gazebo Python bindings are accessible from venv
source "$SCRIPT_DIR/venv/bin/activate"
python3 -c "from gz.transport13 import Node; print('[OK] gz.transport13 Python bindings')" 2>/dev/null || \
echo "[WARN] gz.transport13 Python bindings not found"
python3 -c "from gz.msgs10.image_pb2 import Image; print('[OK] gz.msgs10 Python bindings')" 2>/dev/null || \
echo "[WARN] gz.msgs10 Python bindings not found"
python3 -c "import pymavlink; print('[OK] pymavlink')" 2>/dev/null || \
echo "[WARN] pymavlink not found in venv"
python3 -c "import cv2; print(f'[OK] OpenCV {cv2.__version__}')" 2>/dev/null || \
echo "[WARN] OpenCV not found in venv"
deactivate
((STEP++)) ((STEP++))
fi fi
@@ -513,6 +538,6 @@ echo -e "${CYAN}To run the simulation:${NC}"
echo "" echo ""
echo " cd $SCRIPT_DIR" echo " cd $SCRIPT_DIR"
echo " source activate_venv.sh" echo " source activate_venv.sh"
echo " bash scripts/run_simulation.sh" echo " bash scripts/run_autonomous.sh --mission hover"
echo "" echo ""
echo -e "${GREEN}==========================================${NC}" echo -e "${GREEN}==========================================${NC}"

View File

@@ -123,22 +123,40 @@ class Controller:
self._drain_messages() self._drain_messages()
print("[UAV] Mode -> GUIDED_NOGPS") print("[UAV] Mode -> GUIDED_NOGPS")
def arm(self, retries: int = 10): def arm(self, retries: int = 30):
self.set_mode_guided() self.set_mode_guided()
for attempt in range(1, retries + 1): for i in range(retries):
print(f"[UAV] Arm attempt {attempt}/{retries}...") print(f"[UAV] Arm attempt {i+1}/{retries}...")
self.conn.arducopter_arm()
t0 = time.time() # Force arm: param2=21196 bypasses pre-arm checks
while time.time() - t0 < 5: # (equivalent to MAVProxy's "arm throttle force")
self.conn.mav.command_long_send(
self.conn.target_system,
self.conn.target_component,
mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
0,
1, # param1: 1 = arm
21196, # param2: force arm magic number
0, 0, 0, 0, 0)
# Wait up to 4 seconds for armed status instead of
# blocking forever with motors_armed_wait()
armed = False
for _ in range(20):
self._drain_messages() self._drain_messages()
if self.armed: if self.conn.motors_armed():
print("[UAV] Armed") armed = True
return True break
sleep(0.2) time.sleep(0.2)
self._drain_messages() if armed:
self.armed = True
print("[UAV] Armed")
return True
print(f"[UAV] Arm attempt {i+1} failed, retrying...")
time.sleep(2)
print("[UAV] FAILED to arm after all retries") print("[UAV] FAILED to arm after all retries")
return False return False
@@ -156,8 +174,12 @@ class Controller:
print(f"[UAV] Takeoff -> {altitude}m") print(f"[UAV] Takeoff -> {altitude}m")
def land(self): def land(self):
self.conn.set_mode_rtl() self.conn.mav.command_long_send(
print("[UAV] Landing (RTL)") self.conn.target_system,
self.conn.target_component,
MAV_CMD_DO_SET_MODE,
0, 89, 9, 0, 0, 0, 0, 0)
print("[UAV] Landing (LAND mode)")
def land_at(self, lat: int, lon: int): def land_at(self, lat: int, lon: int):
self.conn.mav.command_long_send( self.conn.mav.command_long_send(

View File

@@ -4,28 +4,56 @@ import sys
import os import os
import time import time
import argparse import argparse
import yaml
from time import sleep from time import sleep
from pathlib import Path
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__))) sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
from control.uav_controller import Controller, HOLD_ALT from control.uav_controller import Controller
from control.ugv_controller import UGVController from control.ugv_controller import UGVController
from utils.helpers import clamp, distance_2d, PIDController, LowPassFilter from utils.helpers import clamp, distance_2d, PIDController, LowPassFilter
from utils.transforms import normalize_angle, body_to_world, world_to_body from utils.transforms import normalize_angle, body_to_world, world_to_body
PROJECT_DIR = Path(__file__).resolve().parent.parent
CONFIG_DIR = PROJECT_DIR / "config"
def mission_hover(ctrl: Controller, altitude: float = HOLD_ALT,
duration: float = 30.0):
print("\n" + "=" * 50)
print(f" HOVER MISSION: {altitude}m for {duration}s")
print("=" * 50 + "\n")
def load_config(name: str) -> dict:
path = CONFIG_DIR / name
if not path.exists():
print(f"[WARN] Config not found: {path}")
return {}
with open(path, 'r') as f:
return yaml.safe_load(f) or {}
def load_mission(name: str) -> dict:
path = CONFIG_DIR / "missions" / f"{name}.yaml"
if not path.exists():
print(f"[WARN] Mission not found: {path}")
return {}
with open(path, 'r') as f:
return yaml.safe_load(f) or {}
def setup_ardupilot(ctrl: Controller):
ctrl.set_param('ARMING_CHECK', 0) ctrl.set_param('ARMING_CHECK', 0)
ctrl.set_param('SCHED_LOOP_RATE', 200) ctrl.set_param('SCHED_LOOP_RATE', 200)
ctrl.set_param('FS_THR_ENABLE', 0) ctrl.set_param('FS_THR_ENABLE', 0)
ctrl.set_param('FS_GCS_ENABLE', 0) ctrl.set_param('FS_GCS_ENABLE', 0)
sleep(2) sleep(2)
def mission_hover(ctrl: Controller, uav_cfg: dict, mission_cfg: dict):
altitude = mission_cfg.get('altitude', uav_cfg['flight']['takeoff_altitude'])
duration = mission_cfg.get('duration', 30.0)
print("\n" + "=" * 50)
print(f" HOVER MISSION: {altitude}m for {duration}s")
print("=" * 50 + "\n")
setup_ardupilot(ctrl)
ctrl.wait_for_gps() ctrl.wait_for_gps()
if not ctrl.arm(): if not ctrl.arm():
@@ -46,29 +74,18 @@ def mission_hover(ctrl: Controller, altitude: float = HOLD_ALT,
print("\n[UAV] Hover complete") print("\n[UAV] Hover complete")
ctrl.land() ctrl.land()
wait_for_landing(ctrl)
print("[UAV] Waiting for landing ...")
t0 = time.time()
while time.time() - t0 < 30:
ctrl.update_state()
if ctrl.altitude < 0.3:
break
sleep(0.5)
print("[UAV] Landed!")
def mission_square(ctrl: Controller, altitude: float = HOLD_ALT, def mission_square(ctrl: Controller, uav_cfg: dict, mission_cfg: dict):
side: float = 5.0): altitude = mission_cfg.get('altitude', uav_cfg['flight']['takeoff_altitude'])
side = mission_cfg.get('side_length', 5.0)
print("\n" + "=" * 50) print("\n" + "=" * 50)
print(f" SQUARE MISSION: {side}m sides at {altitude}m") print(f" SQUARE MISSION: {side}m sides at {altitude}m")
print("=" * 50 + "\n") print("=" * 50 + "\n")
ctrl.set_param('ARMING_CHECK', 0) setup_ardupilot(ctrl)
ctrl.set_param('SCHED_LOOP_RATE', 200)
ctrl.set_param('FS_THR_ENABLE', 0)
ctrl.set_param('FS_GCS_ENABLE', 0)
sleep(2)
ctrl.wait_for_gps() ctrl.wait_for_gps()
if not ctrl.arm(): if not ctrl.arm():
@@ -92,29 +109,23 @@ def mission_square(ctrl: Controller, altitude: float = HOLD_ALT,
print("[UAV] Square complete") print("[UAV] Square complete")
ctrl.land() ctrl.land()
wait_for_landing(ctrl)
print("[UAV] Waiting for landing ...")
t0 = time.time()
while time.time() - t0 < 30:
ctrl.update_state()
if ctrl.altitude < 0.3:
break
sleep(0.5)
print("[UAV] Landed!")
def mission_search(ctrl: Controller, ugv: UGVController, def mission_search(ctrl: Controller, ugv: UGVController,
altitude: float = HOLD_ALT): uav_cfg: dict, mission_cfg: dict):
altitude = mission_cfg.get('altitude', uav_cfg['flight']['takeoff_altitude'])
search_cfg = mission_cfg.get('search', {})
initial_leg = search_cfg.get('initial_leg', 3.0)
leg_increment = search_cfg.get('leg_increment', 2.0)
max_legs = search_cfg.get('max_legs', 12)
detection_radius = search_cfg.get('detection_radius', 2.0)
print("\n" + "=" * 50) print("\n" + "=" * 50)
print(f" SEARCH MISSION at {altitude}m") print(f" SEARCH MISSION at {altitude}m")
print("=" * 50 + "\n") print("=" * 50 + "\n")
ctrl.set_param('ARMING_CHECK', 0) setup_ardupilot(ctrl)
ctrl.set_param('SCHED_LOOP_RATE', 200)
ctrl.set_param('FS_THR_ENABLE', 0)
ctrl.set_param('FS_GCS_ENABLE', 0)
sleep(2)
ctrl.wait_for_gps() ctrl.wait_for_gps()
if not ctrl.arm(): if not ctrl.arm():
@@ -127,30 +138,23 @@ def mission_search(ctrl: Controller, ugv: UGVController,
ugv_pos = ugv.get_position() ugv_pos = ugv.get_position()
print(f"[UAV] UGV target at ({ugv_pos['x']:.1f}, {ugv_pos['y']:.1f})") print(f"[UAV] UGV target at ({ugv_pos['x']:.1f}, {ugv_pos['y']:.1f})")
distance_step = 3.0 distance_step = initial_leg
increment = 2.0
travel_x = True travel_x = True
direction = 1 direction = 1
MAX_LEGS = 12
for leg in range(MAX_LEGS): for leg in range(max_legs):
ctrl.update_state() ctrl.update_state()
uav_pos = ctrl.get_local_position() uav_pos = ctrl.get_local_position()
dist_to_ugv = distance_2d( dist_to_ugv = distance_2d(
(uav_pos['x'], uav_pos['y']), (uav_pos['x'], uav_pos['y']),
(ugv_pos['x'], ugv_pos['y']) (ugv_pos['x'], ugv_pos['y'])
) )
print(f"[UAV] Spiral leg {leg+1}/{MAX_LEGS} dist_to_ugv: {dist_to_ugv:.1f}m") print(f"[UAV] Spiral leg {leg+1}/{max_legs} dist_to_ugv: {dist_to_ugv:.1f}m")
if dist_to_ugv < 2.0: if dist_to_ugv < detection_radius:
print("[UAV] UGV found! Landing nearby.") print("[UAV] UGV found! Landing.")
ctrl.land() ctrl.land()
t0 = time.time() wait_for_landing(ctrl)
while time.time() - t0 < 30:
ctrl.update_state()
if ctrl.altitude < 0.3:
break
sleep(0.5)
print("[UAV] Landed on UGV!") print("[UAV] Landed on UGV!")
return return
@@ -159,55 +163,82 @@ def mission_search(ctrl: Controller, ugv: UGVController,
else: else:
ctrl.move_pos_rel(0, distance_step * direction, 0) ctrl.move_pos_rel(0, distance_step * direction, 0)
direction *= -1 direction *= -1
distance_step += increment distance_step += leg_increment
travel_x = not travel_x travel_x = not travel_x
sleep(4) sleep(4)
print("[UAV] Search complete - UGV not found, landing") print("[UAV] Search complete - UGV not found, landing")
ctrl.land() ctrl.land()
wait_for_landing(ctrl)
def wait_for_landing(ctrl: Controller, timeout: float = 60.0):
print("[UAV] Waiting for landing ...")
t0 = time.time() t0 = time.time()
while time.time() - t0 < 30: while time.time() - t0 < timeout:
ctrl.update_state() ctrl.update_state()
elapsed = int(time.time() - t0)
print(f"\r[UAV] Descending: {ctrl.altitude:.1f}m ({elapsed}s) ",
end='', flush=True)
if ctrl.altitude < 0.5 and not ctrl.armed:
break
if ctrl.altitude < 0.3: if ctrl.altitude < 0.3:
break break
sleep(0.5) sleep(0.5)
print("[UAV] Landed!") print(f"\n[UAV] Landed! (alt: {ctrl.altitude:.2f}m)")
def main(): def main():
parser = argparse.ArgumentParser(description='UAV-UGV Simulation') parser = argparse.ArgumentParser(description='UAV-UGV Simulation')
parser.add_argument('--device', default='sim', choices=['sim', 'real']) parser.add_argument('--device', default='sim', choices=['sim', 'real'])
parser.add_argument('--connection', default=None) parser.add_argument('--connection', default=None)
parser.add_argument('--mission', default='hover', choices=['hover', 'square', 'search']) parser.add_argument('--mission', default='hover')
parser.add_argument('--altitude', type=float, default=HOLD_ALT) parser.add_argument('--altitude', type=float, default=None)
parser.add_argument('--duration', type=float, default=30.0) parser.add_argument('--duration', type=float, default=None)
parser.add_argument('--ugv-x', type=float, default=10.0)
parser.add_argument('--ugv-y', type=float, default=5.0)
parser.add_argument('--ugv-connection', default=None)
args = parser.parse_args() args = parser.parse_args()
uav_cfg = load_config('uav.yaml')
ugv_cfg = load_config('ugv.yaml')
props = load_config('properties.yaml')
mission_cfg = load_mission(args.mission)
if args.altitude is not None:
mission_cfg['altitude'] = args.altitude
if args.duration is not None:
mission_cfg['duration'] = args.duration
if args.connection: if args.connection:
conn_str = args.connection conn_str = args.connection
elif args.device == 'real': elif args.device == 'real':
conn_str = '/dev/ttyAMA0' conn_str = uav_cfg.get('connection', {}).get('real', '/dev/ttyAMA0')
else: else:
conn_str = 'tcp:127.0.0.1:5760' conn_str = uav_cfg.get('connection', {}).get('sim', 'tcp:127.0.0.1:5760')
ugv_pos = ugv_cfg.get('position', {})
ugv = UGVController( ugv = UGVController(
connection_string=args.ugv_connection, connection_string=ugv_cfg.get('connection', {}).get('sim'),
static_pos=(args.ugv_x, args.ugv_y), static_pos=(ugv_pos.get('x', 10.0), ugv_pos.get('y', 5.0)),
) )
ctrl = Controller(conn_str) ctrl = Controller(conn_str)
if args.mission == 'hover': print(f"[MAIN] Config loaded from {CONFIG_DIR}")
mission_hover(ctrl, altitude=args.altitude, duration=args.duration) print(f"[MAIN] Mission: {args.mission}")
elif args.mission == 'square':
mission_square(ctrl, altitude=args.altitude) missions = {
elif args.mission == 'search': 'hover': lambda: mission_hover(ctrl, uav_cfg, mission_cfg),
mission_search(ctrl, ugv, altitude=args.altitude) 'square': lambda: mission_square(ctrl, uav_cfg, mission_cfg),
'search': lambda: mission_search(ctrl, ugv, uav_cfg, mission_cfg),
}
runner = missions.get(args.mission)
if runner:
runner()
else:
print(f"[MAIN] Unknown mission: {args.mission}")
print(f"[MAIN] Available: {list(missions.keys())}")
return
print("[MAIN] Mission finished.") print("[MAIN] Mission finished.")

View File

@@ -1,227 +1,152 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
""" """
Camera Processor Node Camera Processor - Handles Gazebo camera feeds for GPS-denied navigation.
Handles camera feed processing for GPS-denied navigation Subscribes to gz-transport topics, processes frames, and displays via OpenCV.
Downstream modules (ArUco detector, optical flow) register as callbacks.
""" """
import rclpy import sys
from rclpy.node import Node import signal
from sensor_msgs.msg import Image, CameraInfo import time
from std_msgs.msg import Header
from cv_bridge import CvBridge
import cv2
import numpy as np import numpy as np
import cv2
from gz.transport13 import Node
from gz.msgs10.image_pb2 import Image
# PixelFormatType enum values (from gz.msgs10 Image proto)
PF_RGB_INT8 = 3
PF_BGR_INT8 = 8
PF_R_FLOAT32 = 13
class CameraProcessor(Node): class CameraProcessor:
""" def __init__(self, topics=None, show_gui=True):
Processes camera feeds for visual odometry and obstacle detection. self.node = Node()
Outputs processed images for downstream nodes. self.show_gui = show_gui
""" self.frames = {}
self.callbacks = {}
def __init__(self): self.running = True
super().__init__('camera_processor')
if topics is None:
self.bridge = CvBridge() topics = {
"downward": "/uav/camera/downward",
# Camera parameters "gimbal": "/world/uav_ugv_search/model/iris_with_gimbal/model/gimbal/link/pitch_link/sensor/camera/image",
self.camera_matrix = None }
self.dist_coeffs = None
self.image_size = None self.topics = topics
# Image processing parameters for name, topic in self.topics.items():
self.declare_parameter('undistort', True) self.callbacks[name] = []
self.declare_parameter('grayscale_output', True) ok = self.node.subscribe(Image, topic, self._make_gz_callback(name))
self.declare_parameter('histogram_equalization', True) if ok:
self.declare_parameter('resize_factor', 1.0) print(f"[CAM] Subscribed: {name} -> {topic}")
else:
self.undistort = self.get_parameter('undistort').value print(f"[CAM] Failed: {topic}")
self.grayscale_output = self.get_parameter('grayscale_output').value
self.histogram_eq = self.get_parameter('histogram_equalization').value signal.signal(signal.SIGINT, lambda s, f: self.stop())
self.resize_factor = self.get_parameter('resize_factor').value
def _make_gz_callback(self, name):
# Undistort maps (computed once) def cb(msg):
self.map1 = None fmt = msg.pixel_format_type
self.map2 = None if fmt == PF_RGB_INT8:
arr = np.frombuffer(msg.data, dtype=np.uint8).reshape(
# Subscribers - Forward camera msg.height, msg.width, 3
self.forward_image_sub = self.create_subscription(
Image,
'/uav/camera/forward/image_raw',
self.forward_image_callback,
10
)
self.forward_info_sub = self.create_subscription(
CameraInfo,
'/uav/camera/forward/camera_info',
self.camera_info_callback,
10
)
# Subscribers - Downward camera
self.downward_image_sub = self.create_subscription(
Image,
'/uav/camera/downward/image_raw',
self.downward_image_callback,
10
)
# Publishers - Processed images
self.forward_processed_pub = self.create_publisher(
Image,
'/uav/camera/forward/image_processed',
10
)
self.downward_processed_pub = self.create_publisher(
Image,
'/uav/camera/downward/image_processed',
10
)
# Debug visualization
self.debug_pub = self.create_publisher(
Image,
'/uav/camera/debug',
10
)
self.get_logger().info('Camera Processor Node Started')
def camera_info_callback(self, msg: CameraInfo):
"""Extract and store camera calibration parameters."""
if self.camera_matrix is None:
self.camera_matrix = np.array(msg.k).reshape(3, 3)
self.dist_coeffs = np.array(msg.d)
self.image_size = (msg.width, msg.height)
# Compute undistortion maps
if self.undistort and self.dist_coeffs is not None:
new_camera_matrix, _ = cv2.getOptimalNewCameraMatrix(
self.camera_matrix,
self.dist_coeffs,
self.image_size,
alpha=0
) )
self.map1, self.map2 = cv2.initUndistortRectifyMap( bgr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
self.camera_matrix, elif fmt == PF_BGR_INT8:
self.dist_coeffs, arr = np.frombuffer(msg.data, dtype=np.uint8).reshape(
None, msg.height, msg.width, 3
new_camera_matrix,
self.image_size,
cv2.CV_16SC2
) )
bgr = arr
self.get_logger().info( elif fmt == PF_R_FLOAT32:
f'Camera calibration received: {self.image_size[0]}x{self.image_size[1]}' arr = np.frombuffer(msg.data, dtype=np.float32).reshape(
) msg.height, msg.width
)
def process_image(self, image: np.ndarray) -> np.ndarray: normalized = cv2.normalize(arr, None, 0, 255, cv2.NORM_MINMAX)
""" bgr = cv2.cvtColor(normalized.astype(np.uint8), cv2.COLOR_GRAY2BGR)
Apply image processing pipeline. else:
return
Args:
image: Input BGR image processed = self.process_image(bgr)
self.frames[name] = processed
Returns:
Processed image for fn in self.callbacks.get(name, []):
""" fn(name, processed)
return cb
def process_image(self, image):
processed = image.copy() processed = image.copy()
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
# 1. Undistort if calibration available lab = cv2.cvtColor(processed, cv2.COLOR_BGR2LAB)
if self.undistort and self.map1 is not None: lab[:, :, 0] = clahe.apply(lab[:, :, 0])
processed = cv2.remap(processed, self.map1, self.map2, cv2.INTER_LINEAR) processed = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
# 2. Resize if needed
if self.resize_factor != 1.0:
new_size = (
int(processed.shape[1] * self.resize_factor),
int(processed.shape[0] * self.resize_factor)
)
processed = cv2.resize(processed, new_size, interpolation=cv2.INTER_AREA)
# 3. Convert to grayscale if requested
if self.grayscale_output:
if len(processed.shape) == 3:
processed = cv2.cvtColor(processed, cv2.COLOR_BGR2GRAY)
# 4. Histogram equalization for better feature detection
if self.histogram_eq:
if len(processed.shape) == 2:
# CLAHE for better results than standard histogram equalization
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
processed = clahe.apply(processed)
else:
# For color images, apply to L channel in LAB
lab = cv2.cvtColor(processed, cv2.COLOR_BGR2LAB)
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
lab[:, :, 0] = clahe.apply(lab[:, :, 0])
processed = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)
return processed return processed
def forward_image_callback(self, msg: Image): def register_callback(self, camera_name, fn):
"""Process forward camera images for visual odometry.""" if camera_name in self.callbacks:
try: self.callbacks[camera_name].append(fn)
# Convert ROS Image to OpenCV print(f"[CAM] Registered callback for {camera_name}: {fn.__name__}")
cv_image = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
def stop(self):
# Process image self.running = False
processed = self.process_image(cv_image)
def spin(self):
# Convert back to ROS Image print("[CAM] Running. Press 'q' to quit, 's' to screenshot.")
if len(processed.shape) == 2: while self.running:
encoding = 'mono8' for name, frame in list(self.frames.items()):
else: cv2.imshow(name, frame)
encoding = 'bgr8'
key = cv2.waitKey(33) & 0xFF
processed_msg = self.bridge.cv2_to_imgmsg(processed, encoding) if key == ord("q"):
processed_msg.header = msg.header break
elif key == ord("s"):
# Publish processed image for name, frame in self.frames.items():
self.forward_processed_pub.publish(processed_msg) fname = f"{name}_{int(time.time())}.png"
cv2.imwrite(fname, frame)
except Exception as e: print(f"[CAM] Saved {fname}")
self.get_logger().error(f'Forward image processing error: {e}')
cv2.destroyAllWindows()
def downward_image_callback(self, msg: Image): print("[CAM] Stopped.")
"""Process downward camera images for optical flow."""
try: def spin_headless(self):
# Convert ROS Image to OpenCV print("[CAM] Running headless (no GUI).")
cv_image = self.bridge.imgmsg_to_cv2(msg, 'bgr8') while self.running:
time.sleep(0.1)
# Process image print("[CAM] Stopped.")
processed = self.process_image(cv_image)
def get_frame(self, camera_name):
# Convert back to ROS Image return self.frames.get(camera_name)
if len(processed.shape) == 2:
encoding = 'mono8'
else:
encoding = 'bgr8'
processed_msg = self.bridge.cv2_to_imgmsg(processed, encoding)
processed_msg.header = msg.header
# Publish processed image
self.downward_processed_pub.publish(processed_msg)
except Exception as e:
self.get_logger().error(f'Downward image processing error: {e}')
def main(args=None): def main():
rclpy.init(args=args) cameras = "both"
node = CameraProcessor() show_gui = True
try: if len(sys.argv) > 1:
rclpy.spin(node) cameras = sys.argv[1].lower()
except KeyboardInterrupt: if "--headless" in sys.argv:
pass show_gui = False
finally:
node.destroy_node() all_topics = {
rclpy.shutdown() "downward": "/uav/camera/downward",
"gimbal": "/world/uav_ugv_search/model/iris_with_gimbal/model/gimbal/link/pitch_link/sensor/camera/image",
}
if cameras == "down":
topics = {"downward": all_topics["downward"]}
elif cameras == "gimbal":
topics = {"gimbal": all_topics["gimbal"]}
else:
topics = all_topics
proc = CameraProcessor(topics=topics, show_gui=show_gui)
if show_gui:
proc.spin()
else:
proc.spin_headless()
if __name__ == '__main__': if __name__ == "__main__":
main() main()

View File

@@ -1,132 +1,126 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
"""Object Detector - Visual landmark and obstacle detection.""" """
Object Detector - ArUco marker and feature detection.
Registers as a callback on CameraProcessor to receive processed frames.
"""
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from geometry_msgs.msg import PoseArray, Pose
from std_msgs.msg import Float32MultiArray
from cv_bridge import CvBridge
import cv2 import cv2
import numpy as np import numpy as np
from scipy.spatial.transform import Rotation
class ObjectDetector(Node): class ObjectDetector:
def __init__(self, marker_size=0.15, camera_matrix=None, detection_method="ArUco"):
def __init__(self): self.detection_method = detection_method
super().__init__('object_detector') self.marker_size = marker_size
self.bridge = CvBridge() if camera_matrix is not None:
self.camera_matrix = np.array(camera_matrix).reshape(3, 3)
self.declare_parameter('detection_method', 'ArUco') else:
self.declare_parameter('marker_size', 0.15) self.camera_matrix = np.array(
self.declare_parameter('camera_matrix', [500.0, 0.0, 320.0, 0.0, 500.0, 240.0, 0.0, 0.0, 1.0]) [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]]
)
self.detection_method = self.get_parameter('detection_method').value
self.marker_size = self.get_parameter('marker_size').value
camera_matrix_flat = self.get_parameter('camera_matrix').value
self.camera_matrix = np.array(camera_matrix_flat).reshape(3, 3)
self.dist_coeffs = np.zeros(5) self.dist_coeffs = np.zeros(5)
if self.detection_method == 'ArUco': if self.detection_method == "ArUco":
self.aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50) self.aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
self.aruco_params = cv2.aruco.DetectorParameters() self.aruco_params = cv2.aruco.DetectorParameters()
self.aruco_detector = cv2.aruco.ArucoDetector(self.aruco_dict, self.aruco_params) self.aruco_detector = cv2.aruco.ArucoDetector(
self.aruco_dict, self.aruco_params
self.image_sub = self.create_subscription( )
Image, '/uav/camera/forward/image_raw', self.image_callback, 10)
self.last_detections = []
self.landmarks_pub = self.create_publisher(PoseArray, '/uav/landmarks/poses', 10) self.on_detection = None
self.marker_ids_pub = self.create_publisher(Float32MultiArray, '/uav/landmarks/ids', 10)
self.debug_image_pub = self.create_publisher(Image, '/uav/landmarks/debug', 10) print(f"[DET] Object Detector initialized ({self.detection_method})")
self.get_logger().info(f'Object Detector Started - {self.detection_method}') def detect(self, camera_name, frame):
if self.detection_method == "ArUco":
def image_callback(self, msg): detections, annotated = self.detect_aruco(frame)
try: else:
frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8') detections, annotated = self.detect_orb(frame)
if self.detection_method == 'ArUco': self.last_detections = detections
self.detect_aruco(frame, msg.header)
else: if detections and self.on_detection:
self.detect_orb_features(frame, msg.header) self.on_detection(detections)
except Exception as e: if annotated is not None:
self.get_logger().error(f'Detection error: {e}') cv2.imshow(f"{camera_name} [detections]", annotated)
def detect_aruco(self, frame, header): return detections
def detect_aruco(self, frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, rejected = self.aruco_detector.detectMarkers(gray) corners, ids, rejected = self.aruco_detector.detectMarkers(gray)
pose_array = PoseArray() detections = []
pose_array.header = header annotated = frame.copy()
pose_array.header.frame_id = 'camera_link'
id_array = Float32MultiArray()
if ids is not None: if ids is not None:
rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers( rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
corners, self.marker_size, self.camera_matrix, self.dist_coeffs) corners, self.marker_size, self.camera_matrix, self.dist_coeffs
)
for i, marker_id in enumerate(ids.flatten()): for i, marker_id in enumerate(ids.flatten()):
pose = Pose() tvec = tvecs[i][0]
pose.position.x = float(tvecs[i][0][0]) rvec = rvecs[i][0]
pose.position.y = float(tvecs[i][0][1])
pose.position.z = float(tvecs[i][0][2]) rotation_matrix, _ = cv2.Rodrigues(rvec)
rotation_matrix, _ = cv2.Rodrigues(rvecs[i])
from scipy.spatial.transform import Rotation
r = Rotation.from_matrix(rotation_matrix) r = Rotation.from_matrix(rotation_matrix)
quat = r.as_quat() quat = r.as_quat()
pose.orientation.x = quat[0]
pose.orientation.y = quat[1] center = np.mean(corners[i][0], axis=0)
pose.orientation.z = quat[2]
pose.orientation.w = quat[3] detection = {
"id": int(marker_id),
pose_array.poses.append(pose) "position": tvec.tolist(),
id_array.data.append(float(marker_id)) "orientation_quat": quat.tolist(),
"rvec": rvec.tolist(),
debug_frame = cv2.aruco.drawDetectedMarkers(frame.copy(), corners, ids) "tvec": tvec.tolist(),
"corners": corners[i][0].tolist(),
"center_px": center.tolist(),
"distance": float(np.linalg.norm(tvec)),
}
detections.append(detection)
cv2.aruco.drawDetectedMarkers(annotated, corners, ids)
for i in range(len(rvecs)): for i in range(len(rvecs)):
cv2.drawFrameAxes(debug_frame, self.camera_matrix, self.dist_coeffs, cv2.drawFrameAxes(
rvecs[i], tvecs[i], self.marker_size * 0.5) annotated,
self.camera_matrix,
debug_msg = self.bridge.cv2_to_imgmsg(debug_frame, 'bgr8') self.dist_coeffs,
debug_msg.header = header rvecs[i],
self.debug_image_pub.publish(debug_msg) tvecs[i],
self.marker_size * 0.5,
self.landmarks_pub.publish(pose_array) )
self.marker_ids_pub.publish(id_array)
d = detections[i]
def detect_orb_features(self, frame, header): label = f"ID:{d['id']} d:{d['distance']:.2f}m"
pos = (int(d["center_px"][0]), int(d["center_px"][1]) - 10)
cv2.putText(
annotated, label, pos, cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2
)
return detections, annotated
def detect_orb(self, frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
orb = cv2.ORB_create(nfeatures=500) orb = cv2.ORB_create(nfeatures=500)
keypoints, descriptors = orb.detectAndCompute(gray, None) keypoints, descriptors = orb.detectAndCompute(gray, None)
pose_array = PoseArray() detections = []
pose_array.header = header
for kp in keypoints[:50]: for kp in keypoints[:50]:
pose = Pose() detections.append(
pose.position.x = float(kp.pt[0]) {
pose.position.y = float(kp.pt[1]) "type": "feature",
pose.position.z = 0.0 "center_px": [kp.pt[0], kp.pt[1]],
pose_array.poses.append(pose) "size": kp.size,
"response": kp.response,
self.landmarks_pub.publish(pose_array) }
)
annotated = cv2.drawKeypoints(
def main(args=None): frame, keypoints[:50], None, color=(0, 255, 0), flags=0
rclpy.init(args=args) )
node = ObjectDetector() return detections, annotated
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
finally:
node.destroy_node()
rclpy.shutdown()
if __name__ == '__main__':
main()

View File

@@ -1,128 +1,85 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
"""Optical Flow - Velocity estimation from downward camera.""" """
Optical Flow - Velocity estimation from downward camera.
Lucas-Kanade sparse optical flow for GPS-denied velocity estimation.
"""
import rclpy import time
from rclpy.node import Node
from sensor_msgs.msg import Image, Range
from geometry_msgs.msg import TwistStamped
from cv_bridge import CvBridge
import cv2 import cv2
import numpy as np import numpy as np
class OpticalFlowNode(Node): class OpticalFlowEstimator:
def __init__(self, focal_length=500.0, min_altitude=0.3, max_altitude=10.0):
def __init__(self): self.focal_length = focal_length
super().__init__('optical_flow_node') self.min_altitude = min_altitude
self.max_altitude = max_altitude
self.bridge = CvBridge()
self.prev_frame = None self.prev_frame = None
self.prev_points = None self.prev_points = None
self.current_altitude = 1.0
self.declare_parameter('window_size', 15)
self.declare_parameter('max_level', 3)
self.declare_parameter('min_altitude', 0.3)
self.declare_parameter('max_altitude', 10.0)
self.declare_parameter('focal_length', 500.0)
self.window_size = self.get_parameter('window_size').value
self.max_level = self.get_parameter('max_level').value
self.min_altitude = self.get_parameter('min_altitude').value
self.max_altitude = self.get_parameter('max_altitude').value
self.focal_length = self.get_parameter('focal_length').value
self.lk_params = dict(
winSize=(self.window_size, self.window_size),
maxLevel=self.max_level,
criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03)
)
self.feature_params = dict(
maxCorners=100,
qualityLevel=0.3,
minDistance=7,
blockSize=7
)
self.velocity = np.zeros(2)
self.prev_time = None self.prev_time = None
self.current_altitude = 1.0
self.image_sub = self.create_subscription(
Image, '/uav/camera/downward/image_raw', self.image_callback, 10)
self.altitude_sub = self.create_subscription(
Range, '/uav/rangefinder/range', self.altitude_callback, 10)
self.velocity_pub = self.create_publisher(
TwistStamped, '/uav/optical_flow/velocity', 10)
self.get_logger().info('Optical Flow Node Started')
def altitude_callback(self, msg):
if msg.range > self.min_altitude and msg.range < self.max_altitude:
self.current_altitude = msg.range
def image_callback(self, msg):
try:
frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
current_time = self.get_clock().now()
if self.prev_frame is not None and self.prev_points is not None:
new_points, status, error = cv2.calcOpticalFlowPyrLK(
self.prev_frame, gray, self.prev_points, None, **self.lk_params)
if new_points is not None:
good_new = new_points[status == 1]
good_old = self.prev_points[status == 1]
if len(good_new) > 10:
flow = good_new - good_old
avg_flow = np.mean(flow, axis=0)
if self.prev_time is not None:
dt = (current_time - self.prev_time).nanoseconds / 1e9
if dt > 0:
pixel_velocity = avg_flow / dt
self.velocity[0] = pixel_velocity[0] * self.current_altitude / self.focal_length
self.velocity[1] = pixel_velocity[1] * self.current_altitude / self.focal_length
self.publish_velocity()
self.prev_points = cv2.goodFeaturesToTrack(gray, **self.feature_params)
self.prev_frame = gray
self.prev_time = current_time
except Exception as e:
self.get_logger().error(f'Optical flow error: {e}')
def publish_velocity(self):
vel_msg = TwistStamped()
vel_msg.header.stamp = self.get_clock().now().to_msg()
vel_msg.header.frame_id = 'base_link'
vel_msg.twist.linear.x = float(self.velocity[0])
vel_msg.twist.linear.y = float(self.velocity[1])
vel_msg.twist.linear.z = 0.0
self.velocity_pub.publish(vel_msg)
self.velocity = np.zeros(2)
def main(args=None): self.lk_params = dict(
rclpy.init(args=args) winSize=(15, 15),
node = OpticalFlowNode() maxLevel=3,
try: criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03),
rclpy.spin(node) )
except KeyboardInterrupt:
pass
finally:
node.destroy_node()
rclpy.shutdown()
self.feature_params = dict(
maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7
)
if __name__ == '__main__': self.on_velocity = None
main()
print("[OF] Optical Flow Estimator initialized")
def set_altitude(self, altitude):
if self.min_altitude < altitude < self.max_altitude:
self.current_altitude = altitude
def process_frame(self, camera_name, frame):
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
current_time = time.time()
if self.prev_frame is not None and self.prev_points is not None:
new_points, status, error = cv2.calcOpticalFlowPyrLK(
self.prev_frame, gray, self.prev_points, None, **self.lk_params
)
if new_points is not None:
good_new = new_points[status.flatten() == 1]
good_old = self.prev_points[status.flatten() == 1]
if len(good_new) > 10:
flow = good_new - good_old
avg_flow = np.mean(flow, axis=0)
if self.prev_time is not None:
dt = current_time - self.prev_time
if dt > 0:
pixel_velocity = avg_flow / dt
self.velocity[0] = (
pixel_velocity[0]
* self.current_altitude
/ self.focal_length
)
self.velocity[1] = (
pixel_velocity[1]
* self.current_altitude
/ self.focal_length
)
if self.on_velocity:
self.on_velocity(self.velocity)
self.prev_points = cv2.goodFeaturesToTrack(gray, **self.feature_params)
self.prev_frame = gray
self.prev_time = current_time
return self.velocity
def get_velocity(self):
return self.velocity.copy()

View File

@@ -1,173 +1,119 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
"""Visual Odometry - Camera-based position estimation without GPS.""" """
Visual Odometry - Camera-based position estimation without GPS.
ORB/SIFT feature matching with essential matrix decomposition.
"""
import rclpy import time
from rclpy.node import Node
from sensor_msgs.msg import Image, CameraInfo
from geometry_msgs.msg import PoseStamped, TwistStamped
from cv_bridge import CvBridge
import cv2 import cv2
import numpy as np import numpy as np
from scipy.spatial.transform import Rotation from scipy.spatial.transform import Rotation
class VisualOdometryNode(Node): class VisualOdometry:
def __init__(self, camera_matrix=None, detector_type="ORB", min_features=100):
def __init__(self): self.detector_type = detector_type
super().__init__('visual_odometry_node') self.min_features = min_features
self.bridge = CvBridge() if camera_matrix is not None:
self.camera_matrix = np.array(camera_matrix).reshape(3, 3)
self.prev_frame = None else:
self.prev_keypoints = None self.camera_matrix = np.array(
self.prev_descriptors = None [[500.0, 0.0, 320.0], [0.0, 500.0, 240.0], [0.0, 0.0, 1.0]]
)
self.camera_matrix = None self.dist_coeffs = np.zeros(5)
self.dist_coeffs = None
if detector_type == "ORB":
self.current_pose = np.eye(4)
self.position = np.zeros(3)
self.orientation = np.eye(3)
self.prev_time = None
self.velocity = np.zeros(3)
self.detector_type = self.declare_parameter('detector_type', 'ORB').value
self.min_features = self.declare_parameter('min_features', 100).value
self.feature_quality = self.declare_parameter('feature_quality', 0.01).value
if self.detector_type == 'ORB':
self.detector = cv2.ORB_create(nfeatures=500) self.detector = cv2.ORB_create(nfeatures=500)
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False) self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
elif self.detector_type == 'SIFT': elif detector_type == "SIFT":
self.detector = cv2.SIFT_create(nfeatures=500) self.detector = cv2.SIFT_create(nfeatures=500)
self.matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False) self.matcher = cv2.BFMatcher(cv2.NORM_L2, crossCheck=False)
else: else:
self.detector = cv2.ORB_create(nfeatures=500) self.detector = cv2.ORB_create(nfeatures=500)
self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False) self.matcher = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=False)
self.image_sub = self.create_subscription( self.prev_frame = None
Image, '/uav/camera/forward/image_raw', self.image_callback, 10) self.prev_keypoints = None
self.prev_descriptors = None
self.camera_info_sub = self.create_subscription( self.prev_time = None
CameraInfo, '/uav/camera/forward/camera_info', self.camera_info_callback, 10)
self.position = np.zeros(3)
self.pose_pub = self.create_publisher(PoseStamped, '/uav/visual_odometry/pose', 10) self.orientation = np.eye(3)
self.velocity_pub = self.create_publisher(TwistStamped, '/uav/visual_odometry/velocity', 10) self.velocity = np.zeros(3)
self.timer = self.create_timer(0.033, self.publish_pose) self.on_pose = None
self.get_logger().info(f'Visual Odometry Node Started - {self.detector_type} detector') print(f"[VO] Visual Odometry initialized ({detector_type})")
def camera_info_callback(self, msg): def process_frame(self, camera_name, frame):
if self.camera_matrix is None: gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
self.camera_matrix = np.array(msg.k).reshape(3, 3) keypoints, descriptors = self.detector.detectAndCompute(gray, None)
self.dist_coeffs = np.array(msg.d) current_time = time.time()
self.get_logger().info('Camera calibration received')
if (
def image_callback(self, msg): self.prev_frame is not None
try: and self.prev_descriptors is not None
frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8') and len(keypoints) >= self.min_features
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY) ):
matches = self._match_features(self.prev_descriptors, descriptors)
keypoints, descriptors = self.detector.detectAndCompute(gray, None) if len(matches) >= self.min_features:
self._estimate_motion(
current_time = self.get_clock().now() self.prev_keypoints, keypoints, matches, current_time
)
if self.prev_frame is not None and len(keypoints) >= self.min_features:
matches = self.match_features(self.prev_descriptors, descriptors) self.prev_frame = gray
self.prev_keypoints = keypoints
if len(matches) >= self.min_features: self.prev_descriptors = descriptors
self.estimate_motion(self.prev_keypoints, keypoints, matches, current_time) self.prev_time = current_time
self.prev_frame = gray def _match_features(self, desc1, desc2):
self.prev_keypoints = keypoints
self.prev_descriptors = descriptors
self.prev_time = current_time
except Exception as e:
self.get_logger().error(f'Visual odometry error: {e}')
def match_features(self, desc1, desc2):
if desc1 is None or desc2 is None: if desc1 is None or desc2 is None:
return [] return []
matches = self.matcher.knnMatch(desc1, desc2, k=2) matches = self.matcher.knnMatch(desc1, desc2, k=2)
good = []
good_matches = [] for pair in matches:
for match_pair in matches: if len(pair) == 2:
if len(match_pair) == 2: m, n = pair
m, n = match_pair
if m.distance < 0.7 * n.distance: if m.distance < 0.7 * n.distance:
good_matches.append(m) good.append(m)
return good
return good_matches
def _estimate_motion(self, prev_kp, curr_kp, matches, current_time):
def estimate_motion(self, prev_kp, curr_kp, matches, current_time): if len(matches) < 5:
if self.camera_matrix is None or len(matches) < 5:
return return
pts1 = np.float32([prev_kp[m.queryIdx].pt for m in matches]) pts1 = np.float32([prev_kp[m.queryIdx].pt for m in matches])
pts2 = np.float32([curr_kp[m.trainIdx].pt for m in matches]) pts2 = np.float32([curr_kp[m.trainIdx].pt for m in matches])
E, mask = cv2.findEssentialMat( E, mask = cv2.findEssentialMat(
pts1, pts2, self.camera_matrix, pts1, pts2, self.camera_matrix, method=cv2.RANSAC, prob=0.999, threshold=1.0
method=cv2.RANSAC, prob=0.999, threshold=1.0) )
if E is None: if E is None:
return return
_, R, t, mask = cv2.recoverPose(E, pts1, pts2, self.camera_matrix, mask=mask) _, R, t, mask = cv2.recoverPose(E, pts1, pts2, self.camera_matrix, mask=mask)
scale = 1.0 scale = 1.0
translation = scale * t.flatten() translation = scale * t.flatten()
if self.prev_time is not None: if self.prev_time is not None:
dt = (current_time - self.prev_time).nanoseconds / 1e9 dt = current_time - self.prev_time
if dt > 0: if dt > 0:
self.velocity = translation / dt self.velocity = translation / dt
self.position += self.orientation @ translation self.position += self.orientation @ translation
self.orientation = R @ self.orientation self.orientation = R @ self.orientation
def publish_pose(self):
pose_msg = PoseStamped()
pose_msg.header.stamp = self.get_clock().now().to_msg()
pose_msg.header.frame_id = 'odom'
pose_msg.pose.position.x = float(self.position[0])
pose_msg.pose.position.y = float(self.position[1])
pose_msg.pose.position.z = float(self.position[2])
rotation = Rotation.from_matrix(self.orientation)
quat = rotation.as_quat()
pose_msg.pose.orientation.x = quat[0]
pose_msg.pose.orientation.y = quat[1]
pose_msg.pose.orientation.z = quat[2]
pose_msg.pose.orientation.w = quat[3]
self.pose_pub.publish(pose_msg)
vel_msg = TwistStamped()
vel_msg.header.stamp = self.get_clock().now().to_msg()
vel_msg.header.frame_id = 'odom'
vel_msg.twist.linear.x = float(self.velocity[0])
vel_msg.twist.linear.y = float(self.velocity[1])
vel_msg.twist.linear.z = float(self.velocity[2])
self.velocity_pub.publish(vel_msg)
if self.on_pose:
r = Rotation.from_matrix(self.orientation)
self.on_pose(self.position.copy(), r.as_quat())
def main(args=None): def get_pose(self):
rclpy.init(args=args) r = Rotation.from_matrix(self.orientation)
node = VisualOdometryNode() return self.position.copy(), r.as_quat()
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
finally:
node.destroy_node()
rclpy.shutdown()
def get_velocity(self):
if __name__ == '__main__': return self.velocity.copy()
main()

View File

@@ -1,135 +1,76 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
"""Visual Servoing - Vision-based control for precision positioning.""" """
Visual Servoing - Vision-based control for precision landing on ArUco marker.
Uses ObjectDetector detections + pymavlink to send velocity commands.
"""
import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from geometry_msgs.msg import PoseStamped, TwistStamped, Pose
from std_msgs.msg import Bool
from cv_bridge import CvBridge
import cv2
import numpy as np import numpy as np
class VisualServoing(Node): class VisualServoing:
def __init__(self, controller, target_marker_id=0, desired_distance=1.0):
def __init__(self): self.controller = controller
super().__init__('visual_servoing') self.target_marker_id = target_marker_id
self.desired_distance = desired_distance
self.bridge = CvBridge()
self.kp_xy = 0.5
self.declare_parameter('target_marker_id', 0) self.kp_z = 0.3
self.declare_parameter('desired_distance', 1.0) self.max_velocity = 1.0
self.declare_parameter('kp_linear', 0.5)
self.declare_parameter('kp_angular', 0.3)
self.declare_parameter('max_velocity', 1.0)
self.target_marker_id = self.get_parameter('target_marker_id').value
self.desired_distance = self.get_parameter('desired_distance').value
self.kp_linear = self.get_parameter('kp_linear').value
self.kp_angular = self.get_parameter('kp_angular').value
self.max_velocity = self.get_parameter('max_velocity').value
self.enabled = False self.enabled = False
self.target_pose = None self.target_acquired = False
self.image_center = (320, 240) self.image_center = (320, 240)
self.camera_matrix = np.array([ self.last_detection = None
[500.0, 0.0, 320.0],
[0.0, 500.0, 240.0], print(f"[VS] Visual Servoing initialized (target marker: {target_marker_id})")
[0.0, 0.0, 1.0]
]) def enable(self):
self.dist_coeffs = np.zeros(5) self.enabled = True
print("[VS] Enabled")
self.aruco_dict = cv2.aruco.getPredefinedDictionary(cv2.aruco.DICT_4X4_50)
self.aruco_params = cv2.aruco.DetectorParameters() def disable(self):
self.aruco_detector = cv2.aruco.ArucoDetector(self.aruco_dict, self.aruco_params) self.enabled = False
print("[VS] Disabled")
self.image_sub = self.create_subscription(
Image, '/uav/camera/forward/image_raw', self.image_callback, 10) def on_detections(self, detections):
self.enable_sub = self.create_subscription(
Bool, '/visual_servoing/enable', self.enable_callback, 10)
self.target_sub = self.create_subscription(
Pose, '/visual_servoing/target', self.target_callback, 10)
self.velocity_pub = self.create_publisher(
TwistStamped, '/uav/visual_servoing/cmd_vel', 10)
self.status_pub = self.create_publisher(
Bool, '/visual_servoing/target_acquired', 10)
self.get_logger().info('Visual Servoing Node Started')
def enable_callback(self, msg):
self.enabled = msg.data
self.get_logger().info(f'Visual servoing {"enabled" if self.enabled else "disabled"}')
def target_callback(self, msg):
self.target_pose = msg
def image_callback(self, msg):
if not self.enabled: if not self.enabled:
return return
try:
frame = self.bridge.imgmsg_to_cv2(msg, 'bgr8')
self.image_center = (frame.shape[1] // 2, frame.shape[0] // 2)
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
corners, ids, _ = self.aruco_detector.detectMarkers(gray)
target_acquired = Bool()
target_acquired.data = False
if ids is not None and self.target_marker_id in ids:
idx = np.where(ids == self.target_marker_id)[0][0]
target_corners = corners[idx]
rvecs, tvecs, _ = cv2.aruco.estimatePoseSingleMarkers(
[target_corners], 0.15, self.camera_matrix, self.dist_coeffs)
target_pos = tvecs[0][0]
self.compute_control(target_pos, target_corners)
target_acquired.data = True
self.status_pub.publish(target_acquired)
except Exception as e:
self.get_logger().error(f'Visual servoing error: {e}')
def compute_control(self, target_pos, corners):
marker_center = np.mean(corners[0], axis=0)
error_x = self.image_center[0] - marker_center[0]
error_y = self.image_center[1] - marker_center[1]
error_z = target_pos[2] - self.desired_distance
vel_msg = TwistStamped()
vel_msg.header.stamp = self.get_clock().now().to_msg()
vel_msg.header.frame_id = 'base_link'
vel_msg.twist.linear.x = np.clip(self.kp_linear * error_z, -self.max_velocity, self.max_velocity)
vel_msg.twist.linear.y = np.clip(self.kp_linear * error_x / 100.0, -self.max_velocity, self.max_velocity)
vel_msg.twist.linear.z = np.clip(self.kp_linear * error_y / 100.0, -self.max_velocity, self.max_velocity)
vel_msg.twist.angular.z = np.clip(self.kp_angular * error_x / 100.0, -1.0, 1.0)
self.velocity_pub.publish(vel_msg)
target = None
for d in detections:
if d.get("id") == self.target_marker_id:
target = d
break
def main(args=None): if target is None:
rclpy.init(args=args) self.target_acquired = False
node = VisualServoing() return
try:
rclpy.spin(node)
except KeyboardInterrupt:
pass
finally:
node.destroy_node()
rclpy.shutdown()
self.target_acquired = True
self.last_detection = target
self.compute_control(target)
if __name__ == '__main__': def compute_control(self, detection):
main() center_px = detection["center_px"]
distance = detection["distance"]
position = detection["position"]
error_x = self.image_center[0] - center_px[0]
error_y = self.image_center[1] - center_px[1]
error_z = distance - self.desired_distance
vx = np.clip(self.kp_xy * error_y / 100.0, -self.max_velocity, self.max_velocity)
vy = np.clip(self.kp_xy * error_x / 100.0, -self.max_velocity, self.max_velocity)
vz = np.clip(-self.kp_z * error_z, -self.max_velocity, self.max_velocity)
self.controller.send_velocity(vx, vy, vz)
print(
f"\r[VS] Target ID:{detection['id']} "
f"d:{distance:.2f}m "
f"err:({error_x:.0f},{error_y:.0f}) "
f"vel:({vx:.2f},{vy:.2f},{vz:.2f})",
end="",
flush=True,
)

View File

@@ -153,11 +153,12 @@
<pose degrees="true">0 0 0.195 0 0 90</pose> <pose degrees="true">0 0 0.195 0 0 90</pose>
</include> </include>
<!-- ===================== UGV: Target Vehicle ===================== --> <!-- ===================== UGV: Target Vehicle ===================== -->
<!-- The drone should search for this vehicle and land on it --> <!-- The drone should search for this vehicle and land on it -->
<model name="ugv_target"> <model name="ugv_target">
<static>true</static> <static>true</static>
<pose>10 5 0 0 0 0</pose> <pose>5.0 5.0 0 0 0 0</pose>
<!-- Main body (blue box) --> <!-- Main body (blue box) -->
<link name="base_link"> <link name="base_link">

View File

@@ -1,13 +0,0 @@
#!/bin/bash
# WSL Environment for UAV-UGV Simulation
if [ -d "/mnt/wslg" ]; then
export DISPLAY=:0
else
export DISPLAY=$(cat /etc/resolv.conf 2>/dev/null | grep nameserver | awk '{print $2}'):0
fi
export LIBGL_ALWAYS_INDIRECT=0
export MESA_GL_VERSION_OVERRIDE=3.3
export MESA_GLSL_VERSION_OVERRIDE=330
export OGRE_RTT_MODE=Copy