Compare commits


2 Commits

Author        SHA1        Message  Date
Eric Seuret   5eef695e83  Ex8..9   2025-10-07 05:53:37 +02:00
Eric Seuret   b37a0ec70b  Sol5..6  2025-10-07 05:53:18 +02:00
4 changed files with 348 additions and 0 deletions

Ex8_tank_linear.py (Executable file, 71 additions)

@@ -0,0 +1,71 @@
#!/usr/bin/env python
import numpy as np

from WheeledRobot import TankRobotEnv

# TankRobot kinematics
b = 1
r = 0.1

# Wheel equations
J = np.array([
    [+1/r, 0, -b/(2*r)],
    [-1/r, 0, -b/(2*r)]
])
F = np.linalg.pinv(J)


def controller_tank_linear(t, X_I, dX_I, target_position):
    """Enhanced linear control with polar coordinates and dynamic target"""
    ...
    # Calculate control input
    U = J @ dX_R_des
    return U


def run_simulation():
    """Run simulation using Gymnasium environment with enhanced linear control to dynamic target"""
    # Initialize environment with fixed target for reproducible results
    # You can set random_target=True for random target generation
    env = TankRobotEnv(render_mode="human", random_target=False)
    observation, _ = env.reset()

    # Expand render bounds to show target position (default target is at [10,5])
    env.set_render_bounds((-2, 12), (-2, 8))

    print("Starting Tank Robot Enhanced Linear Control Simulation")
    print("Controller: Enhanced linear control with polar coordinates to dynamic target")
    print(f"Target position: [{observation[7]:.2f}, {observation[8]:.2f}, {observation[9]:.2f}]")

    for step in range(1000):
        # Extract controller inputs from observation
        # New observation format: [x, y, theta, dx, dy, dtheta, time, target_x, target_y, target_theta]
        time = observation[6]                   # Current time
        X_I = observation[:3].reshape(-1, 1)    # State [x, y, theta]
        dX_I = observation[3:6].reshape(-1, 1)  # Derivatives [dx, dy, dtheta]
        target_position = observation[7:10]     # Target [target_x, target_y, target_theta]

        # Call enhanced linear controller with dynamic target
        U = controller_tank_linear(time, X_I, dX_I, target_position)

        # Step environment
        observation, reward, terminated, truncated, _ = env.step(U.flatten())

        # Render the environment
        env.render()

        # Check if target reached
        if terminated:
            print(f"Target reached at step {step}! Reward: {reward:.2f}")
            break
        elif truncated:
            print(f"Maximum steps reached at step {step}")
            break

    input("Press Enter to close the simulation window...")
    env.close()
    print("Simulation completed")


if __name__ == "__main__":
    run_simulation()
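The body of controller_tank_linear is collapsed in this diff. For orientation only, a minimal sketch of a polar-coordinate linear controller for a differential-drive robot is given below; the gains k_rho, k_alpha, k_beta and the overall structure are assumptions, not the values from this commit. It reuses the module-level J defined above.

def controller_tank_linear_sketch(t, X_I, dX_I, target_position,
                                  k_rho=0.5, k_alpha=1.5, k_beta=-0.3):
    """Illustrative polar-coordinate controller (hypothetical gains)."""
    dx = target_position[0] - X_I[0, 0]
    dy = target_position[1] - X_I[1, 0]
    theta = X_I[2, 0]
    rho = np.hypot(dx, dy)                            # distance to target
    alpha = np.arctan2(dy, dx) - theta                # bearing error
    alpha = np.arctan2(np.sin(alpha), np.cos(alpha))  # wrap to [-pi, pi]
    beta = target_position[2] - theta - alpha         # final-heading error
    beta = np.arctan2(np.sin(beta), np.cos(beta))
    v = k_rho * rho                                   # forward speed command
    omega = k_alpha * alpha + k_beta * beta           # turn-rate command
    dX_R_des = np.array([[v], [0.0], [omega]])        # tank robots cannot move sideways
    return J @ dX_R_des                               # wheel speeds via the Jacobian

The classical stability conditions for this structure are k_rho > 0, k_beta < 0, and k_alpha - k_rho > 0, which the illustrative gains satisfy.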

Ex9_tank_l1.py (Executable file, 76 additions)

@@ -0,0 +1,76 @@
#!/usr/bin/env python
import numpy as np

from WheeledRobot import TankRobotEnv

# TankRobot kinematics
b = 1
r = 0.1

# Wheel equations
J = np.array([
    [+1/r, 0, -b/(2*r)],
    [-1/r, 0, -b/(2*r)]
])
F = np.linalg.pinv(J)


def controller_tank_l1(t, X_I, dX_I, target_position):
    """L1 adaptive control with path planning and dynamic target"""
    # Controller parameters
    dx_R_max = 3
    L0 = 4
    L1 = 4
    ...
    # Calculate control input
    U = J @ dX_R_des
    return U


def run_simulation():
    """Run simulation using Gymnasium environment with L1 adaptive control to dynamic target"""
    # Initialize environment with fixed target for reproducible results
    # You can set random_target=True for random target generation
    env = TankRobotEnv(render_mode="human", random_target=False)
    observation, _ = env.reset()

    # Expand render bounds to show target position (default target is at [10,5])
    env.set_render_bounds((-2, 12), (-2, 8))

    print("Starting Tank Robot L1 Adaptive Control Simulation")
    print("Controller: L1 adaptive control with path planning to dynamic target")
    print(f"Target position: [{observation[7]:.2f}, {observation[8]:.2f}, {observation[9]:.2f}]")

    for step in range(1000):
        # Extract controller inputs from observation
        # New observation format: [x, y, theta, dx, dy, dtheta, time, target_x, target_y, target_theta]
        time = observation[6]                   # Current time
        X_I = observation[:3].reshape(-1, 1)    # State [x, y, theta]
        dX_I = observation[3:6].reshape(-1, 1)  # Derivatives [dx, dy, dtheta]
        target_position = observation[7:10]     # Target [target_x, target_y, target_theta]

        # Call L1 adaptive controller with dynamic target
        U = controller_tank_l1(time, X_I, dX_I, target_position)

        # Step environment
        observation, reward, terminated, truncated, _ = env.step(U.flatten())

        # Render the environment
        env.render()

        # Check if target reached
        if terminated:
            print(f"Target reached at step {step}! Reward: {reward:.2f}")
            break
        elif truncated:
            print(f"Maximum steps reached at step {step}")
            break

    input("Press Enter to close the simulation window...")
    env.close()
    print("Simulation completed")


if __name__ == "__main__":
    run_simulation()
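Only the declared parameters of controller_tank_l1 (dx_R_max, L0, L1) survive in this diff; the body is collapsed. One common building block consistent with such parameters is a lookahead point chased at a saturated speed. The sketch below is purely illustrative: the helper names and the reading of L0 as a lookahead distance are assumptions, not the commit's actual logic.

def lookahead_point(X_I, target_position, L0=4.0):
    """Hypothetical helper: point at distance L0 along the line robot -> target."""
    p = X_I[:2, 0]
    g = np.asarray(target_position[:2], dtype=float)
    d = g - p
    dist = np.linalg.norm(d)
    if dist <= L0:              # inside the lookahead radius: aim at the target itself
        return g
    return p + (L0 / dist) * d  # otherwise chase a point L0 ahead on the line

def saturate_speed(v, dx_R_max=3.0):
    """Clamp the commanded forward speed to the limit declared in the diff."""
    return np.clip(v, -dx_R_max, dx_R_max)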

Sol5_omni_bangbang.py (Executable file, 102 additions)

@@ -0,0 +1,102 @@
#!/usr/bin/env python
import numpy as np

from WheeledRobot import OmniRobotEnv

# OmniRobot kinematics
a1 = 0.0;         b1 = 0.0; l1 = 0.5
a2 = (2/3)*np.pi; b2 = 0.0; l2 = 0.5
a3 = (4/3)*np.pi; b3 = 0.0; l3 = 0.5
r = 0.1

# Kinematics matrix
J = np.array([
    [1/r*np.sin(a1+b1), -1/r*np.cos(a1+b1), -l1/r*np.cos(b1)],
    [1/r*np.sin(a2+b2), -1/r*np.cos(a2+b2), -l2/r*np.cos(b2)],
    [1/r*np.sin(a3+b3), -1/r*np.cos(a3+b3), -l3/r*np.cos(b3)]
])
F = np.linalg.inv(J)


def controller_omni_bangbang(t, X_I, dX_I, target_position):
    """Bang-bang controller with dynamic target position"""
    # maximum control velocity
    dX_I_max = np.array([[5], [5], [1]])

    # Use target position from parameters instead of hardcoded values
    X_I_des = target_position.reshape(-1, 1)
    pos_err = X_I_des - X_I

    # part 1 (separate X,Y,theta)
    dX_I_des = np.array([
        [dX_I_max[0,0] * np.sign(pos_err[0,0])],
        [dX_I_max[1,0] * np.sign(pos_err[1,0])],
        [dX_I_max[2,0] * np.sign(pos_err[2,0])]
    ])

    # part 3 (combined XY control)
    # dir_err = pos_err[0:2,:] / np.linalg.norm(pos_err[0:2,:])
    # dX_I_des = np.array([
    #     [dX_I_max[0,0] * dir_err[0,0]],
    #     [dX_I_max[1,0] * dir_err[1,0]],
    #     [dX_I_max[2,0] * np.sign(pos_err[2,0])]
    # ])

    # part 2 (stopping condition)
    if np.abs(pos_err[0,0]) < 0.1:  dX_I_des[0,0] = 0
    if np.abs(pos_err[1,0]) < 0.1:  dX_I_des[1,0] = 0
    if np.abs(pos_err[2,0]) < 0.01: dX_I_des[2,0] = 0

    # coordinate transform
    R_RI = np.array([[ np.cos(X_I[2,0]), np.sin(X_I[2,0]), 0.0],
                     [-np.sin(X_I[2,0]), np.cos(X_I[2,0]), 0.0],
                     [ 0.0,              0.0,              1.0]])
    dX_R_des = R_RI @ dX_I_des
    U = J @ dX_R_des
    return U


def run_simulation():
    """Run simulation using Gymnasium environment with bang-bang control to dynamic target"""
    # Initialize environment with fixed target to match Sol6 behavior
    # You can set random_target=True for random target generation
    env = OmniRobotEnv(render_mode="human", random_target=False)
    observation, _ = env.reset()

    # Expand render bounds to show target position (default target is at [10,5])
    env.set_render_bounds((-2, 12), (-2, 8))

    print("Starting Omnidirectional Robot Bang-Bang Control Simulation")
    print("Controller: Bang-bang control to dynamic target position")
    print(f"Target position: [{observation[7]:.2f}, {observation[8]:.2f}, {observation[9]:.2f}]")

    for step in range(1000):
        # Extract controller inputs from observation
        # New observation format: [x, y, theta, dx, dy, dtheta, time, target_x, target_y, target_theta]
        time = observation[6]                   # Current time
        X_I = observation[:3].reshape(-1, 1)    # State [x, y, theta]
        dX_I = observation[3:6].reshape(-1, 1)  # Derivatives [dx, dy, dtheta]
        target_position = observation[7:10]     # Target [target_x, target_y, target_theta]

        # Call bang-bang controller with dynamic target
        U = controller_omni_bangbang(time, X_I, dX_I, target_position)

        # Step environment
        observation, reward, terminated, truncated, _ = env.step(U.flatten())

        # Render the environment
        env.render()

        # Check if target reached
        if terminated:
            print(f"Target reached at step {step}! Reward: {reward:.2f}")
            break
        elif truncated:
            print(f"Maximum steps reached at step {step}")
            break

    input("Press Enter to close the simulation window...")
    env.close()
    print("Simulation completed")


if __name__ == "__main__":
    run_simulation()
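Since F = np.linalg.inv(J), mapping a robot-frame velocity to wheel speeds and back should round-trip exactly. A quick sanity check, assuming the J and F defined in this file:

dX_R = np.array([[1.0], [0.5], [0.2]])  # robot-frame velocity [dx, dy, dtheta]
phi_dot = J @ dX_R                      # wheel speeds for the three omni wheels
assert np.allclose(F @ phi_dot, dX_R)   # F = J^{-1}, so the round trip is the identity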

Sol6_omni_pid.py (Executable file, 99 additions)

@@ -0,0 +1,99 @@
#!/usr/bin/env python
import numpy as np

from WheeledRobot import OmniRobotEnv

# OmniRobot kinematics
a1 = 0.0;         b1 = 0.0; l1 = 0.5
a2 = (2/3)*np.pi; b2 = 0.0; l2 = 0.5
a3 = (4/3)*np.pi; b3 = 0.0; l3 = 0.5
r = 0.1

# Kinematics matrix
J = np.array([
    [1/r*np.sin(a1+b1), -1/r*np.cos(a1+b1), -l1/r*np.cos(b1)],
    [1/r*np.sin(a2+b2), -1/r*np.cos(a2+b2), -l2/r*np.cos(b2)],
    [1/r*np.sin(a3+b3), -1/r*np.cos(a3+b3), -l3/r*np.cos(b3)]
])
F = np.linalg.inv(J)


def controller_omni_pid(t, X_I, dX_I, target_position):
    """PID Controller with dynamic target position"""
    global int_err

    # PID parameters
    Kp_t = 2;   Kp_r = 2
    Kd_t = 0.5; Kd_r = 0.5
    Ki_t = 0.1; Ki_r = 0.1

    # Use target position from parameters instead of hardcoded values
    X_I_des = target_position.reshape(-1, 1)
    pos_err = X_I_des - X_I
    vel_err = np.zeros((3, 1)) - dX_I
    int_err = int_err + pos_err

    # PID control output
    dX_I_des = np.array([
        [Kp_t*pos_err[0,0] + Kd_t*vel_err[0,0] + Ki_t*int_err[0,0]],
        [Kp_t*pos_err[1,0] + Kd_t*vel_err[1,0] + Ki_t*int_err[1,0]],
        [Kp_r*pos_err[2,0] + Kd_r*vel_err[2,0] + Ki_r*int_err[2,0]]
    ])

    # Coordinate transform
    R_RI = np.array([[ np.cos(X_I[2,0]), np.sin(X_I[2,0]), 0.0],
                     [-np.sin(X_I[2,0]), np.cos(X_I[2,0]), 0.0],
                     [ 0.0,              0.0,              1.0]])
    dX_R_des = R_RI @ dX_I_des
    U = J @ dX_R_des
    return U


def run_simulation():
    """Run simulation using Gymnasium environment with PID control to dynamic target"""
    global int_err

    # Initialize environment with fixed target for reproducible results
    # You can set random_target=True for random target generation
    env = OmniRobotEnv(render_mode="human", random_target=False, speed_limit_enabled=True)
    observation, _ = env.reset()

    # Expand render bounds to show target position (default target is at [10,5])
    env.set_render_bounds((-2, 12), (-2, 8))

    # Initialize integral error
    int_err = np.zeros((3, 1))

    print("Starting Omnidirectional Robot PID Control Simulation")
    print("Controller: PID control to dynamic target position")
    print(f"Target position: [{observation[7]:.2f}, {observation[8]:.2f}, {observation[9]:.2f}]")

    for step in range(1000):
        # Extract controller inputs from observation
        # New observation format: [x, y, theta, dx, dy, dtheta, time, target_x, target_y, target_theta]
        time = observation[6]                   # Current time
        X_I = observation[:3].reshape(-1, 1)    # State [x, y, theta]
        dX_I = observation[3:6].reshape(-1, 1)  # Derivatives [dx, dy, dtheta]
        target_position = observation[7:10]     # Target [target_x, target_y, target_theta]

        # Call PID controller with dynamic target
        U = controller_omni_pid(time, X_I, dX_I, target_position)

        # Step environment
        observation, reward, terminated, truncated, _ = env.step(U.flatten())

        # Render the environment
        env.render()

        # Check if target reached
        if terminated:
            print(f"Target reached at step {step}! Reward: {reward:.2f}")
            break
        elif truncated:
            print(f"Maximum steps reached at step {step}")
            break

    input("Press Enter to close the simulation window...")
    env.close()
    print("Simulation completed")


if __name__ == "__main__":
    run_simulation()
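Note that int_err = int_err + pos_err accumulates once per simulation step rather than per unit time, so Ki_t and Ki_r implicitly absorb the timestep, and the integral can wind up while the robot is still far from the target. A hedged variant with explicit dt scaling and simple clamping anti-windup is sketched below; the dt value and clamp limit are assumptions, not part of this commit.

INT_LIMIT = 5.0   # assumed anti-windup clamp, not from the course code

def update_integral(int_err, pos_err, dt=0.01):
    """Timestep-aware integral update with clamping anti-windup (illustrative)."""
    int_err = int_err + pos_err * dt               # integrate the error over time
    return np.clip(int_err, -INT_LIMIT, INT_LIMIT) # keep each component bounded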