#!/usr/bin/env python
import numpy as np
from WheeledRobot import TankRobotEnv
# TankRobot kinematics
b = 1    # wheel separation (track width)
r = 0.1  # wheel radius

# Wheel equations: J maps a desired body velocity [dx_R, dy_R, dtheta_R]
# to the two wheel angular velocities
J = np.array([
    [+1/r, 0, -b/(2*r)],
    [-1/r, 0, -b/(2*r)]
])
F = np.linalg.pinv(J)  # pseudoinverse: forward map from wheel speeds to body velocity
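# A quick sanity check of the wheel map, read directly off J above (the
# mirrored sign on the second wheel is this model's convention): with
# b = 1 and r = 0.1,
#   J @ [1, 0, 0]^T = [ 10, -10]   # pure forward motion
#   J @ [0, 0, 1]^T = [ -5,  -5]   # pure rotation in place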
def controller_tank_linear(t, X_I, dX_I, target_position):
    """Linear pose control in polar coordinates, driving to a dynamic target."""
    # Use the target position passed in as a parameter instead of hardcoded values
    X_I_des = target_position.reshape(-1, 1)
    pos_err = X_I_des - X_I

    # Polar coordinates of the error: rho is the distance to the target,
    # alpha the heading error toward it, beta the remaining heading error
    rho = np.sqrt(pos_err[0, 0]**2 + pos_err[1, 0]**2)
    alpha = -X_I[2, 0] + np.arctan2(pos_err[1, 0], pos_err[0, 0])
    beta = -X_I[2, 0] - alpha
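    # Worked example of the polar error (illustrative values, not taken from
    # the environment): robot at the origin facing +x (theta = 0) with the
    # target at (1, 1) gives rho = sqrt(2), alpha = atan2(1, 1) = pi/4 (turn
    # left toward the goal) and beta = -pi/4. Note that beta is measured
    # against a goal heading of 0; target_theta is not used here.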
    # Linear control law
    k_rho = 0.3
    k_alpha = 0.8
    k_beta = -0.15
    dX_R_des = np.array([[k_rho * rho], [0], [k_alpha * alpha + k_beta * beta]])

    # Enhanced linear control (fixed-forward-speed variant, disabled)
    # dX_R_fix = 3
    # dtheta_R_des = dX_R_des[2, 0] * dX_R_fix / dX_R_des[0, 0]
    # dX_R_des = np.array([[dX_R_fix], [0], [dtheta_R_des]])

    # Stopping condition: hold still once within 0.1 of the target
    if rho < 0.1:
        dX_R_des = np.array([[0], [0], [0]])

    # Map the desired body velocity to wheel speed commands
    U = J @ dX_R_des
    return U
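# Note on the gains above: for this standard polar-coordinate controller
# (see e.g. Siegwart et al., "Introduction to Autonomous Mobile Robots"),
# the closed loop is locally stable when
#   k_rho > 0,  k_beta < 0,  k_alpha - k_rho > 0,
# which k_rho = 0.3, k_alpha = 0.8, k_beta = -0.15 satisfy.
#
# A minimal standalone call (a sketch; the 3x1 column-vector state layout is
# taken from run_simulation below, and [10, 5, 0] is the default target):
#   U0 = controller_tank_linear(0.0, np.zeros((3, 1)), np.zeros((3, 1)),
#                               np.array([10.0, 5.0, 0.0]))
#   # U0 has shape (2, 1): one angular velocity per wheel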
def run_simulation():
    """Run the Gymnasium environment with the linear polar controller and a dynamic target."""
    # Initialize the environment with a fixed target for reproducible results;
    # set random_target=True for random target generation
    env = TankRobotEnv(render_mode="human", random_target=False)
    observation, _ = env.reset()

    # Expand the render bounds to show the target position (the default target
    # is at [10, 5])
    env.set_render_bounds((-2, 12), (-2, 8))

    print("Starting Tank Robot Enhanced Linear Control Simulation")
    print("Controller: linear control in polar coordinates to a dynamic target")
    print(f"Target position: [{observation[7]:.2f}, {observation[8]:.2f}, {observation[9]:.2f}]")

    for step in range(1000):
        # Extract controller inputs from the observation
        # Observation format: [x, y, theta, dx, dy, dtheta, time, target_x, target_y, target_theta]
        time = observation[6]                   # current time
        X_I = observation[:3].reshape(-1, 1)    # pose [x, y, theta]
        dX_I = observation[3:6].reshape(-1, 1)  # derivatives [dx, dy, dtheta]
        target_position = observation[7:10]     # target [target_x, target_y, target_theta]

        # Compute the wheel-speed command for the current state and target
        U = controller_tank_linear(time, X_I, dX_I, target_position)

        # Step the environment with the flattened wheel speeds
        observation, reward, terminated, truncated, _ = env.step(U.flatten())

        # Render the environment
        env.render()

        # Check whether the target was reached or the episode timed out
        if terminated:
            print(f"Target reached at step {step}! Reward: {reward:.2f}")
            break
        elif truncated:
            print(f"Maximum steps reached at step {step}")
            break

    input("Press Enter to close the simulation window...")
    env.close()
    print("Simulation completed")
if __name__ == "__main__":
    run_simulation()