#!/usr/bin/env python
import numpy as np

from WheeledRobot import OmniRobotEnv

# OmniRobot kinematics: for wheel i, a_i is the wheel's mounting angle about the
# robot centre, b_i its orientation relative to the chassis, l_i its distance
# from the centre, and r the wheel radius.
a1 = 0.0; b1 = 0.0; l1 = 0.5
a2 = (2/3)*np.pi; b2 = 0.0; l2 = 0.5
a3 = (4/3)*np.pi; b3 = 0.0; l3 = 0.5
r = 0.1

# Kinematics matrix: maps body-frame velocities [dx, dy, dtheta] to the three
# wheel angular velocities.
J = np.array([
    [1/r*np.sin(a1+b1), -1/r*np.cos(a1+b1), -l1/r*np.cos(b1)],
    [1/r*np.sin(a2+b2), -1/r*np.cos(a2+b2), -l2/r*np.cos(b2)],
    [1/r*np.sin(a3+b3), -1/r*np.cos(a3+b3), -l3/r*np.cos(b3)]
])
F = np.linalg.inv(J)  # inverse kinematics: wheel angular velocities -> body-frame velocities
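# Quick consistency check (illustrative only, not part of the original script):
# since F is the exact inverse of J, J @ F should be the 3x3 identity, e.g.
#   assert np.allclose(J @ F, np.eye(3))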


def controller_omni_bangbang(t, X_I, dX_I, target_position):
    """Bang-bang controller with dynamic target position."""
    ...
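    # --- Minimal bang-bang sketch (assumption: the actual control law is elided
    # above with "..."). Drive each inertial-frame coordinate toward the target
    # at a fixed speed, switching sign with the tracking error. V_MAX is an
    # illustrative value, not taken from the original script.
    V_MAX = 1.0
    error = np.asarray(target_position).reshape(-1, 1) - X_I
    dX_I_des = V_MAX * np.sign(error)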
    # Coordinate transform: rotate the desired inertial-frame velocity into the
    # robot (body) frame using the current heading theta = X_I[2, 0]
    R_RI = np.array([[ np.cos(X_I[2,0]), np.sin(X_I[2,0]), 0.0],
                     [-np.sin(X_I[2,0]), np.cos(X_I[2,0]), 0.0],
                     [ 0.0,              0.0,              1.0]])
    dX_R_des = R_RI @ dX_I_des
    U = J @ dX_R_des  # wheel angular velocity commands
    return U


def run_simulation():
    """Run the simulation in the Gymnasium environment with bang-bang control to a dynamic target."""
    # Initialize the environment with a fixed target to match the Sol6 behavior;
    # set random_target=True to generate a random target instead.
    env = OmniRobotEnv(render_mode="human", random_target=False)
    observation, _ = env.reset()

    # Expand the render bounds so the target position is visible
    # (the default target is at [10, 5])
    env.set_render_bounds((-2, 12), (-2, 8))

    print("Starting Omnidirectional Robot Bang-Bang Control Simulation")
    print("Controller: Bang-bang control to dynamic target position")
    print(f"Target position: [{observation[7]:.2f}, {observation[8]:.2f}, {observation[9]:.2f}]")

    for step in range(1000):
        # Extract controller inputs from observation
        # New observation format: [x, y, theta, dx, dy, dtheta, time, target_x, target_y, target_theta]
        time = observation[6]                   # Current time
        X_I = observation[:3].reshape(-1, 1)    # State [x, y, theta]
        dX_I = observation[3:6].reshape(-1, 1)  # Derivatives [dx, dy, dtheta]
        target_position = observation[7:10]     # Target [target_x, target_y, target_theta]

        # Call bang-bang controller with dynamic target
        U = controller_omni_bangbang(time, X_I, dX_I, target_position)

        # Step environment
        observation, reward, terminated, truncated, _ = env.step(U.flatten())

        # Render the environment
        env.render()

        # Check if target reached
        if terminated:
            print(f"Target reached at step {step}! Reward: {reward:.2f}")
            break
        elif truncated:
            print(f"Maximum steps reached at step {step}")
            break

    input("Press Enter to close the simulation window...")
    env.close()
    print("Simulation completed")


if __name__ == "__main__":
    run_simulation()