#!/usr/bin/env python
import numpy as np
from WheeledRobot import OmniRobotEnv

# OmniRobot kinematics
a1 = 0.0; b1 = 0.0; l1 = 0.5
a2 = (2/3)*np.pi; b2 = 0.0; l2 = 0.5
a3 = (4/3)*np.pi; b3 = 0.0; l3 = 0.5
r = 0.1

# Kinematics matrix
J = np.array([
    [1/r*np.sin(a1+b1), -1/r*np.cos(a1+b1), -l1/r*np.cos(b1)],
    [1/r*np.sin(a2+b2), -1/r*np.cos(a2+b2), -l2/r*np.cos(b2)],
    [1/r*np.sin(a3+b3), -1/r*np.cos(a3+b3), -l3/r*np.cos(b3)]
])
F = np.linalg.inv(J)


def controller_omni_pid(t, X_I, dX_I, target_position):
    """PID Controller with dynamic target position"""
    global int_err

    # PID parameters
    Kp_t = 2; Kp_r = 2
    Kd_t = 0.5; Kd_r = 0.5
    Ki_t = 0.1; Ki_r = 0.1

    # Use target position from parameters instead of hardcoded values
    X_I_des = target_position.reshape(-1, 1)

    pos_err = X_I_des - X_I
    vel_err = np.zeros((3, 1)) - dX_I
    int_err = int_err + pos_err

    # PID control output
    dX_I_des = np.array([
        [Kp_t*pos_err[0, 0] + Kd_t*vel_err[0, 0] + Ki_t*int_err[0, 0]],
        [Kp_t*pos_err[1, 0] + Kd_t*vel_err[1, 0] + Ki_t*int_err[1, 0]],
        [Kp_r*pos_err[2, 0] + Kd_r*vel_err[2, 0] + Ki_r*int_err[2, 0]]
    ])

    # Coordinate transform
    R_RI = np.array([[ np.cos(X_I[2, 0]), np.sin(X_I[2, 0]), 0.0],
                     [-np.sin(X_I[2, 0]), np.cos(X_I[2, 0]), 0.0],
                     [ 0.0,               0.0,               1.0]])
    dX_R_des = R_RI @ dX_I_des
    U = J @ dX_R_des

    return U


def run_simulation():
    """Run simulation using Gymnasium environment with PID control to dynamic target"""
    global int_err

    # Initialize environment with fixed target for reproducible results
    # You can set random_target=True for random target generation
    env = OmniRobotEnv(render_mode="human", random_target=False, speed_limit_enabled=True)
    observation, _ = env.reset()

    # Expand render bounds to show target position (default target is at [10, 5])
    env.set_render_bounds((-2, 12), (-2, 8))

    # Initialize integral error
    int_err = np.zeros((3, 1))

    print("Starting Omnidirectional Robot PID Control Simulation")
    print("Controller: PID control to dynamic target position")
    print(f"Target position: [{observation[7]:.2f}, {observation[8]:.2f}, {observation[9]:.2f}]")

    for step in range(1000):
        # Extract controller inputs from observation
        # Observation format: [x, y, theta, dx, dy, dtheta, time, target_x, target_y, target_theta]
        time = observation[6]                   # Current time
        X_I = observation[:3].reshape(-1, 1)    # State [x, y, theta]
        dX_I = observation[3:6].reshape(-1, 1)  # Derivatives [dx, dy, dtheta]
        target_position = observation[7:10]     # Target [target_x, target_y, target_theta]

        # Call PID controller with dynamic target
        U = controller_omni_pid(time, X_I, dX_I, target_position)

        # Step environment
        observation, reward, terminated, truncated, _ = env.step(U.flatten())

        # Render the environment
        env.render()

        # Check if target reached
        if terminated:
            print(f"Target reached at step {step}! Reward: {reward:.2f}")
            break
        elif truncated:
            print(f"Maximum steps reached at step {step}")
            break

    input("Press Enter to close the simulation window...")
    env.close()
    print("Simulation completed")


if __name__ == "__main__":
    run_simulation()
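
# ---------------------------------------------------------------------------
# Illustrative note (sketch, not part of the original controller):
# F = np.linalg.inv(J) defined above is the forward-kinematics matrix, mapping
# wheel speeds back to robot-frame velocities [dx_R, dy_R, dtheta_R]. It is
# not used by the PID loop, but it offers a quick consistency check on the
# kinematics, e.g.:
#
#     dX_R = np.array([[0.2], [0.0], [0.1]])   # example robot-frame velocity
#     phi_dot = J @ dX_R                        # robot-frame velocity -> wheel speeds
#     assert np.allclose(F @ phi_dot, dX_R)     # F should invert J exactly
# ---------------------------------------------------------------------------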