#!/usr/bin/env python
import numpy as np
from WheeledRobot import OmniRobotEnv

# OmniRobot kinematics
a1 = 0.0;        b1 = 0.0; l1 = 0.5
a2 = (2/3)*np.pi; b2 = 0.0; l2 = 0.5
a3 = (4/3)*np.pi; b3 = 0.0; l3 = 0.5
r = 0.1

# Kinematics matrix
J = np.array([
    [1/r*np.sin(a1+b1), -1/r*np.cos(a1+b1), -l1/r*np.cos(b1)],
    [1/r*np.sin(a2+b2), -1/r*np.cos(a2+b2), -l2/r*np.cos(b2)],
    [1/r*np.sin(a3+b3), -1/r*np.cos(a3+b3), -l3/r*np.cos(b3)]
])
F = np.linalg.inv(J)


def controller_omni_bangbang(t, X_I, dX_I, target_position):
    """Bang-bang controller with dynamic target position"""
    # maximum control velocity
    dX_I_max = np.array([[5], [5], [1]])

    # Use target position from parameters instead of hardcoded values
    X_I_des = target_position.reshape(-1, 1)
    pos_err = X_I_des - X_I

    # part 1 (separate X, Y, theta)
    dX_I_des = np.array([
        [dX_I_max[0, 0] * np.sign(pos_err[0, 0])],
        [dX_I_max[1, 0] * np.sign(pos_err[1, 0])],
        [dX_I_max[2, 0] * np.sign(pos_err[2, 0])]
    ])

    # part 3 (combined XY control)
    # dir_err = pos_err[0:2, :] / np.linalg.norm(pos_err[0:2, :])
    # dX_I_des = np.array([
    #     [dX_I_max[0, 0] * dir_err[0, 0]],
    #     [dX_I_max[1, 0] * dir_err[1, 0]],
    #     [dX_I_max[2, 0] * np.sign(pos_err[2, 0])]
    # ])

    # part 2 (stopping condition)
    if np.abs(pos_err[0, 0]) < 0.1:
        dX_I_des[0, 0] = 0
    if np.abs(pos_err[1, 0]) < 0.1:
        dX_I_des[1, 0] = 0
    if np.abs(pos_err[2, 0]) < 0.01:
        dX_I_des[2, 0] = 0

    # coordinate transform
    R_RI = np.array([[ np.cos(X_I[2, 0]), np.sin(X_I[2, 0]), 0.0],
                     [-np.sin(X_I[2, 0]), np.cos(X_I[2, 0]), 0.0],
                     [ 0.0,               0.0,               1.0]])
    dX_R_des = R_RI @ dX_I_des

    U = J @ dX_R_des
    return U


def run_simulation():
    """Run simulation using Gymnasium environment with bang-bang control to dynamic target"""
    # Initialize environment with fixed target to match Sol6 behavior
    # You can set random_target=True for random target generation
    env = OmniRobotEnv(render_mode="human", random_target=False)
    observation, _ = env.reset()

    # Expand render bounds to show target position (default target is at [10, 5])
    env.set_render_bounds((-2, 12), (-2, 8))

    print("Starting Omnidirectional Robot Bang-Bang Control Simulation")
    print("Controller: Bang-bang control to dynamic target position")
    print(f"Target position: [{observation[7]:.2f}, {observation[8]:.2f}, {observation[9]:.2f}]")

    for step in range(1000):
        # Extract controller inputs from observation
        # New observation format: [x, y, theta, dx, dy, dtheta, time, target_x, target_y, target_theta]
        time = observation[6]                    # Current time
        X_I = observation[:3].reshape(-1, 1)     # State [x, y, theta]
        dX_I = observation[3:6].reshape(-1, 1)   # Derivatives [dx, dy, dtheta]
        target_position = observation[7:10]      # Target [target_x, target_y, target_theta]

        # Call bang-bang controller with dynamic target
        U = controller_omni_bangbang(time, X_I, dX_I, target_position)

        # Step environment
        observation, reward, terminated, truncated, _ = env.step(U.flatten())

        # Render the environment
        env.render()

        # Check if target reached
        if terminated:
            print(f"Target reached at step {step}! Reward: {reward:.2f}")
            break
        elif truncated:
            print(f"Maximum steps reached at step {step}")
            break

    input("Press Enter to close the simulation window...")
    env.close()
    print("Simulation completed")


if __name__ == "__main__":
    run_simulation()