diff --git a/examples/learning_ml/example_felt_lqt.py b/examples/learning_ml/example_felt_lqt.py
new file mode 100644
index 000000000..a82a7588f
--- /dev/null
+++ b/examples/learning_ml/example_felt_lqt.py
@@ -0,0 +1,73 @@
+"""
+FeLT
+=================
+
+The code for the paper "FeLT: Fully Tactile-driven Robot Plate Cleaning Skill Learning from Human Demonstration
+ with Tactile Sensor" by Junjia LIU, et al.
+"""
+import numpy as np
+import os
+import rofunc as rf
+import pandas as pd
+
+
+# --- Data processing ---
+def data_process(data_dir):
+    all_files = rf.oslab.list_absl_path(data_dir, recursive=False, prefix='trial')
+
+    demos_x = []
+    demos_taxels_pressure = []
+
+    for file in all_files:
+        hand_rigid = pd.read_csv(os.path.join(file, 'mocap_hand_rigid.csv'))
+        object_rigid = pd.read_csv(os.path.join(file, 'mocap_object_rigid.csv'))
+        hand_marker_positions = pd.read_csv(os.path.join(file, 'mocap_hand.csv'))
+        object_marker_positions = pd.read_csv(os.path.join(file, 'mocap_object.csv'))
+        sensor_data = pd.read_csv(os.path.join(file, 'sensor_comb.csv'))['taxels_pressure'].to_numpy()
+
+        def get_center_position(df):
+            data = df.to_numpy().reshape((len(df.to_numpy()), -1, 3))
+            return np.mean(data, axis=1)
+
+        def get_orientation(df):
+            data = df.to_numpy().reshape((len(df.to_numpy()), 3, 3))
+            data = np.array(
+                [rf.robolab.quaternion_from_homo_matrix(rf.robolab.homo_matrix_from_rot_matrix(i)) for i in data])
+            return data
+
+        hand_position = get_center_position(hand_marker_positions)  # p_WH
+        object_position = get_center_position(object_marker_positions)  # p_WO
+        hand_ori = get_orientation(hand_rigid)  # q_H
+        object_ori = get_orientation(object_rigid)  # q_O
+
+        # hand_pose = np.concatenate((hand_position, hand_ori), axis=1)  # p_WH
+        # object_pose = np.concatenate((object_position, object_ori), axis=1)  # p_WO
+
+        T_WO = np.array(
+            [rf.robolab.homo_matrix_from_quaternion(object_ori[i], object_position[i]) for i in range(len(object_ori))])
+
+        T_WO_inv = np.array([np.linalg.inv(i) for i in T_WO])
+
+        p_OH = np.array(
+            [T_WO_inv[i].dot(np.hstack((hand_position[i], np.ones(1)))) for i in range(len(T_WO_inv))])[:, :3]
+
+        demos_x.append(np.hstack((p_OH, hand_ori, sensor_data.reshape((-1, 1)) / 1000)))
+        demos_taxels_pressure.append(sensor_data.reshape((-1, 1)) / 1000)
+
+    return demos_x, demos_taxels_pressure
+
+
+demos_x, demos_taxels_pressure = data_process('../data/felt/wipe_spiral')
+
+# --- LQT-CP ---
+demos_x = [demo_x[:500, :7] for demo_x in demos_x]
+demos_x = demos_x[0]
+filter_indices = [i for i in range(0, len(demos_x) - 10, 10)]
+filter_indices.append(len(demos_x) - 1)
+via_points_raw = demos_x[filter_indices]
+
+cfg = rf.config.utils.get_config("./planning", "lqt_cp")
+
+controller = rf.planning_control.lqt.lqt_cp.LQTCP(via_points_raw, cfg)
+u_hat, x_hat, mu, idx_slices = controller.solve()
+rf.lqt.plot_3d_uni([x_hat], mu, idx_slices)
diff --git a/examples/planning_control/example_lqt_cp.py b/examples/planning_control/example_lqt_cp.py
index ad245b368..66ef23595 100644
--- a/examples/planning_control/example_lqt_cp.py
+++ b/examples/planning_control/example_lqt_cp.py
@@ -13,5 +13,9 @@
 filter_indices = [i for i in range(0, len(via_points_raw) - 10, 5)]
 filter_indices.append(len(via_points_raw) - 1)
 via_points_raw = via_points_raw[filter_indices]
-u_hat, x_hat, mu, idx_slices = rf.lqt.uni_cp(via_points_raw)
+
+cfg = rf.config.utils.get_config("./planning", "lqt_cp")
+
+controller = rf.planning_control.lqt.lqt_cp.LQTCP(via_points_raw, cfg)
+u_hat, x_hat, mu, idx_slices = controller.solve()
 rf.lqt.plot_3d_uni([x_hat], mu, idx_slices)
diff --git a/examples/planning_control/example_lqt_cp_dmp.py b/examples/planning_control/example_lqt_cp_dmp.py
index a6d51c2c7..4c3f28868 100644
--- a/examples/planning_control/example_lqt_cp_dmp.py
+++ b/examples/planning_control/example_lqt_cp_dmp.py
@@ -4,7 +4,7 @@
 
 This example shows how to use the LQT controller with control primitives and DMP to track a trajectory.
 """
-import os
+
 import numpy as np
 import rofunc as rf
 from scipy.interpolate import interp1d
@@ -19,7 +19,9 @@
 MuAcc = np.gradient(MuVel)[1] / cfg.dt
 # Position, velocity and acceleration profiles as references
 via_points = np.vstack((MuPos, MuVel, MuAcc))
-#
-
 cfg.nbData = len(via_points[0])
-rf.lqt.uni_cp_dmp(via_points, cfg)
+
+controller = rf.planning_control.lqt.lqt_cp_dmp.LQTCPDMP(via_points, cfg)
+# u_hat, x_hat, mu, idx_slices = controller.solve()
+# rf.lqt.plot_3d_uni([x_hat], mu, idx_slices)
+# rf.lqt.uni_cp_dmp(via_points, cfg)
diff --git a/rofunc/config/planning/lqt.yaml b/rofunc/config/planning/lqt.yaml
index 5ecf7929a..ac6908146 100644
--- a/rofunc/config/planning/lqt.yaml
+++ b/rofunc/config/planning/lqt.yaml
@@ -5,9 +5,4 @@ nbPoints: 0 # Number of via-points to track
 nbVar: ${multi:${nbVarPos},${nbDeriv}} # Dimension of state vector
 nbVarX: ${add:${nbVar}, 1} # Augmented state space
 dt: 1E-2 # Time step duration
-rfactor: 1E-6 # control cost in LQR
-
-# For control primitive version
-nbFct: 9 # Number of basis function
-basisName: "RBF" # can be PIECEWEISE, RBF, BERNSTEIN, FOURIER
-nbVarU: 7 # Control space dimension (dx1,dx2,dx3)
\ No newline at end of file
+rfactor: 1E-6 # control cost in LQR
\ No newline at end of file
diff --git a/rofunc/config/planning/lqt_cp.yaml b/rofunc/config/planning/lqt_cp.yaml
new file mode 100644
index 000000000..37b86b78c
--- /dev/null
+++ b/rofunc/config/planning/lqt_cp.yaml
@@ -0,0 +1,11 @@
+nbData: 1000 # Number of datapoints
+nbVarU: 7 # Control space dimension (dx1,dx2,dx3)
+nbVarPos: 7 # Dimension of position/pose data
+nbDeriv: 2 # Number of static and dynamic features (nbDeriv=2 for [x,dx] and u=ddx)
+nbVar: ${multi:${nbVarPos},${nbDeriv}} # Dimension of state vector
+nbVarX: ${add:${nbVar}, 1} # Augmented state space
+dt: 1E-2 # Time step duration
+rfactor: 5E-8 # control cost in LQR
+nbFct: 20 # Number of basis functions
+basisName: "FOURIER" # can be PIECEWEISE, RBF, BERNSTEIN, FOURIER
+nbPoints: 0 # Number of via-points to track
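
Note: the snippet below is a minimal usage sketch of the class-based LQT-CP API that this diff introduces, assuming rofunc is installed and the new rofunc/config/planning/lqt_cp.yaml is resolvable through rf.config.utils.get_config. The synthetic 7-D via-points are illustrative only (not FeLT data); only the calls already shown in the diff are used.

import numpy as np
import rofunc as rf

# Illustrative 7-D via-points (3-D position + unit quaternion), matching nbVarPos: 7 in lqt_cp.yaml.
t = np.linspace(0, 2 * np.pi, 50)
via_points_raw = np.vstack((np.cos(t), np.sin(t), t / (2 * np.pi),
                            np.ones_like(t), np.zeros_like(t),
                            np.zeros_like(t), np.zeros_like(t))).T  # shape (50, 7)

# Same call sequence as the updated examples above: load config, build controller, solve, plot.
cfg = rf.config.utils.get_config("./planning", "lqt_cp")
controller = rf.planning_control.lqt.lqt_cp.LQTCP(via_points_raw, cfg)
u_hat, x_hat, mu, idx_slices = controller.solve()
rf.lqt.plot_3d_uni([x_hat], mu, idx_slices)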