
Add logging for h5 #119

Merged
merged 14 commits into from Dec 3, 2024
195 changes: 139 additions & 56 deletions sim/play.py
@@ -9,24 +9,28 @@
import copy
import logging
import os
import time
import uuid
from datetime import datetime
from typing import Any, Union

import cv2
import h5py
import numpy as np
from isaacgym import gymapi
import torch # isort: skip
from tqdm import tqdm

logger = logging.getLogger(__name__)

import krec
from sim.env import run_dir # noqa: E402
from sim.envs import task_registry # noqa: E402
from sim.model_export import ActorCfg, convert_model_to_onnx # noqa: E402
from sim.utils.helpers import get_args # noqa: E402
from sim.utils.logger import Logger # noqa: E402

import torch # isort: skip
from sim.h5_logger import HDF5Logger

logger = logging.getLogger(__name__)


def export_policy_as_jit(actor_critic: Any, path: Union[str, os.PathLike]) -> None:
@@ -78,49 +82,85 @@ def play(args: argparse.Namespace) -> None:
export_policy_as_jit(ppo_runner.alg.actor_critic, path)
print("Exported policy as jit script to: ", path)

# export policy as a onnx module (used to run it on web)
if args.export_onnx:
path = ppo_runner.alg.actor_critic
convert_model_to_onnx(path, ActorCfg(), save_path="policy.onnx")
print("Exported policy as onnx to: ", path)
# # export policy as a onnx module (used to run it on web)
Collaborator:
Why commented?

Contributor Author:
I think it's outdated / something that was changed or handled in kinfer; when I run it I get this. Maybe Wesley knows more, but low priority for now IMO.

  loaded_dict = torch.load(path)
Exported policy as jit script to:  .
Traceback (most recent call last):
  File "/opt/anaconda3/envs/ksl/lib/python3.8/site-packages/torch/serialization.py", line 564, in _check_seekable
    f.seek(f.tell())
  File "/opt/anaconda3/envs/ksl/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1729, in __getattr__
    raise AttributeError(f"'{type(self).__name__}' object has no attribute '{name}'")
AttributeError: 'ActorCritic' object has no attribute 'seek'

During handling of the above exception, another exception occurred:

Traceback (most recent call last):
  File "sim/play.py", line 332, in <module>
    play(args)
  File "sim/play.py", line 94, in play
    convert_model_to_onnx(path, ActorCfg(), save_path="policy.onnx")
  File "/home/kasm-user/ali_repos/sim/sim/model_export.py", line 220, in convert_model_to_onnx
    all_weights = torch.load(model_path, map_location="cpu", weights_only=True)
  File "/opt/anaconda3/envs/ksl/lib/python3.8/site-packages/torch/serialization.py", line 1065, in load
    with _open_file_like(f, 'rb') as opened_file:
  File "/opt/anaconda3/envs/ksl/lib/python3.8/site-packages/torch/serialization.py", line 473, in _open_file_like
    return _open_buffer_reader(name_or_buffer)
  File "/opt/anaconda3/envs/ksl/lib/python3.8/site-packages/torch/serialization.py", line 458, in __init__
    _check_seekable(buffer)
  File "/opt/anaconda3/envs/ksl/lib/python3.8/site-packages/torch/serialization.py", line 567, in _check_seekable
    raise_err_msg(["seek", "tell"], e)
  File "/opt/anaconda3/envs/ksl/lib/python3.8/site-packages/torch/serialization.py", line 560, in raise_err_msg
    raise type(e)(msg)
AttributeError: 'ActorCritic' object has no attribute 'seek'. You can only torch.load from a file that is seekable. Please pre-load the data into a buffer like io.BytesIO and try to load from it instead.
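
For reference, a hedged sketch of the buffer-based workaround the error message suggests (illustrative only; it assumes convert_model_to_onnx could consume a seekable buffer, which is not shown here):

import io
import torch

# Serialize the in-memory ActorCritic to a seekable buffer so that
# torch.load() can read it, as the error message recommends.
buffer = io.BytesIO()
torch.save(ppo_runner.alg.actor_critic.state_dict(), buffer)
buffer.seek(0)
all_weights = torch.load(buffer, map_location="cpu", weights_only=True)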

Collaborator:
Let's remove it then.

# if args.export_onnx:
# path = ppo_runner.alg.actor_critic
# convert_model_to_onnx(path, ActorCfg(), save_path="policy.onnx")
# print("Exported policy as onnx to: ", path)

# Prepare for logging
env_logger = Logger(env.dt)
robot_index = 0
joint_index = 1
stop_state_log = 1000
env_steps_to_run = 1000
now = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
if args.log_h5:
h5_file = h5py.File(f"data{now}.h5", "w")

# Create dataset for actions
max_timesteps = stop_state_log
num_dof = env.num_dof
dset_actions = h5_file.create_dataset("actions", (max_timesteps, num_dof), dtype=np.float32)

# Create dataset of observations
buf_len = len(env.obs_history) # length of observation buffer
dset_2D_command = h5_file.create_dataset(
"observations/2D_command", (max_timesteps, buf_len, 2), dtype=np.float32
) # sin and cos commands
dset_3D_command = h5_file.create_dataset(
"observations/3D_command", (max_timesteps, buf_len, 3), dtype=np.float32
) # x, y, yaw commands
dset_q = h5_file.create_dataset(
"observations/q", (max_timesteps, buf_len, num_dof), dtype=np.float32
) # joint positions
dset_dq = h5_file.create_dataset(
"observations/dq", (max_timesteps, buf_len, num_dof), dtype=np.float32
) # joint velocities
dset_obs_actions = h5_file.create_dataset(
"observations/actions", (max_timesteps, buf_len, num_dof), dtype=np.float32
) # actions
dset_ang_vel = h5_file.create_dataset(
"observations/ang_vel", (max_timesteps, buf_len, 3), dtype=np.float32
) # root angular velocity
dset_euler = h5_file.create_dataset(
"observations/euler", (max_timesteps, buf_len, 3), dtype=np.float32
) # root orientation
# Create directory for HDF5 files
h5_dir = run_dir() / "h5_out" / args.task / now
h5_dir.mkdir(parents=True, exist_ok=True)

# Get observation dimensions
num_joints = env.num_dof
obs_buffer = env.obs_history[0].tolist()[0]
obs_size = len(obs_buffer)

# Index mappings for observation buffer
# This is based on stompypro_env.py
# https://github.com/kscalelabs/sim/blob/54c40d55eab15a9e784e89fb47e64a668851a41b/sim/envs/humanoids/stompypro_env.py#L225
command_2d_start = 0
command_2d_end = 2
command_3d_start = 2
command_3d_end = 5
joint_pos_start = 5
joint_pos_end = joint_pos_start + num_joints
joint_vel_start = joint_pos_end
joint_vel_end = joint_vel_start + num_joints
prev_actions_start = joint_vel_end
prev_actions_end = prev_actions_start + num_joints
ang_vel_start = prev_actions_end
ang_vel_end = ang_vel_start + 3
euler_start = ang_vel_end
euler_end = euler_start + 3
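# Editorial sanity check (not part of the original PR): the slices above
# should tile the flat observation exactly, so the final index must match
# the measured obs_size (11 fixed entries plus three blocks of num_joints):
# assert euler_end == obs_size == 11 + 3 * num_joints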

h5_logger = HDF5Logger(
data_name=args.task,
num_actions=num_joints,
max_timesteps=env_steps_to_run,
num_observations=obs_size,
h5_out_dir=str(h5_dir)
)
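
For context, a minimal sketch of a logger with this interface, assuming the real sim/h5_logger implementation preallocates one dataset per key and writes rows by timestep index (the class name and internals below are hypothetical, not the repo's code):

import os

import h5py
import numpy as np


class MinimalH5Logger:
    """Hypothetical stand-in for HDF5Logger; not the repo's implementation."""

    def __init__(self, data_name, num_actions, max_timesteps, num_observations, h5_out_dir):
        os.makedirs(h5_out_dir, exist_ok=True)
        self.file = h5py.File(os.path.join(h5_out_dir, f"{data_name}.h5"), "w")
        self.max_timesteps = max_timesteps
        self.t = 0
        self.datasets = {}

    def log_data(self, data):
        # Lazily create a (max_timesteps, *shape) dataset per key,
        # then write this timestep's row.
        for key, value in data.items():
            arr = np.asarray(value, dtype=np.float32)
            if key not in self.datasets:
                self.datasets[key] = self.file.create_dataset(
                    key, (self.max_timesteps, *arr.shape), dtype=np.float32
                )
            self.datasets[key][self.t] = arr
        self.t += 1

    def close(self):
        self.file.close()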

if args.log_krec:
# Create directory for KRec files
krec_dir = run_dir() / "krec_out" / args.task / now
krec_dir.mkdir(parents=True, exist_ok=True)

start_time_ns = time.time_ns() # Current time in nanoseconds

# Create KRec header
header = krec.KRecHeader(
uuid=str(uuid.uuid4()),
robot_platform=f"{args.task}-sim",
robot_serial="123",
task=args.task,
start_timestamp=start_time_ns,
end_timestamp=start_time_ns + int(env_steps_to_run * env_cfg.sim.dt * 1e9),
)

# Add actuator configs for each joint
for i in range(env.num_dof):
actuator_config = krec.ActuatorConfig(
i, # actuator id
kp=float(env.p_gains[0, i].cpu()), # get first row, i-th column and convert to CPU float
ki=0.0,
kd=float(env.d_gains[0, i].cpu()), # get first row, i-th column and convert to CPU float
max_torque=float(env.torque_limits[i].cpu()), # convert to CPU float
name=f"Joint{i}",
)
header.add_actuator_config(actuator_config)

# Create KRec object
krec_logger = krec.KRec(header)

if args.render:
camera_properties = gymapi.CameraProperties()
@@ -151,17 +191,31 @@ def play(args: argparse.Namespace) -> None:
os.mkdir(experiment_dir)
video = cv2.VideoWriter(dir, fourcc, 50.0, (1920, 1080))

for t in tqdm(range(stop_state_log)):
for t in tqdm(range(env_steps_to_run)):
actions = policy(obs.detach())
if args.log_h5:
dset_actions[t] = actions.detach().numpy()
# Extract the current observation
cur_obs = env.obs_history[0].tolist()[0]

h5_logger.log_data({
"t": np.array([t * env.dt], dtype=np.float32),
"2D_command": np.array(cur_obs[command_2d_start:command_2d_end], dtype=np.float32),
"3D_command": np.array(cur_obs[command_3d_start:command_3d_end], dtype=np.float32),
"joint_pos": np.array(cur_obs[joint_pos_start:joint_pos_end], dtype=np.float32),
"joint_vel": np.array(cur_obs[joint_vel_start:joint_vel_end], dtype=np.float32),
"prev_actions": np.array(cur_obs[prev_actions_start:prev_actions_end], dtype=np.float32),
"curr_actions": actions.detach().cpu().numpy()[0],
"ang_vel": np.array(cur_obs[ang_vel_start:ang_vel_end], dtype=np.float32),
"euler_rotation": np.array(cur_obs[euler_start:euler_end], dtype=np.float32),
"buffer": np.array(cur_obs, dtype=np.float32)
})

if args.fix_command:
env.commands[:, 0] = 0.5
env.commands[:, 1] = 0.0
env.commands[:, 2] = 0.0
env.commands[:, 3] = 0.0
obs, critic_obs, rews, dones, infos = env.step(actions.detach())
print(f"IMU: {obs[0, (3 * env.num_actions + 5) + 3 : (3 * env.num_actions + 5) + 2 * 3]}")

if args.render:
env.gym.fetch_results(env.sim, True)
@@ -187,17 +241,6 @@ def play(args: argparse.Namespace) -> None:
base_vel_yaw = env.base_ang_vel[robot_index, 2].item()
contact_forces_z = env.contact_forces[robot_index, env.feet_indices, 2].cpu().numpy()

if args.log_h5:
for i in range(buf_len):
cur_obs = env.obs_history[i].tolist()[0]
dset_2D_command[t, i] = cur_obs[0:2] # sin and cos commands
dset_3D_command[t, i] = cur_obs[2:5] # x, y, yaw commands
dset_q[t, i] = cur_obs[5 : 5 + num_dof] # joint positions
dset_dq[t, i] = cur_obs[5 + num_dof : 5 + 2 * num_dof] # joint velocities
dset_obs_actions[t, i] = cur_obs[5 + 2 * num_dof : 5 + 3 * num_dof] # actions
dset_ang_vel[t, i] = cur_obs[5 + 3 * num_dof : 8 + 3 * num_dof] # root angular velocity
dset_euler[t, i] = cur_obs[8 + 3 * num_dof : 11 + 3 * num_dof] # root orientation

env_logger.log_states(
{
"dof_pos_target": dof_pos_target,
@@ -219,15 +262,55 @@ def play(args: argparse.Namespace) -> None:
if num_episodes > 0:
env_logger.log_rewards(infos["episode"], num_episodes)

if args.log_krec:
frame = krec.KRecFrame(
video_timestamp=start_time_ns + int(t * env.dt * 1e9), # Convert simulation time to real time
frame_number=t,
inference_step=t // env_cfg.control.decimation,
)

# Add actuator states and commands for each joint
for i in range(env.num_dof):
state = krec.ActuatorState(
actuator_id=i,
online=True,
position=env.dof_pos[robot_index, i].item(),
velocity=env.dof_vel[robot_index, i].item(),
torque=env.torques[robot_index, i].item(),
)
command = krec.ActuatorCommand(
Collaborator:
If you want a 1-to-1 mapping, the command information is missing here, which is fundamental.

Contributor Author:
This means the 2D and 3D velocity commands to the robot, right? Yeah, I don't think krec has that, so it will be a design choice how we want to save it.

i, # actuator id
position=actions[robot_index, i].item(),
velocity=0.0, # if you have velocity commands
torque=actions[robot_index, i].item(),
)
frame.add_actuator_state(state)
frame.add_actuator_command(command)

# Add IMU data
imu_values = krec.IMUValues(
gyro=krec.Vec3(x=obs[0, 0], y=obs[0, 1], z=obs[0, 2]),
quaternion=krec.IMUQuaternion(x=obs[0, 3], y=obs[0, 4], z=obs[0, 5], w=obs[0, 6]),
)
frame.set_imu_values(imu_values)
krec_logger.add_frame(frame)

env_logger.print_rewards()
env_logger.plot_states()
# env_logger.plot_states()
budzianowski marked this conversation as resolved.

if args.render:
video.release()

if args.log_h5:
print("Saving data to " + os.path.abspath(f"data{now}.h5"))
h5_file.close()
# print(f"Saving HDF5 file to {h5_logger.h5_file_path}") # TODO use code from kdatagen
Collaborator:
Proper TODO: remove the print?

Contributor Author:
Yes, proper TODO; will remove as soon as it's done.

h5_logger.close()
print("HDF5 file saved!")

if args.log_krec:
krec_file_path = krec_dir / f"walking_{str(uuid.uuid4())[:8]}.krec"
print(f"Saving KRec file to {krec_file_path}")
krec_logger.save(str(krec_file_path))
print("KRec file saved!")


if __name__ == "__main__":
6 changes: 6 additions & 0 deletions sim/utils/helpers.py
@@ -232,6 +232,12 @@ def get_args() -> argparse.Namespace:
"default": False,
"help": "Enable HDF5 logging",
},
{
"name": "--log_krec",
"action": "store_true",
"default": False,
"help": "Enable KRec logging",
},
{
"name": "--trimesh",
"action": "store_true",
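
With this flag in place, KRec logging can be enabled from the CLI alongside the existing --log_h5 flag; an illustrative invocation (the task name is hypothetical):

python sim/play.py --task stompypro --log_h5 --log_krec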