younik committed · Commit 1d388c3 · verified · 1 Parent(s): 6bfe272

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -80,3 +80,4 @@ antmaze/medium-play-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
  antmaze/umaze-diverse-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
  antmaze/umaze-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
  pointmaze/large-dense-v2/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
+ pointmaze/large-v2/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
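
The added line routes the new HDF5 file through the Git LFS filter (`filter=lfs diff=lfs merge=lfs -text`), so only a small pointer file is committed while the payload lives in LFS storage. A minimal sketch of how one could list which patterns in a checkout's `.gitattributes` are LFS-tracked (the helper name and local path are illustrative, not part of this repo):

```python
# Sketch: list patterns in a .gitattributes file that carry "filter=lfs".
from pathlib import Path

def lfs_tracked_patterns(gitattributes: Path) -> list[str]:
    patterns = []
    for line in gitattributes.read_text().splitlines():
        line = line.strip()
        if not line or line.startswith("#"):
            continue  # skip blanks and comments
        pattern, *attrs = line.split()
        if "filter=lfs" in attrs:
            patterns.append(pattern)
    return patterns

# After this commit the list should include
# "pointmaze/large-v2/data/main_data.hdf5".
print(lfs_tracked_patterns(Path(".gitattributes")))
```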
pointmaze/large-v2/data/main_data.hdf5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ee930174761268a85d6b655421a1500181e12afe00d2649df766df7bc5ddead
+ size 239183968
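
What lands in the repo is the LFS pointer, not the data: just the spec version, the SHA-256 of the real payload, and its size in bytes (~239 MB). A hedged sketch of checking a downloaded copy against this pointer; the local path is an assumption, while the expected digest and size are taken verbatim from the pointer above:

```python
# Sketch: verify a downloaded main_data.hdf5 against the LFS pointer.
import hashlib
from pathlib import Path

EXPECTED_OID = "7ee930174761268a85d6b655421a1500181e12afe00d2649df766df7bc5ddead"
EXPECTED_SIZE = 239_183_968  # bytes, from the "size" line of the pointer

def verify(path: Path) -> bool:
    if path.stat().st_size != EXPECTED_SIZE:
        return False  # cheap size check before hashing
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == EXPECTED_OID

print(verify(Path("pointmaze/large-v2/data/main_data.hdf5")))
```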
pointmaze/large-v2/data/metadata.json ADDED
@@ -0,0 +1 @@
+ {"total_episodes": 3360, "total_steps": 1000000, "data_format": "hdf5", "observation_space": "{\"type\": \"Dict\", \"subspaces\": {\"achieved_goal\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [2], \"low\": [-Infinity, -Infinity], \"high\": [Infinity, Infinity]}, \"desired_goal\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [2], \"low\": [-Infinity, -Infinity], \"high\": [Infinity, Infinity]}, \"observation\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [4], \"low\": [-Infinity, -Infinity, -Infinity, -Infinity], \"high\": [Infinity, Infinity, Infinity, Infinity]}}}", "action_space": "{\"type\": \"Box\", \"dtype\": \"float32\", \"shape\": [2], \"low\": [-1.0, -1.0], \"high\": [1.0, 1.0]}", "env_spec": "{\"id\": \"PointMaze_Large-v3\", \"entry_point\": \"gymnasium_robotics.envs.maze.point_maze:PointMazeEnv\", \"reward_threshold\": null, \"nondeterministic\": false, \"max_episode_steps\": 1000000, \"order_enforce\": true, \"disable_env_checker\": false, \"kwargs\": {\"maze_map\": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1], [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1], [1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1], [1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], \"reward_type\": \"sparse\", \"continuing_task\": true, \"reset_target\": true}, \"additional_wrappers\": [], \"vector_entry_point\": null}", "dataset_id": "D4RL/pointmaze/large-v2", "algorithm_name": "QIteration", "author": ["Rodrigo Perez-Vicente"], "author_email": ["[email protected]"], "code_permalink": "https://github.com/rodrigodelazcano/d4rl-minari-dataset-generation", "minari_version": "0.4.3", "eval_env_spec": "{\"id\": \"PointMaze_Large-v3\", \"entry_point\": \"gymnasium_robotics.envs.maze.point_maze:PointMazeEnv\", \"reward_threshold\": null, \"nondeterministic\": false, \"max_episode_steps\": 800, \"order_enforce\": true, \"disable_env_checker\": false, \"kwargs\": {\"maze_map\": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1], [1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1], [1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1], [1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1], [1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1], [1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1], [1, 0, 0, 1, 0, 0, 0, 1, 0, \"g\", 0, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]], \"reward_type\": \"sparse\", \"continuing_task\": true, \"reset_target\": false}, \"additional_wrappers\": [], \"vector_entry_point\": null}", "ref_max_score": 462.260009765625, "ref_min_score": 3.549999952316284, "num_episodes_average_score": 100, "dataset_size": 239.2, "description": "The data is collected from the [`PointMaze_Large-v3`](https://robotics.farama.org/envs/maze/point_maze/) environment. The agent uses a PD controller to follow a path of waypoints generated with QIteration until it reaches the goal. The task is continuing which means that when the agent reaches the goal the environment generates a new random goal without resetting the location of the agent. The reward function is sparse, only returning a value of 1 if the goal is reached, otherwise 0. To add variance to the collected paths random noise is added to the actions taken by the agent.", "requirements": ["gymnasium-robotics>=1.2.4"]}