Upload folder using huggingface_hub
.gitattributes CHANGED
@@ -78,3 +78,4 @@ antmaze/large-play-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
 antmaze/medium-diverse-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
 antmaze/medium-play-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
 antmaze/umaze-diverse-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
+antmaze/umaze-v1/data/main_data.hdf5 filter=lfs diff=lfs merge=lfs -text
antmaze/umaze-v1/data/main_data.hdf5 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:14fc4140961bc8cafccea11ef1b99cff3ab780de124147731329485f4602ba22
+size 605029288
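The three `+` lines above are a Git LFS pointer, not the dataset itself: the repository stores this small stub, and the ~605 MB HDF5 file is fetched separately by its content hash. As a minimal sketch (not part of the commit; the local path is an assumption), a downloaded copy can be checked against the pointer's `oid` and `size`:

```python
import hashlib
import os

LOCAL_PATH = "antmaze/umaze-v1/data/main_data.hdf5"  # hypothetical local checkout path
EXPECTED_OID = "14fc4140961bc8cafccea11ef1b99cff3ab780de124147731329485f4602ba22"
EXPECTED_SIZE = 605029288  # bytes, from the pointer's "size" line

# The size check is cheap, so run it first.
assert os.path.getsize(LOCAL_PATH) == EXPECTED_SIZE, "size mismatch"

# Hash in chunks to avoid loading ~600 MB into memory at once.
sha = hashlib.sha256()
with open(LOCAL_PATH, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)
assert sha.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("LFS pointer verified")
```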
antmaze/umaze-v1/data/metadata.json ADDED
@@ -0,0 +1 @@
+{"total_episodes": 1430, "total_steps": 1000000, "data_format": "hdf5", "observation_space": "{\"type\": \"Dict\", \"subspaces\": {\"achieved_goal\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [2], \"low\": [-Infinity, -Infinity], \"high\": [Infinity, Infinity]}, \"desired_goal\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [2], \"low\": [-Infinity, -Infinity], \"high\": [Infinity, Infinity]}, \"observation\": {\"type\": \"Box\", \"dtype\": \"float64\", \"shape\": [27], \"low\": [-Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity, -Infinity], \"high\": [Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity, Infinity]}}}", "action_space": "{\"type\": \"Box\", \"dtype\": \"float32\", \"shape\": [8], \"low\": [-1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0, -1.0], \"high\": [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]}", "env_spec": "{\"id\": \"AntMaze_UMaze-v4\", \"entry_point\": \"gymnasium_robotics.envs.maze.ant_maze_v4:AntMazeEnv\", \"reward_threshold\": null, \"nondeterministic\": false, \"max_episode_steps\": 700, \"order_enforce\": true, \"disable_env_checker\": false, \"kwargs\": {\"maze_map\": [[1, 1, 1, 1, 1], [1, \"g\", 0, 0, 1], [1, 1, 1, 0, 1], [1, \"r\", 0, 0, 1], [1, 1, 1, 1, 1]], \"reward_type\": \"sparse\", \"continuing_task\": true, \"reset_target\": false}, \"additional_wrappers\": [], \"vector_entry_point\": null}", "dataset_id": "D4RL/antmaze/umaze-v1", "algorithm_name": "QIteration+SAC", "author": ["Alex Davey"], "author_email": ["[email protected]"], "code_permalink": "https://github.com/rodrigodelazcano/d4rl-minari-dataset-generation", "minari_version": "0.4.3", "eval_env_spec": "{\"id\": \"AntMaze_UMaze-v4\", \"entry_point\": \"gymnasium_robotics.envs.maze.ant_maze_v4:AntMazeEnv\", \"reward_threshold\": null, \"nondeterministic\": false, \"max_episode_steps\": 700, \"order_enforce\": true, \"disable_env_checker\": false, \"kwargs\": {\"maze_map\": [[1, 1, 1, 1, 1], [1, 0, 0, \"r\", 1], [1, 0, 1, 1, 1], [1, 0, 0, \"g\", 1], [1, 1, 1, 1, 1]], \"reward_type\": \"sparse\", \"continuing_task\": true, \"reset_target\": false}, \"additional_wrappers\": [], \"vector_entry_point\": null}", "ref_max_score": 452.19000244140625, "ref_min_score": 0.0, "num_episodes_average_score": 100, "dataset_size": 605.0, "description": "The data is collected from the [`AntMaze_UMaze-v4`](https://robotics.farama.org/envs/maze/ant_maze/) environment, which contains a U shape maze. Every episode has the same fixed goal and reset locations. The success rate of all the trajectories is more than 90%, failed trajectories occur because the Ant flips and can't stand up again. Also note that when the Ant reaches the goal the episode doesn't terminate or generate a new target leading to a reward accumulation. The Ant reaches the goals by following a set of waypoints using a goal-reaching policy trained using SAC.", "requirements": ["mujoco>=3.1.1,<=3.1.6", "gymnasium-robotics>=1.2.3"]}
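The metadata describes a Minari dataset: 1430 episodes over 1,000,000 steps from `AntMaze_UMaze-v4`, with a `Dict` observation space (`achieved_goal` and `desired_goal` of shape `[2]`, `observation` of shape `[27]`) and a float32 `Box` action space of shape `[8]` in `[-1, 1]`. A hedged sketch of loading it with the `minari` package named in the metadata, using the `dataset_id` field above (exact id handling and keyword arguments can differ between minari releases, so treat this as illustrative rather than definitive):

```python
import minari

dataset_id = "D4RL/antmaze/umaze-v1"  # the "dataset_id" field above

# Fetch the dataset from the remote registry if it is not cached locally.
minari.download_dataset(dataset_id)
dataset = minari.load_dataset(dataset_id)

print(dataset.total_episodes)  # 1430, per "total_episodes"
print(dataset.total_steps)     # 1000000, per "total_steps"

# Observations follow the Dict space recorded in the metadata;
# actions are float32 Box(8,) clipped to [-1, 1].
episode = dataset.sample_episodes(1)[0]
print(episode.observations["observation"].shape)  # (episode length + 1, 27)
print(episode.actions.shape)                      # (episode length, 8)

# Rebuild the recorded AntMaze_UMaze-v4 environment for evaluation
# (requires gymnasium-robotics, per "requirements").
env = dataset.recover_environment()
```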