Upload folder using huggingface_hub
README.md
ADDED
@@ -0,0 +1,39 @@
---
license: mit
language:
- en
pipeline_tag: image-to-image
tags:
- pytorch
- robotics
library_name: diffusers
base_model:
- timbrooks/instruct-pix2pix
---

<h1> UniSkill: Imitating Human Videos via Cross-Embodiment Skill Representations</h1>
<p>
<a href="https://kimhanjung.github.io/UniSkill">Website</a> | <a href="https://arxiv.org/abs/">Paper</a> | <a href="https://github.com/KimHanjung/UniSkill">GitHub</a>
<br>

- UniSkill is a universal skill representation learning approach that enables the use of large-scale video data by removing the need for labels or any form of alignment constraints.

- UniSkill demonstrates effective human-to-robot and robot-to-robot imitation in both simulation and real-world experiments through its embodiment-agnostic skill representation.

## Model Summary

- **Developed by:** Yonsei University
- **Model type:** image generation (language, image => robot actions)
- **License:** MIT
- **Finetuned from:** [`InstructPix2Pix`](https://huggingface.co/timbrooks/instruct-pix2pix)
- **Website:** https://kimhanjung.github.io/UniSkill/
- **Paper:** https://arxiv.org/abs/
- **Code:** https://github.com/KimHanjung/UniSkill

### Uses

UniSkill is designed to learn cross-embodiment skill representations from large-scale human and robot video datasets. Its embodiment-agnostic skill representations can be used in various ways, such as training policies and generating sub-goal images.
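A minimal loading sketch with diffusers and torch is shown below, assuming a local copy of this repository with the layout listed further down. The skill encoder and the full sub-goal generation pipeline are defined in the UniSkill GitHub code, so only generic loading calls appear here.

```python
import torch
from diffusers import UNet2DConditionModel

# Assumed local path to the downloaded weights folder (see the file listing below).
WEIGHTS_DIR = "UniSkill_final_weight"

# Denoising UNet fine-tuned from timbrooks/instruct-pix2pix (see base_model above).
unet = UNet2DConditionModel.from_pretrained(f"{WEIGHTS_DIR}/unet", torch_dtype=torch.float16)

# Inverse dynamics model checkpoint; its module class lives in the UniSkill codebase,
# so only the raw checkpoint object is loaded here.
idm_ckpt = torch.load(f"{WEIGHTS_DIR}/idm.pth", map_location="cpu")

print(unet.config.in_channels)  # 8 input channels, as in InstructPix2Pix (latent + conditioning-image latent)
```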

## License

The model is licensed under the [MIT license](./LICENSE).
UniSkill_final_weight/.gitattributes
ADDED
@@ -0,0 +1,2 @@
*.bin filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
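These .gitattributes entries mark .bin and .safetensors files as Git LFS objects. One way to fetch everything, including the LFS payloads, is huggingface_hub's snapshot_download; the repo_id below is a hypothetical placeholder, not taken from this card.

```python
from huggingface_hub import snapshot_download

# Downloads the whole repository; LFS-tracked weight files are resolved automatically.
# "KimHanjung/UniSkill" is an assumed repo_id placeholder.
local_dir = snapshot_download(repo_id="KimHanjung/UniSkill")
print(local_dir)
```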
UniSkill_final_weight/idm.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:50db59970ea8c523165da669967f94d03a13618d193c1d70c0d6415cc630f8ba
size 80338210
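The pointer above stores the SHA-256 digest (oid) and byte size of the actual idm.pth payload, so a downloaded copy can be checked against it. A small verification sketch; the local path is an assumption:

```python
import hashlib
from pathlib import Path

def matches_lfs_pointer(path: str, oid: str, size: int) -> bool:
    """Return True if the file's SHA-256 and size match the oid/size fields of an LFS pointer."""
    data = Path(path).read_bytes()
    return len(data) == size and hashlib.sha256(data).hexdigest() == oid

print(matches_lfs_pointer(
    "UniSkill_final_weight/idm.pth",
    "50db59970ea8c523165da669967f94d03a13618d193c1d70c0d6415cc630f8ba",
    80338210,
))
```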
UniSkill_final_weight/unet/config.json
ADDED
@@ -0,0 +1,68 @@
{
  "_class_name": "UNet2DConditionModel",
  "_diffusers_version": "0.30.2",
  "_name_or_path": "timbrooks/instruct-pix2pix",
  "act_fn": "silu",
  "addition_embed_type": null,
  "addition_embed_type_num_heads": 64,
  "addition_time_embed_dim": null,
  "attention_head_dim": 8,
  "attention_type": "default",
  "block_out_channels": [
    320,
    640,
    1280,
    1280
  ],
  "center_input_sample": false,
  "class_embed_type": null,
  "class_embeddings_concat": false,
  "conv_in_kernel": 3,
  "conv_out_kernel": 3,
  "cross_attention_dim": 768,
  "cross_attention_norm": null,
  "down_block_types": [
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "CrossAttnDownBlock2D",
    "DownBlock2D"
  ],
  "downsample_padding": 1,
  "dropout": 0.0,
  "dual_cross_attention": false,
  "encoder_hid_dim": null,
  "encoder_hid_dim_type": null,
  "flip_sin_to_cos": true,
  "freq_shift": 0,
  "in_channels": 8,
  "layers_per_block": 2,
  "mid_block_only_cross_attention": null,
  "mid_block_scale_factor": 1,
  "mid_block_type": "UNetMidBlock2DCrossAttn",
  "norm_eps": 1e-05,
  "norm_num_groups": 32,
  "num_attention_heads": null,
  "num_class_embeds": null,
  "only_cross_attention": false,
  "out_channels": 4,
  "projection_class_embeddings_input_dim": null,
  "resnet_out_scale_factor": 1.0,
  "resnet_skip_time_act": false,
  "resnet_time_scale_shift": "default",
  "reverse_transformer_layers_per_block": null,
  "sample_size": 64,
  "time_cond_proj_dim": null,
  "time_embedding_act_fn": null,
  "time_embedding_dim": null,
  "time_embedding_type": "positional",
  "timestep_post_act": null,
  "transformer_layers_per_block": 1,
  "up_block_types": [
    "UpBlock2D",
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D",
    "CrossAttnUpBlock2D"
  ],
  "upcast_attention": false,
  "use_linear_projection": false
}
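This config mirrors the InstructPix2Pix UNet: in_channels is 8 because the noisy latent (4 channels) is concatenated with the VAE latent of the conditioning image (4 channels), and the tensor passed as encoder_hidden_states must have width cross_attention_dim = 768. Below is a shape-check sketch that builds the architecture from this config with random weights; the paths and the dummy conditioning tensor are assumptions, since UniSkill's actual conditioning is produced by its skill encoder in the GitHub code.

```python
import torch
from diffusers import UNet2DConditionModel

# Instantiate the architecture from config.json only (randomly initialized weights).
config = UNet2DConditionModel.load_config("UniSkill_final_weight/unet")
unet = UNet2DConditionModel.from_config(config)

noisy_latent = torch.randn(1, 4, 64, 64)   # latent being denoised
cond_latent = torch.randn(1, 4, 64, 64)    # latent of the conditioning frame
conditioning = torch.randn(1, 77, 768)     # placeholder cross-attention input (width = cross_attention_dim)

with torch.no_grad():
    out = unet(
        sample=torch.cat([noisy_latent, cond_latent], dim=1),  # 8 channels, per "in_channels"
        timestep=torch.tensor([500]),
        encoder_hidden_states=conditioning,
    ).sample

print(out.shape)  # torch.Size([1, 4, 64, 64]) -> matches "out_channels": 4
```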
UniSkill_final_weight/unet/diffusion_pytorch_model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:6b80622ee6c5038079df0f6d0aaadad654aec4f56bf0c691810c2dd1d76589bf
size 3438213624
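This safetensors file (about 3.4 GB) holds the UNet weights for the config shown above. If the architecture was built from the config alone, the tensors can be loaded into it explicitly, as sketched below under the assumption that both files sit in a local UniSkill_final_weight/unet folder; calling UNet2DConditionModel.from_pretrained on that folder does the same thing in one step.

```python
from diffusers import UNet2DConditionModel
from safetensors.torch import load_file

# Rebuild the architecture from config.json, then load the LFS-downloaded weight file.
unet = UNet2DConditionModel.from_config(
    UNet2DConditionModel.load_config("UniSkill_final_weight/unet")
)
state_dict = load_file("UniSkill_final_weight/unet/diffusion_pytorch_model.safetensors")
missing, unexpected = unet.load_state_dict(state_dict, strict=False)
print(len(missing), len(unexpected))  # expect 0 and 0 if the file matches the config
```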