|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
|
|
|
Worker file for the Multi-Object Video (MOVi) C (and CC) datasets.
|
|
|
* The number of objects is randomly chosen between

--min_num_objects (default 1) and --max_num_objects (default 5)
|
|
|
* The objects are randomly chosen from the Google Scanned Objects dataset
|
|
|
|
|
|
* Background is a random HDRI from the HDRI Haven dataset,
|
|
|
projected onto a Dome (half-sphere).
|
|
|
The HDRI is also used for lighting the scene.
|
|
|
"""
|
|
|
|
|
|
import logging
|
|
|
|
|
|
import bpy
|
|
|
import copy
|
|
|
import os
|
|
|
import kubric as kb
|
|
|
from kubric.simulator import PyBullet
|
|
|
from kubric.renderer import Blender
|
|
|
import numpy as np
|
|
|
import random
|
|
|
import shutil
|
|
|
|
|
|
from GSO_transfer import GSO_dict
|
|
|
from utils import save_scene_instruction, dataset_dir
|
|
|
|
|
|
|
|
|
# Edit-type tag used to name the annotation JSON and the dataset directory.
DATASET_TYPE = "closer"

# Axis-aligned boxes: [(x_min, y_min, z_min), (x_max, y_max, z_max)].
SPAWN_REGION = [(-8, -8, 0), (8, 8, 5)]
# Spawn box for foreground objects; the z entries are overwritten per object
# in the placement loop so each object rests on the floor.
SPAWN_REGION_OBJ = [[-6, -6, 0.5], [6, 6, 0.5]]
# Initial-velocity range; not referenced anywhere in this file's visible code.
VELOCITY_RANGE = [(-4., -4., 0.), (4., 4., 0.)]
|
|
|
|
|
|
|
|
|
# Command-line configuration. kb.ArgumentParser also provides the standard
# kubric flags (frame_start, frame_end, frame_rate, resolution, seed, ...).
parser = kb.ArgumentParser()
parser.add_argument("--objects_split", choices=["train", "test"],
                    default="train")

# Object-count bounds (NOTE: this worker hard-codes num_objects = 2 below).
parser.add_argument("--min_num_objects", type=int, default=1,
                    help="minimum number of objects")
parser.add_argument("--max_num_objects", type=int, default=5,
                    help="maximum number of objects")

# Floor physics material parameters applied to the dome.
parser.add_argument("--floor_friction", type=float, default=0.3)
parser.add_argument("--floor_restitution", type=float, default=0.5)
parser.add_argument("--backgrounds_split", choices=["train", "test"],
                    default="train")

parser.add_argument("--camera", choices=["fixed_random", "linear_movement"],
                    default="fixed_random")
parser.add_argument("--max_camera_movement", type=float, default=4.0)
# Size range for the objects' longest side, in scene units.
parser.add_argument("--smallest_scale", type=float, default=2.)
parser.add_argument("--largest_scale", type=float, default=4.)

# Asset manifests from the public kubric GCS buckets.
parser.add_argument("--kubasic_assets", type=str,
                    default="gs://kubric-public/assets/KuBasic/KuBasic.json")
parser.add_argument("--hdri_assets", type=str,
                    default="gs://kubric-public/assets/HDRI_haven/HDRI_haven.json")
parser.add_argument("--gso_assets", type=str,
                    default="gs://kubric-public/assets/GSO/GSO.json")
parser.add_argument("--save_state", dest="save_state", action="store_true")
parser.set_defaults(save_state=False, frame_end=24, frame_rate=12,
                    resolution=512)
parser.add_argument("--sub_outputdir", type=str, default="test sub output dir")
parser.add_argument("--generate_idx", type=int, default=-1, help="generation idx")
FLAGS = parser.parse_args()
|
|
|
|
|
|
|
|
|
import pyquaternion as pyquat
|
|
|
def default_rng():
    """Create a fresh numpy legacy ``RandomState`` seeded from OS entropy."""
    rng = np.random.RandomState()
    return rng
|
|
|
|
|
|
def random_rotation(axis=None, rng=None):
    """Compute a random rotation as a quaternion 4-tuple.

    If ``axis`` is None the rotation is sampled uniformly over all possible
    orientations. Otherwise it is a random rotation of angle in
    [-pi/2, pi/2] around the given axis, which may be a 3-vector or one of
    the strings "X", "Y", "Z".

    Args:
      axis: None, an axis name, or an axis direction 3-tuple.
      rng: optional ``np.random.RandomState``. A fresh generator is created
        per call when omitted. (The original used ``rng=default_rng()`` as
        the default, which is evaluated once at definition time so all
        default calls silently shared one generator.)

    Returns:
      A 4-tuple of floats representing the quaternion.
    """
    if rng is None:
        rng = default_rng()

    if axis is None:
        # Uniform random unit quaternion from three uniform samples.
        r1, r2, r3 = rng.random(3)
        q1 = np.sqrt(1.0 - r1) * (np.sin(2 * np.pi * r2))
        q2 = np.sqrt(1.0 - r1) * (np.cos(2 * np.pi * r2))
        q3 = np.sqrt(r1) * (np.sin(2 * np.pi * r3))
        q4 = np.sqrt(r1) * (np.cos(2 * np.pi * r3))
        return q1, q2, q3, q4

    # Named axis -> unit vector.
    if isinstance(axis, str) and axis.upper() in ["X", "Y", "Z"]:
        axis = {"X": (1., 0., 0.),
                "Y": (0., 1., 0.),
                "Z": (0., 0., 1.)}[axis.upper()]
    quat = pyquat.Quaternion(axis=axis, angle=rng.uniform(-0.5*np.pi, 0.5*np.pi))
    return tuple(quat)
|
|
|
|
|
|
|
|
|
from kubric.core import objects
|
|
|
def rotation_sampler(axis=None):
    """Build a kubric-style sampler ``(asset, rng)`` that assigns a random
    quaternion to the asset, optionally constrained to rotate about ``axis``."""
    def _sampler(asset: objects.PhysicalObject, rng):
        asset.quaternion = random_rotation(axis=axis, rng=rng)
    return _sampler
|
|
|
|
|
|
|
|
|
def move_until_no_overlap(asset, simulator, spawn_region=((-1, -1, -1), (1, 1, 1)), max_trials=100,
                          rng=None):
    """Resample ``asset``'s position inside ``spawn_region`` until the
    simulator reports no overlap with any other body.

    Args:
      asset: the kubric object to reposition.
      simulator: physics backend exposing ``check_overlap``.
      spawn_region: ((x_min, y_min, z_min), (x_max, y_max, z_max)) box.
      max_trials: maximum number of resampling attempts.
      rng: optional ``np.random.RandomState``. Created per call when omitted.
        (The original default ``rng=default_rng()`` was evaluated once at
        definition time, so all default calls shared one generator.)

    Returns:
      Whatever ``kb.randomness.resample_while`` returns.
    """
    if rng is None:
        rng = default_rng()
    return kb.randomness.resample_while(
        asset,
        samplers=[kb.randomness.position_sampler(spawn_region)],
        condition=simulator.check_overlap,
        max_trials=max_trials,
        rng=rng)
|
|
|
|
|
|
|
|
|
def check_ok(obj, pos, region):
    """Return True iff ``pos`` lies within ``region``'s x/y bounds and
    ``obj`` does not overlap anything in the module-global simulator.

    ``region`` is ((x_min, y_min, _), (x_max, y_max, _)); the z bound is
    intentionally ignored.
    """
    x, y, z = pos
    (min_x, min_y, _), (max_x, max_y, _) = region
    if not (min_x <= x <= max_x and min_y <= y <= max_y):
        return False
    # Overlap test against the PyBullet simulator created in the script body.
    return not simulator.check_overlap(obj)
|
|
|
|
|
|
|
|
|
def get_obj_x_left(bound, scale):
    """Scaled distance from the object's origin to its -x face."""
    min_corner = bound[0]
    return -(min_corner[0] * scale[0])
|
|
|
|
|
|
def get_obj_x_right(bound, scale):
    """Scaled distance from the object's origin to its +x face."""
    max_corner = bound[1]
    return max_corner[0] * scale[0]
|
|
|
|
|
|
def get_obj_y_front(bound, scale):
    """Scaled distance from the object's origin to its -y face."""
    min_corner = bound[0]
    return -(min_corner[1] * scale[1])
|
|
|
|
|
|
def get_obj_y_behind(bound, scale):
    """Scaled distance from the object's origin to its +y face."""
    max_corner = bound[1]
    return max_corner[1] * scale[1]
|
|
|
|
|
|
def get_obj_z(bound, scale):
    """Scaled z coordinate of the object's lowest point (usually negative)."""
    min_corner = bound[0]
    return min_corner[2] * scale[2]
|
|
|
|
|
|
def get_obj_z_up(bound, scale):
    """Scaled z coordinate of the object's highest point."""
    max_corner = bound[1]
    return max_corner[2] * scale[2]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
def get_new_pos(bounds, scale, ref_location, ref_pos, ref_z_up, ref_object, rng):
    """Propose a position for a new object relative to a reference object.

    Args:
      bounds: (min_corner, max_corner) bounding box of the new object.
      scale: per-axis scale of the new object.
      ref_location: keyword naming a relative placement; unrecognized values
        fall back to "same x/y as the reference".
      ref_pos: position of the reference object.
      ref_z_up: top z of the reference object (unused by the fallback path).
      ref_object: the reference object itself (unused by the fallback path).
      rng: random generator (unused by the fallback path).

    Returns:
      [x, y, z] with z chosen so the object's lowest point sits 0.2 above z=0.
    """
    # Rest the object's lowest point slightly above the floor.
    obj_z = -get_obj_z(bounds, scale) + 0.2

    # NOTE(review): the original body referenced an undefined name
    # ``locations`` — a guaranteed NameError on every call — so the
    # keyword -> position mapping was evidently lost. An empty mapping
    # preserves the documented ``.get`` fallback (place at the reference's
    # x/y). Restore the mapping (presumably built from get_obj_x_left/right,
    # get_obj_y_front/behind and ref_z_up) if relative placements are needed.
    locations = {}
    return locations.get(ref_location, [ref_pos[0], ref_pos[1], obj_z])
|
|
|
|
|
|
def add_new_obj(scene, new_obj, ref_location, ref_object, rng, max_trails=50):
    """Place ``new_obj`` relative to ``ref_object``, add it to ``scene`` and
    retry the placement until it lies in the spawn region without overlaps.

    Returns the scene on success, or None once ``max_trails`` is exceeded.
    """
    anchor_pos = ref_object.position
    anchor_top = get_obj_z_up(ref_object.bounds, ref_object.scale)
    new_obj.position = get_new_pos(new_obj.bounds, new_obj.scale, ref_location,
                                   anchor_pos, anchor_top, ref_object, rng)
    # The object must be in the scene before overlap checks can see it.
    scene += new_obj

    attempts = 0
    while not check_ok(new_obj, new_obj.position, SPAWN_REGION_OBJ):
        attempts += 1
        new_obj.position = get_new_pos(new_obj.bounds, new_obj.scale, ref_location,
                                       anchor_pos, anchor_top, ref_object, rng)
        if attempts > max_trails:
            print('cannot put the object, break')
            return None
    print('try {} times'.format(attempts))
    return scene
|
|
|
|
|
|
def gen_caption(obj_name, obj_scale, ref_obj_name, ref_obj_scale, type='closer'):
    """Sample a natural-language instruction for the requested scene edit.

    Args:
      obj_name: readable name of the first (moved) object.
      obj_scale: scale of that object (currently unused; kept for API parity).
      ref_obj_name: readable name of the reference object.
      ref_obj_scale: scale of the reference object (currently unused).
      type: one of 'closer', 'further' or 'swap'.

    Returns:
      One caption chosen uniformly at random from the templates for ``type``.

    Raises:
      ValueError: if ``type`` is not a known edit. (The original fell
        through to an UnboundLocalError on ``captions``.)
    """
    if type == 'closer':
        captions = [f'Move the {obj_name} and the {ref_obj_name} closer together.', f'Move the {obj_name} and the {ref_obj_name} closer.', f'Move the {obj_name} and the {ref_obj_name} closer to each other.', f'Move both objects closer', f'Move the two objects closer together.', f'Move the two objects closer to each other.']
    elif type == 'further':
        captions = [f'Move the {obj_name} further away from the {ref_obj_name}.', f'Move the {obj_name} and the {ref_obj_name} further apart.', f'Move the {obj_name} and the {ref_obj_name} further away from each other.', f'Move both objects further apart', f'Move the two objects further away from each other.', f'Move the two objects further apart.']
    elif type == 'swap':
        captions = [f'Swap the positions of the {obj_name} and the {ref_obj_name}.', f'Swap the positions of the {ref_obj_name} and the {obj_name}.', f'Swap the positions of the two objects.', f'Swap positions of both items.']
    else:
        raise ValueError(f'unknown caption type: {type!r}')
    return random.choice(captions)
|
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Scene setup: create the scene from FLAGS, attach the physics simulator and
# renderer, and open the three asset sources (KuBasic, GSO, HDRI Haven).
# ---------------------------------------------------------------------------
print('Generate {} Sample'.format(FLAGS.generate_idx))
scene, rng, output_dir, scratch_dir = kb.setup(FLAGS)
# All images/annotations for this worker go under a per-run subdirectory.
output_dir = output_dir / FLAGS.sub_outputdir

simulator = PyBullet(scene, scratch_dir)
renderer = Blender(scene, scratch_dir, samples_per_pixel=64)
kubasic = kb.AssetSource.from_manifest(FLAGS.kubasic_assets)
gso = kb.AssetSource.from_manifest(FLAGS.gso_assets)
hdri_source = kb.AssetSource.from_manifest(FLAGS.hdri_assets)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Pick a random HDRI background. fraction=0. keeps every HDRI in the train
# split (the --backgrounds_split flag is not consulted here).
train_backgrounds, test_backgrounds = hdri_source.get_test_split(fraction=0.)
logging.info("Choosing one of the %d training backgrounds...", len(train_backgrounds))
hdri_id = rng.choice(train_backgrounds)
background_hdri = hdri_source.create(asset_id=hdri_id)
logging.info("Using background %s", hdri_id)
scene.metadata["background"] = hdri_id
# The same HDRI provides the scene's ambient lighting.
renderer._set_ambient_light_hdri(background_hdri.filename)

# Static dome (half-sphere) acting as floor and backdrop.
dome = kubasic.create(asset_id="dome", name="dome",
                      friction=FLAGS.floor_friction,
                      restitution=FLAGS.floor_restitution,
                      static=True, background=True)
assert isinstance(dome, kb.FileBasedObject)
scene += dome
# Project the chosen HDRI onto the dome's material via Blender's node tree.
dome_blender = dome.linked_objects[renderer]
texture_node = dome_blender.data.materials[0].node_tree.nodes["Image Texture"]
texture_node.image = bpy.data.images.load(background_hdri.filename)
|
|
|
|
|
|
|
|
|
|
|
|
def get_linear_camera_motion_start_end(
    movement_speed: float,
    inner_radius: float = 8.,
    outer_radius: float = 12.,
    z_offset: float = 0.1,
):
    """Sample a linear camera path whose endpoints both lie inside the
    half-sphere shell between ``inner_radius`` and ``outer_radius`` and above
    ``z_offset``. Rejection-samples until a valid endpoint pair is found.

    Returns:
      (camera_start, camera_end) as numpy arrays.
    """
    while True:
        start = np.array(kb.sample_point_in_half_sphere_shell(
            inner_radius, outer_radius, z_offset))
        # Random heading scaled to the requested speed (uses the global rng).
        heading = rng.rand(3) - 0.5
        end = start + heading / np.linalg.norm(heading) * movement_speed
        radius_ok = inner_radius <= np.linalg.norm(end) <= outer_radius
        if radius_ok and end[2] > z_offset:
            return start, end
|
|
|
|
|
|
|
|
|
|
|
|
logging.info("Setting up the Camera...")
scene.camera = kb.PerspectiveCamera(focal_length=35., sensor_width=36)
if FLAGS.camera == "fixed_random":
    # Single static viewpoint looking at the origin.
    scene.camera.position = (0, -10, 15)
    scene.camera.look_at((0, 0, 0))
elif FLAGS.camera == "linear_movement":
    camera_start, camera_end = get_linear_camera_motion_start_end(
        movement_speed=rng.uniform(low=0., high=FLAGS.max_camera_movement)
    )
    # Keyframe a linear interpolation between the two endpoints, with one
    # frame of padding on both sides (frame_start-1 .. frame_end+1).
    for frame in range(FLAGS.frame_start - 1, FLAGS.frame_end + 2):
        interp = ((frame - FLAGS.frame_start + 1) /
                  (FLAGS.frame_end - FLAGS.frame_start + 3))
        # NOTE(review): interp weights camera_start more as frames advance,
        # so the camera actually travels end -> start; confirm this is the
        # intended direction.
        scene.camera.position = (interp * np.array(camera_start) +
                                 (1 - interp) * np.array(camera_end))
        scene.camera.look_at((0, 0, 0))
        scene.camera.keyframe_insert("position", frame)
        scene.camera.keyframe_insert("quaternion", frame)
|
|
|
|
|
|
|
|
|
|
|
|
# fraction=0. keeps every GSO asset in the train split.
train_split, test_split = gso.get_test_split(fraction=0.)
logging.info("Choosing one of the %d training objects...", len(train_split))
# Restrict sampling to the curated subset with human-readable names.
active_split = list(GSO_dict.keys())

# This worker always builds a two-object scene (min/max_num_objects flags
# are not consulted).
num_objects = 2
logging.info("Step 1: Randomly placing %d objects:", num_objects)
object_state_save_dict = {}  # serializable per-object state snapshots
object_state_ref_dict = {}   # live kubric object handles

# NOTE(review): num_objects+1 ids are sampled but only num_objects are used.
object_id_list = random.sample(active_split, num_objects+1)
|
|
|
|
|
|
for i in range(num_objects):
    object_id = object_id_list[i]
    obj = gso.create(asset_id=object_id)
    assert isinstance(obj, kb.FileBasedObject)
    # Normalize the asset so its longest bounding-box side equals `scale`.
    scale = rng.uniform(FLAGS.smallest_scale, FLAGS.largest_scale)
    obj.scale = scale / np.max(obj.bounds[1] - obj.bounds[0])

    # Pin the spawn region's z so the object's lowest point rests at z=0.
    obj_pos_z = - get_obj_z(obj.bounds, obj.scale)
    SPAWN_REGION_OBJ[0][2], SPAWN_REGION_OBJ[1][2] = obj_pos_z, obj_pos_z
    obj.position = rng.uniform(*SPAWN_REGION_OBJ)

    obj.metadata["scale"] = scale
    scene += obj
    # Resample the position until the object no longer intersects anything.
    move_until_no_overlap(obj, simulator, spawn_region=SPAWN_REGION_OBJ, rng=rng)

    obj.velocity = [0, 0, 0]  # static scene: no initial motion
    logging.info(" Added %s at %s", obj.asset_id, obj.position)
    # Snapshot serializable state (position is captured later for object i+1).
    object_state_save_dict[i] = {'object_id': object_id,
                                 'object_scale': obj.scale,
                                 'object_quaternion': obj.quaternion,
                                 'object_bounds': obj.bounds}
    object_state_ref_dict[i] = {'object': obj}
|
|
|
|
|
|
|
|
|
# Object 0 acts as the reference; object 1 is the one named first in captions.
ref_object = object_state_ref_dict[list(object_state_ref_dict.keys())[0]]['object']
ref_object_name = GSO_dict[ref_object.asset_id]
# NOTE(review): despite the name, this holds the reference object's POSITION,
# not a location keyword.
ref_location = ref_object.position

obj = object_state_ref_dict[list(object_state_ref_dict.keys())[1]]['object']
new_object_name = GSO_dict[obj.asset_id]

print('Generate the first scene.')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if scene is None:
    exit()
frame = renderer.render_still()

# Choose which edit this sample demonstrates.
edits = ['close', 'swap']
edit = random.choice(edits)

os.makedirs(output_dir/'{}'.format(FLAGS.generate_idx), exist_ok=True)
kb.write_png(frame["rgba"], output_dir/"{}/image0.png".format(FLAGS.generate_idx))
# caption_1 describes the REVERSE direction (image1 -> image0), hence
# 'further' when the forward edit moves the objects closer.
caption_1 = gen_caption(new_object_name, obj.metadata["scale"], ref_object_name, ref_object.metadata["scale"], type="further" if edit == 'close' else 'swap')
print(caption_1)

# Snapshot of the second object after the first render. `i` and `object_id`
# are the final values left over from the placement loop above.
object_state_save_dict[i+1] = {'object_id': object_id,
                               'object_scale': obj.scale,
                               'object_pos': obj.position,
                               'object_quaternion': obj.quaternion,
                               'object_bounds': obj.bounds}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print('Generate the second scene.')

ref_obj_pos = ref_object.position
ref_obj_z_up = get_obj_z_up(ref_object.bounds, ref_object.scale)
logging.info(f'Object position: {obj.position}')

obj1_pos = obj.position
obj2_pos = ref_location
if edit == 'close':
    # Move each object a random 10-30% of the way along the line joining them.
    direction = obj2_pos - obj1_pos
    factor1, factor2 = random.uniform(0.1, 0.3), random.uniform(0.1, 0.3)
    obj.position = obj1_pos + direction * factor1
    ref_object.position = obj2_pos - direction * factor2
else:
    # 'swap': exchange the two objects' positions.
    obj.position = obj2_pos
    ref_object.position = obj1_pos

frame = renderer.render_still()
kb.write_png(frame["rgba"], output_dir/"{}/image1.png".format(FLAGS.generate_idx))
# caption_2 describes the forward edit (image0 -> image1).
caption_2 = gen_caption(new_object_name, obj.metadata["scale"], ref_object_name, ref_object.metadata["scale"], type="closer" if edit == 'close' else 'swap')
print(caption_2)

# Overwrites the entry recorded after the first render with the edited state.
object_state_save_dict[i+1] = {'object_id': object_id,
                               'object_scale': obj.scale,
                               'object_pos': obj.position,
                               'object_quaternion': obj.quaternion,
                               'object_bounds': obj.bounds}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Two training pairs per sample: the forward edit (image0 -> image1 with
# caption_2) and its inverse (image1 -> image0 with caption_1).
local_ann = [{
    'input': dataset_dir(DATASET_TYPE) + "{}/image0.png".format(FLAGS.generate_idx),
    'output': dataset_dir(DATASET_TYPE) + "{}/image1.png".format(FLAGS.generate_idx),
    'instruction': caption_2,
},
    {
        'input': dataset_dir(DATASET_TYPE) + "{}/image1.png".format(FLAGS.generate_idx),
        'output': dataset_dir(DATASET_TYPE) + "{}/image0.png".format(FLAGS.generate_idx),
        'instruction': caption_1,
    }
]
save_scene_instruction(f"{output_dir}/eq_kubric_{DATASET_TYPE}.json", local_ann, DATASET_TYPE, FLAGS.generate_idx)

kb.done()
|
|
|
|