How to use the render_results method in Lemoncheesecake

Best Python code snippet using lemoncheesecake
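Note that in the snippets below, render_results is an ordinary Python variable (a dict of rendered images) inside rendering codebases, not a method of the lemoncheesecake library itself. For readers who came here for lemoncheesecake, the following is a minimal test sketch using lemoncheesecake's documented decorators and matchers; the suite, test, and placeholder data are illustrative only, and the test would be executed with `lcc run` inside a lemoncheesecake project.

# Minimal lemoncheesecake test sketch (illustrative names and data).
import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to


@lcc.suite("Render results checks")
class render_results_checks:

    @lcc.test("A render_results dictionary contains the requested passes")
    def requested_passes_present(self):
        lcc.set_step("Build a stand-in render_results dict")
        render_results = {"rgb": None, "depth": None}  # placeholder for a real renderer call
        lcc.set_step("Verify the requested passes")
        check_that("render passes", sorted(render_results), equal_to(["depth", "rgb"]))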

render_utils.py

Source: render_utils.py (GitHub)

import os
import cv2
import gibson2
import numpy as np
import py360convert
from configs.data_config import IG59CLASSES
from utils.transform_utils import points2bdb2d, contour2bfov

hdr_texture = os.path.join(
    gibson2.ig_dataset_path, 'scenes', 'background', 'probe_02.hdr')
hdr_texture2 = os.path.join(
    gibson2.ig_dataset_path, 'scenes', 'background', 'probe_03.hdr')
background_texture = os.path.join(
    gibson2.ig_dataset_path, 'scenes', 'background',
    'urban_street_01.jpg')


def seg2obj(seg, i_obj, camera=None):
    """
    Extract contour and bounding box/fov from instance segmentation image.
    Parameters
    ----------
    seg: H x W numpy array of instance segmentation image
    i_obj: instance ID
    Returns
    -------
    dict of object contour and 2D bounding box: dict{
        'bfov': {'lon': float, 'lat': float, 'x_fov': float, 'y_fov': float} in rad
        'bdb2d': {'x1': int, 'x2': int, 'y1': int, 'y2': int} in pixel
        'contour': {'x': 1-d numpy array, 'y': 1-d numpy array, 'area': float} in pixel
    }
    definition of output pixel coordinate:
        x: (left) 0 --> width - 1 (right)
        y: (up) 0 --> height - 1 (down)
    definition of longitude and latitude in radiation:
        longitude: (left) -pi -- 0 --> +pi (right)
        latitude: (up) -pi/2 -- 0 --> +pi/2 (down)
    """
    height, width = seg.shape[:2]
    pano = camera is None or 'K' not in camera
    if pano:
        # if is panorama, repeat image along x axis to connect segmentation mask divided by edge
        seg = np.tile(seg, 2)
    # find and sort contours
    obj_mask = seg == i_obj
    contours, hierarchy = cv2.findContours(
        obj_mask.astype(np.uint8), cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    area = [cv2.contourArea(contour) for contour in contours]
    contours = [x for _, x in sorted(zip(area, contours), key=lambda pair: pair[0], reverse=True)]
    area = sorted(area, reverse=True)
    if pano:
        # if is panorama, consider objects on edge
        if len(area) > 1 and abs(area[0] - area[1]) < 1:
            # if object is not on the edge, choose the left contour
            contour_a, contour_b = contours[0][:, 0, :], contours[1][:, 0, :]
            contour = contour_a if np.min(contour_a[:, 0]) < np.min(contour_b[:, 0]) else contour_b
        elif len(area) == 0:
            return
        else:
            # if object is on the edge, choose the largest contour
            contour = contours[0][:, 0, :]
    else:
        # if is perspective camera, choose the largest contour
        contour = contours[0][:, 0, :]
    # from contour to bdb2d/bfov
    bdb2d = points2bdb2d(contour)
    bdb2d = {k: int(v) for k, v in bdb2d.items()}
    contour = {
        'x': contour[..., 0].astype(np.int32),
        'y': contour[..., 1].astype(np.int32),
        'area': float(area[0])
    }
    bfov = contour2bfov(contour, height, width, camera)
    return {
        'bfov': bfov,
        'bdb2d': bdb2d,
        'contour': contour
    }


def render_camera(renderer, camera, render_types: (list, str), perspective=None, obj_groups=None, objects_by_id=None):
    if isinstance(render_types, str):
        render_types = [render_types]
    # map render types
    render_type_mapping = {'sem': 'seg', 'depth': '3d'}
    igibson_types = set()
    for save_type in render_types:
        igibson_types.add(render_type_mapping.get(save_type, save_type))
    # render
    if perspective is None:
        perspective = 'K' in camera
    if perspective:
        renderer.set_fov(camera['vertical_fov'])
        renderer.set_camera(camera["pos"], camera["target"], camera["up"])
        render_results = renderer.render(modes=igibson_types)
        render_results = {t: r for t, r in zip(igibson_types, render_results)}
        for t, r in render_results.items():
            interpolation = cv2.INTER_LINEAR if t == 'rgb' else cv2.INTER_NEAREST
            render_results[t] = cv2.resize(r, (camera['width'], camera['height']), interpolation=interpolation)
    else:
        renderer.set_fov(90)
        render_results = render_pano(renderer, camera, igibson_types)
    render_results = {t: render_results[render_type_mapping.get(t, t)] for t in render_types}
    # convert igibson format
    for render_type, im in render_results.items():
        im = im[:, :, :3].copy()
        if render_type == 'seg':
            im = im[:, :, 0]
            im = (im * 255).astype(np.uint8)
            ids = np.unique(im)
            if obj_groups:
                # merge sub objects and super object (for example pillows)
                # into the main sub object (for example bed)
                for main_object, sub_objs in obj_groups.items():
                    for i_subobj in sub_objs:
                        if i_subobj in ids:
                            im[im == i_subobj] = main_object
        elif render_type == 'sem':
            seg = im[:, :, 0]
            seg = (seg * 255).astype(np.uint8)
            instances = np.unique(seg)
            im = seg.copy()
            for instance in instances:
                category = objects_by_id[instance].category
                class_id = IG59CLASSES.index(category)
                im[seg == instance] = class_id
        elif render_type == 'depth':
            if 'K' in camera:
                im = - im[:, :, 2]
            else:
                im = np.linalg.norm(im, axis=-1)
        else:
            im = (im * 255).astype(np.uint8)
        render_results[render_type] = im
    return render_results


def render_pano(renderer, camera, igibson_types):
    forward_v = camera["target"] - camera["pos"]
    left_v = np.array([-forward_v[1], forward_v[0], 0])
    up_v = np.array(camera["up"])
    rot_mat = np.stack([forward_v, left_v, up_v]).T
    cubemaps = {render_type: {} for render_type in igibson_types}
    for direction, up, name in [
        [[-1, 0, 0], [0, 0, 1], 'B'],
        [[0, 0, -1], [1, 0, 0], 'D'],
        [[0, 1, 0], [0, 0, 1], 'L'],
        [[0, -1, 0], [0, 0, 1], 'R'],
        [[0, 0, 1], [-1, 0, 0], 'U'],
        [[1, 0, 0], [0, 0, 1], 'F'],
    ]:
        direction = np.matmul(rot_mat, np.array(direction))
        up = np.matmul(rot_mat, np.array(up))
        renderer.set_camera(camera["pos"], camera["pos"] + direction, up)
        frame = renderer.render(modes=igibson_types)
        for i_render, render_type in enumerate(igibson_types):
            cubemaps[render_type][name] = frame[i_render]
    render_results = {}
    for render_type in igibson_types:
        cubemaps[render_type]['R'] = np.flip(cubemaps[render_type]['R'], 1)
        cubemaps[render_type]['B'] = np.flip(cubemaps[render_type]['B'], 1)
        cubemaps[render_type]['U'] = np.flip(cubemaps[render_type]['U'], 0)
        pano = py360convert.c2e(
            cubemaps[render_type],
            camera['height'], camera['width'],
            mode='bilinear' if render_type == 'rgb' else 'nearest',
            cube_format='dict')
        pano = pano.astype(np.float32)
        render_results[render_type] = pano
    return render_results


def is_obj_valid(obj_dict, min_contour_area=20, min_contour_len=30, min_bdb2d_width=10):
    # check contour length and area
    contour = obj_dict['contour']
    if contour['area'] < min_contour_area:
        return False
    if len(contour['x']) < min_contour_len:
        return False
    # check bdb2d width
    bdb2d = obj_dict['bdb2d']
    if bdb2d['x2'] - bdb2d['x1'] < min_bdb2d_width or bdb2d['y2'] - bdb2d['y1'] < min_bdb2d_width:
        return False
    ...
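For orientation, here is one hypothetical way render_camera from the snippet above might be called. The _StubRenderer class, the camera dictionary values, and the zero-filled frames are assumptions made only so the sketch runs on its own; in the original project the renderer would be an iGibson renderer instance, and render_camera is assumed to be in scope (e.g. imported from the module above).

# Hypothetical usage of render_camera(); stub renderer and camera values are placeholders.
import numpy as np


class _StubRenderer:
    """Stand-in for an iGibson-style renderer (assumed interface)."""

    def __init__(self, height=480, width=640):
        self.height, self.width = height, width

    def set_fov(self, fov):
        pass

    def set_camera(self, pos, target, up):
        pass

    def render(self, modes=('rgb',)):
        # One H x W x 4 float image per requested mode, matching what render_camera expects.
        return [np.zeros((self.height, self.width, 4), dtype=np.float32) for _ in modes]


camera = {
    'pos': np.array([0.0, 0.0, 1.6]),
    'target': np.array([1.0, 0.0, 1.6]),
    'up': np.array([0.0, 0.0, 1.0]),
    'width': 640,
    'height': 480,
    'K': np.eye(3),          # presence of 'K' selects the perspective branch
    'vertical_fov': 60,
}

render_results = render_camera(_StubRenderer(), camera, ['rgb', 'depth'])
print(render_results['rgb'].shape, render_results['depth'].shape)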

render_xm_unshadow_one.py

Source: render_xm_unshadow_one.py (GitHub)

import sys
import json
import os
import math
# Blender
import bpy
sys.path.append(os.path.abspath((os.path.dirname(__file__))))
import xiuminglib as xm


def main():
    # Open scene
    xm.blender.scene.open_blend('./test.blend')
    # model path
    xm.blender.render.set_cycles(w=512, h=512)
    # Remove existing cameras and lights, if any
    for o in bpy.data.objects:
        o.select = o.type in ('LAMP', 'CAMERA')
    for obj in bpy.context.scene.objects:
        obj.select = True
    bpy.ops.object.delete()
    bpy.context.scene.update()
    # loading obj
    bpy.ops.import_scene.obj(filepath='./127711540134843-h/127711540134843_scaled.obj',
                             axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl")  # -Z, Y()
    # rotate obj
    # xm.blender.object.rotate_object(rotate_angle=math.pi/6, rotate_axis=(True, False, False))
    # xm.blender.object.rotate_object(rotate_angle=math.pi/2, rotate_axis=(False, True, False))
    # xm.blender.object.scale_object(scale=(1.1, 1.1, 1.1), scale_axis=(True, True, True))
    # loading cam and light
    with open('metas/Oppo/cams/P01.json', 'r') as h:
        cam = json.load(h)
    with open('metas/Olat/trainvali_lights/L001.json', 'r') as h:
        light = json.load(h)
    cam_obj = xm.blender.camera.add_camera(
        xyz=cam['position'], rot_vec_rad=cam['rotation'],
        name=cam['name'], f=cam['focal_length'],
        sensor_width=cam['sensor_width'], sensor_height=cam['sensor_height'],
        clip_start=cam['clip_start'], clip_end=cam['clip_end'])
    # xm.blender.light.add_light_env(env='/media/jcn/新加卷/JCN/CLOTHES/Blender_rendering/HDRmaps/round_platform_4k.hdr')
    xm.blender.light.add_light_point(
        xyz=light['position'], name=light['name'], size=light['size'])
    xm.blender.render.easyset(n_samples=64, color_mode='RGB')
    bpy.context.scene.use_nodes = True
    xm.blender.render.render_unshadow('/media/jcn/新加卷/JCN/CLOTHES/Blender_rendering/render_results')
    #
    # rgb_camspc_f = os.path.join('render_results/rgb.png')
    # xm.blender.render.render(rgb_camspc_f)
    # normal_f = os.path.join('render_results/normal')
    # xm.blender.render.render_normal(normal_f)
    #
    # alpha_f = os.path.join('render_results/alpha.png')
    # xm.blender.render.render_alpha(alpha_f, samples=64)
    # # depth_f = os.path.join('render_results/depth')
    # # xm.blender.render.render_depth(depth_f)
    # AO_f = os.path.join('render_results/AO.png')
    # xm.blender.render.render_ambient(AO_f)
    # albedo_f = os.path.join('render_results/albedo.png')
    # xm.blender.render.render_albedo(albedo_f)
    # xm.blender.object.rotate_object(rotate_angle=-math.pi / 2, rotate_axis=(False, True, False))
    # xm.blender.object.rotate_object(rotate_angle=-math.pi / 6, rotate_axis=(True, False, False))
    # xm.blender.object.scale_object(scale=(1.1, 1.1, 1.1), scale_axis=(True, True, True))
    # rgb_camspc_f = os.path.join('/media/jcn/新加卷/JCN/CLOTHES/Blender_rendering/render_results')
    # xm.blender.render.render(rgb_camspc_f)
    # normal_f = os.path.join('render_results/normal_1')
    # xm.blender.render.render_normal(normal_f)
    # alpha_f = os.path.join('render_results/alpha_1.png')
    # xm.blender.render.render_alpha(alpha_f, samples=64)
    # depth_f = os.path.join('render_results/depth_1')
    # xm.blender.render.render_depth(depth_f)
    # AO_f = os.path.join('render_results/AO_1.png')
    # xm.blender.render.render_ambient(AO_f)
    # albedo_f = os.path.join('render_results/albedo_1.png')
    # xm.blender.render.render_albedo(albedo_f)
    # xm.blender.scene.save_blend('./blend_1.blend')


if __name__ == '__main__':
    ...
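Because this script imports bpy, it has to run inside Blender's bundled Python rather than a regular interpreter. A common way to drive it, sketched below, is to launch Blender headless from a separate Python process; the blender executable name and the script path are assumptions about the local setup.

# Launch Blender without a UI and execute the render script with its bundled Python.
# '--background' and '--python' are standard Blender CLI flags; the paths are placeholders.
import subprocess

subprocess.run(
    ["blender", "--background", "--python", "render_xm_unshadow_one.py"],
    check=True,
)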

render_xm_one.py

Source: render_xm_one.py (GitHub)

import sys
import json
import os
import math
# Blender
import bpy
sys.path.append(os.path.abspath((os.path.dirname(__file__))))
import xiuminglib as xm


def main():
    # Open scene
    xm.blender.scene.open_blend('./test.blend')
    # model path
    xm.blender.render.set_cycles(w=512, h=512)
    # Remove existing cameras and lights, if any
    for o in bpy.data.objects:
        o.select = o.type in ('LAMP', 'CAMERA')
    for obj in bpy.context.scene.objects:
        obj.select = True
    bpy.ops.object.delete()
    bpy.context.scene.update()
    # loading obj
    bpy.ops.import_scene.obj(filepath='./127711540134843-h/127711540134843_scaled.obj',
                             axis_forward='-Z', axis_up='Y', filter_glob="*.obj;*.mtl")  # -Z, Y()
    # rotate obj
    # xm.blender.object.rotate_object(rotate_angle=math.pi/6, rotate_axis=(True, False, False))
    # xm.blender.object.rotate_object(rotate_angle=math.pi/2, rotate_axis=(False, True, False))
    # xm.blender.object.scale_object(scale=(1.1, 1.1, 1.1), scale_axis=(True, True, True))
    # loading cam and light
    with open('metas/Oppo/cams/P01.json', 'r') as h:
        cam = json.load(h)
    cam_obj = xm.blender.camera.add_camera(
        xyz=cam['position'], rot_vec_rad=cam['rotation'],
        name=cam['name'], f=cam['focal_length'],
        sensor_width=cam['sensor_width'], sensor_height=cam['sensor_height'],
        clip_start=cam['clip_start'], clip_end=cam['clip_end'])
    xm.blender.light.add_light_env(env='/media/jcn/新加卷/JCN/CLOTHES/Blender_rendering/HDRmaps/round_platform_4k.hdr')
    xm.blender.render.easyset(n_samples=64, color_mode='RGB')
    bpy.context.scene.use_nodes = True
    xm.blender.render.render_all_color('/media/jcn/新加卷/JCN/CLOTHES/Blender_rendering/render_results')
    #
    # rgb_camspc_f = os.path.join('render_results/rgb.png')
    # xm.blender.render.render(rgb_camspc_f)
    # normal_f = os.path.join('render_results/normal')
    # xm.blender.render.render_normal(normal_f)
    #
    # alpha_f = os.path.join('render_results/alpha.png')
    # xm.blender.render.render_alpha(alpha_f, samples=64)
    # # depth_f = os.path.join('render_results/depth')
    # # xm.blender.render.render_depth(depth_f)
    # AO_f = os.path.join('render_results/AO.png')
    # xm.blender.render.render_ambient(AO_f)
    # albedo_f = os.path.join('render_results/albedo.png')
    # xm.blender.render.render_albedo(albedo_f)
    # xm.blender.object.rotate_object(rotate_angle=-math.pi / 2, rotate_axis=(False, True, False))
    # xm.blender.object.rotate_object(rotate_angle=-math.pi / 6, rotate_axis=(True, False, False))
    # xm.blender.object.scale_object(scale=(1.1, 1.1, 1.1), scale_axis=(True, True, True))
    # rgb_camspc_f = os.path.join('/media/jcn/新加卷/JCN/CLOTHES/Blender_rendering/render_results')
    # xm.blender.render.render(rgb_camspc_f)
    # normal_f = os.path.join('render_results/normal_1')
    # xm.blender.render.render_normal(normal_f)
    # alpha_f = os.path.join('render_results/alpha_1.png')
    # xm.blender.render.render_alpha(alpha_f, samples=64)
    # depth_f = os.path.join('render_results/depth_1')
    # xm.blender.render.render_depth(depth_f)
    # AO_f = os.path.join('render_results/AO_1.png')
    # xm.blender.render.render_ambient(AO_f)
    # albedo_f = os.path.join('render_results/albedo_1.png')
    # xm.blender.render.render_albedo(albedo_f)
    xm.blender.scene.save_blend('./blend_1.blend')


if __name__ == '__main__':
    ...
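To tie this back to the article's topic, the render_results directory these scripts write to could be verified afterwards from a lemoncheesecake test. The sketch below uses only the standard lemoncheesecake decorators and matchers; the output directory and file names are illustrative placeholders, not paths guaranteed by the scripts above.

# Hypothetical lemoncheesecake check of rendered outputs on disk.
import os

import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to

RESULTS_DIR = "render_results"  # assumed local output directory


@lcc.suite("Blender render outputs")
class blender_render_outputs:

    @lcc.test("Expected render passes exist on disk")
    def render_passes_exist(self):
        for name in ("rgb.png", "alpha.png", "albedo.png"):  # illustrative file names
            path = os.path.join(RESULTS_DIR, name)
            check_that("existence of %s" % path, os.path.isfile(path), equal_to(True))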

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Lemoncheesecake automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud
