Best Python code snippet using lemoncheesecake
region.py
Source:region.py  
...
        # which causes issues due to dictionaries resizing
        self._chunk_temp: queue.Queue = queue.Queue()
        self._chunk_temp_set = set()
        self._rebuild_regions = []

    def add_render_chunk(self, render_chunk: RenderChunk):
        """Add a RenderChunk to the database.
        A call to _merge_chunk_temp from the main thread will be needed for them to be drawn.
        This is done after the next draw call."""
        self._chunk_temp.put(render_chunk)
        chunk_coords = (render_chunk.cx, render_chunk.cz)
        self._chunk_temp_set.add(chunk_coords)

    def render_chunk_needs_rebuild(self, chunk_coords: Tuple[int, int]) -> bool:
        return (
            chunk_coords not in self._chunk_temp_set
            and self.render_chunk_in_main_database(chunk_coords)
            and self.get_render_chunk(chunk_coords).needs_rebuild()
        )

    def get_render_chunk(self, chunk_coords: Tuple[int, int]) -> RenderChunk:
        """Get a RenderChunk from the database.
        Might throw a KeyError if it has not been added to the real database yet."""
        return self._regions[self.region_coords(*chunk_coords)].get_render_chunk(
            chunk_coords
        )

    def _merge_chunk_temp(self):
        for _ in range(self._chunk_temp.qsize()):
            render_chunk = self._chunk_temp.get()
            region_coords = self.region_coords(render_chunk.cx, render_chunk.cz)
            if region_coords not in self._regions:
                self._regions[region_coords] = RenderRegion(
                    region_coords[0],
                    region_coords[1],
                    self.region_size,
                    self.context_identifier,
                    self._resource_pack,
                )
            self._regions[region_coords].add_render_chunk(render_chunk)
        self._chunk_temp_set.clear()

    def __contains__(self, chunk_coords: Tuple[int, int]):
        return (
            chunk_coords in self._chunk_temp_set
            or self.render_chunk_in_main_database(chunk_coords)
        )

    def render_chunk_in_main_database(self, chunk_coords: Tuple[int, int]) -> bool:
        region_coords = self.region_coords(*chunk_coords)
        return (
            region_coords in self._regions
            and chunk_coords in self._regions[region_coords]
        )

    def region_coords(self, cx, cz):
        return cx // self.region_size, cz // self.region_size

    def draw(self, camera_matrix, camera):
        cam_rx, cam_rz = numpy.floor(
            numpy.array(camera)[[0, 2]] / (16 * self.region_size)
        )
        cam_cx, cam_cz = numpy.floor(numpy.array(camera)[[0, 2]] / 16)
        for region in sorted(
            self._regions.values(),
            key=lambda x: abs(x.rx - cam_rx) + abs(x.rz - cam_rz),
            reverse=True,
        ):
            region.draw(camera_matrix, cam_cx, cam_cz)
        self._merge_chunk_temp()

    def unload(self, safe_area: Tuple[int, int, int, int] = None):
        if safe_area is None:
            for _ in range(self._chunk_temp.qsize()):
                self._chunk_temp.get()
            self._chunk_temp_set.clear()
            for region in self._regions.values():
                region.unload()
            self._regions.clear()
        else:
            min_rx, min_rz = self.region_coords(*safe_area[:2])
            max_rx, max_rz = self.region_coords(*safe_area[2:])
            delete_regions = []
            for region in self._regions.values():
                if not (
                    min_rx <= region.rx <= max_rx and min_rz <= region.rz <= max_rz
                ):
                    region.unload()
                    delete_regions.append((region.rx, region.rz))
            for region in delete_regions:
                del self._regions[region]

    def rebuild(self):
        """Rebuild a single region, the one that was last rebuilt the longest ago.
        Put this on a semi-fast clock to rebuild all regions."""
        if not self._rebuild_regions:
            self._rebuild_regions = list(self._regions.keys())
        if self._rebuild_regions:
            region = self._rebuild_regions.pop(0)
            if region in self._regions:
                self._regions[region].rebuild()


class RenderRegion(TriMesh):
    def __init__(
        self,
        rx: int,
        rz: int,
        region_size: int,
        context_identifier: str,
        resource_pack: OpenGLResourcePack,
    ):
        """A group of RenderChunks to minimise the number of draw calls"""
        super().__init__(
            context_identifier, resource_pack.get_atlas_id(context_identifier)
        )
        self.rx = rx
        self.rz = rz
        self._chunks: Dict[Tuple[int, int], RenderChunk] = {}
        self._merged_chunk_locations: Dict[
            Tuple[int, int], Tuple[int, int, int, int]
        ] = {}
        self._manual_chunks: Dict[Tuple[int, int], RenderChunk] = {}
        self.region_transform = numpy.eye(4, dtype=numpy.float64)
        self.region_transform[3, [0, 2]] = numpy.array([rx, rz]) * region_size * 16

    @property
    def vertex_usage(self):
        return GL_DYNAMIC_DRAW

    def __repr__(self):
        return f"RenderRegion({self.rx}, {self.rz})"

    def __contains__(self, item):
        return item in self._chunks

    def add_render_chunk(self, render_chunk: RenderChunk):
        """Add a chunk to the region"""
        chunk_coords = (render_chunk.cx, render_chunk.cz)
        if chunk_coords in self._chunks:
            self._chunks[chunk_coords].unload()
        self._disable_merged_chunk(chunk_coords)
        self._chunks[chunk_coords] = render_chunk
        self._manual_chunks[chunk_coords] = render_chunk

    def get_render_chunk(self, chunk_coords: Tuple[int, int]):
        return self._chunks[chunk_coords]

    def _disable_merged_chunk(self, chunk_coords: Tuple[int, int]):
        """Zero out the region of memory in the merged chunks related to a given chunk"""
        if chunk_coords in self._merged_chunk_locations:
            (
                offset,
                size,
                translucent_offset,
                translucent_size,
            ) = self._merged_chunk_locations.pop(chunk_coords)
            glBindVertexArray(self._vao)
            glBindBuffer(GL_ARRAY_BUFFER, self._vbo)
            glBufferSubData(
                GL_ARRAY_BUFFER,
...
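region.py stages new RenderChunks in a thread-safe queue.Queue plus a companion set, and only folds them into the real per-region dictionaries from the main thread after a draw call, because resizing those dictionaries from another thread causes issues. Below is a minimal sketch of that hand-off pattern; the Chunk and ChunkStore names are hypothetical stand-ins, not part of the source above.

import queue
import threading
from typing import Dict, Set, Tuple

class Chunk:
    """Stand-in for RenderChunk: only carries its chunk coordinates."""
    def __init__(self, cx: int, cz: int):
        self.cx, self.cz = cx, cz

class ChunkStore:
    """Hypothetical illustration of the queue + set hand-off used in region.py."""
    def __init__(self):
        self._temp: queue.Queue = queue.Queue()        # filled from worker threads
        self._temp_set: Set[Tuple[int, int]] = set()   # fast membership test for queued chunks
        self._chunks: Dict[Tuple[int, int], Chunk] = {}

    def add(self, chunk: Chunk):
        # Safe to call from any thread: only the queue and the set are touched here.
        self._temp.put(chunk)
        self._temp_set.add((chunk.cx, chunk.cz))

    def __contains__(self, coords: Tuple[int, int]) -> bool:
        return coords in self._temp_set or coords in self._chunks

    def merge(self):
        # Call only from the main thread (after a draw call in the real code).
        for _ in range(self._temp.qsize()):
            chunk = self._temp.get()
            self._chunks[(chunk.cx, chunk.cz)] = chunk
        self._temp_set.clear()

store = ChunkStore()
worker = threading.Thread(target=lambda: store.add(Chunk(0, 0)))
worker.start()
worker.join()
store.merge()            # main-thread merge, mirroring _merge_chunk_temp
print((0, 0) in store)   # True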
train.py
Source:train.py
import os.path
import numpy as np
import imageio
import torch
from options.options import get_parser
from data import creat_dataset
from model import creat_nerf
from tqdm import tqdm, trange
from model.networks import *
import datetime
import sys


if __name__ == "__main__":
    begin = datetime.datetime.now()
    torch.set_default_tensor_type('torch.cuda.FloatTensor')  # place all newly created tensors on CUDA
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    opt = get_parser()
    dataset = creat_dataset(opt)
    global_step, optimizer, train_kwargs, test_kwargs, result_dir = creat_nerf(opt)
    bds_dict = {'near': dataset['near'], 'far': dataset['far']}
    train_kwargs.update(bds_dict)
    test_kwargs.update(bds_dict)
    print('begin')
    print('View for train', dataset['i_train'], '\nView for validation', dataset['i_val'])
    # to tensor
    val_poses = torch.from_numpy(dataset['val_poses']).to(device)
    test_poses = torch.from_numpy(dataset['test_poses']).to(device)
    rays = torch.from_numpy(dataset['rays_train']).to(device)

    if opt.render_only:
        with torch.no_grad():
            rend_dir = os.path.join(result_dir, 'render_only_{:06d}'.format(global_step))
            os.makedirs(rend_dir, exist_ok=True)
            rgbs, disps = render_path(test_poses[:1], dataset['hwf'], opt.render_chunk, rend_dir, **test_kwargs)
            rgb_dir = os.path.join(rend_dir, 'rgb.mp4')
            disp_dir = os.path.join(rend_dir, 'disp.mp4')
            imageio.mimwrite(rgb_dir, tobyte(rgbs), fps=30, quality=8)
            imageio.mimwrite(disp_dir, tobyte(disps / np.max(disps)), fps=30, quality=8)
            sys.exit()
    i_batch = 0
    for i in trange(global_step + 1, opt.N_iters + 1):
        num = int(rays.shape[0])
        if i_batch >= num:
            i_batch = 0
            idx = torch.randperm(num)
            rays = rays[idx]
            print('shuffle data after an epoch')
        batch = rays[i_batch: i_batch + opt.N_batch_rays]
        batch_rays, target = batch[:, :2], batch[:, -1]
        i_batch += opt.N_batch_rays
        out = render(batch_rays, dataset['hwf'], opt.render_chunk, **train_kwargs)
        rgb = out['rgb']
        rgb0 = out['rgb0']
        optimizer.zero_grad()
        loss = img2mse(rgb, target)
        psnr = mse2psnr(loss)
        loss += img2mse(target, rgb0)
        loss.backward()
        optimizer.step()
        # update learning rate
        decay_rate = 0.1
        lr_decay = opt.lr_decay * 1000
        new_lr = opt.lr * (decay_rate ** (i / lr_decay))
        for param_group in optimizer.param_groups:
            param_group['lr'] = new_lr

        if i % opt.i_print == 0:
            tqdm.write(f"[Train] Iter: {i} Loss: {loss.item()} PSNR: {psnr.item()}")
        if i % opt.i_weights == 0:
            dir_w = os.path.join(result_dir, '{:06d}.tar'.format(i))
            torch.save({
                'global_step': i,
                'optimizer_dict': optimizer.state_dict(),
                'model_dict': train_kwargs['model'].state_dict(),
                'model_fn_dict': train_kwargs['model_fn'].state_dict()
            }, dir_w)
            print(f'checkpoints saved at {result_dir}')
        if i % opt.i_video == 0:
            test_dir = os.path.join(result_dir, 'spiral_{:06d}'.format(i))
            os.makedirs(test_dir, exist_ok=True)
            with torch.no_grad():
                rgbs, disps = render_path(test_poses, dataset['hwf'], opt.render_chunk, test_dir, **test_kwargs)
            save_dir_rgb = os.path.join(test_dir, 'rgb.mp4')
            save_dir_disp = os.path.join(test_dir, 'disp.mp4')
            imageio.mimwrite(save_dir_rgb, tobyte(rgbs), fps=30, quality=8)
            imageio.mimwrite(save_dir_disp, tobyte(disps / np.max(disps)), fps=30, quality=8)
        if i % opt.i_test == 0:
            val_dir = os.path.join(result_dir, 'val_{:06d}'.format(i))
            os.makedirs(val_dir, exist_ok=True)
            with torch.no_grad():
                render_path(val_poses, dataset['hwf'], opt.render_chunk, val_dir, **test_kwargs)
        # torch.cuda.empty_cache()
...
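train.py decays the learning rate exponentially on every iteration: new_lr = lr * 0.1 ** (i / (lr_decay * 1000)). Here is a small stand-alone check of that schedule; the lr=5e-4 and lr_decay=250 values are illustrative assumptions (common NeRF defaults), not values read from the project's options.

def decayed_lr(lr: float, lr_decay: int, step: int, decay_rate: float = 0.1) -> float:
    # Same schedule as the training loop above: the learning rate is scaled by
    # decay_rate once every lr_decay * 1000 steps, interpolated smoothly in between.
    return lr * decay_rate ** (step / (lr_decay * 1000))

# Illustrative values (assumed, not taken from the project's option parser):
for step in (0, 50_000, 250_000, 500_000):
    print(step, decayed_lr(lr=5e-4, lr_decay=250, step=step))
# 0 -> 5.0e-4, 250_000 -> 5.0e-5, 500_000 -> 5.0e-6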
MainPage.py
Source:MainPage.py
...
		self.render('templates/index.html')

	def get_articles(self):
		return model.Article.all().filter('draft', False).filter('deletion_date', None).order('-creation_date').fetch(5)
		# return self.render_chunk('templates/index-articles.html', {'articles': articles})

	def get_communities(self):
		return model.Community.all().order('-members').fetch(5)
		# return self.render_chunk('templates/index-communities.html', {'communities': communities})

	def get_threads(self):
		return model.Thread.all().filter('parent_thread', None).order('-last_response_date').fetch(5)
...
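The commented-out calls in MainPage.py suggest a render_chunk(template, context) helper that returns a rendered HTML fragment rather than writing out a full page. A minimal, hypothetical stand-in using Python's built-in string.Template is shown below; the real handler would use its own framework's template engine.

from string import Template

def render_chunk(template_string: str, context: dict) -> str:
    # Hypothetical stand-in: substitute context values into an HTML fragment.
    return Template(template_string).safe_substitute(context)

fragment = render_chunk("<li><a href='$url'>$title</a></li>",
                        {"url": "/articles/1", "title": "Hello"})
print(fragment)  # <li><a href='/articles/1'>Hello</a></li>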
