How to use the is_parallel method in molecule

Best Python code snippets showing is_parallel in use, collected from open-source projects on GitHub.
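Each project below ships its own is_parallel, so the snippets are best read as independent examples rather than one shared API. As a minimal point of orientation, a typical parallel check for 2-D direction vectors can be sketched as follows (an illustrative example only; it is not molecule's API and is not taken from any of the snippets): two vectors are parallel when their 2-D cross product is close to zero.

import math

def is_parallel(v1, v2, tol=1e-9):
    """Return True if 2-D vectors v1 and v2 lie along the same line."""
    # The 2-D cross product vanishes exactly when the vectors are parallel
    # (same or opposite direction); compare it against a small tolerance.
    cross = v1[0] * v2[1] - v1[1] * v2[0]
    return math.isclose(cross, 0.0, abs_tol=tol)

print(is_parallel((1, 2), (2, 4)))    # True  (same direction)
print(is_parallel((1, 2), (-2, -4)))  # True  (opposite direction)
print(is_parallel((1, 2), (2, 5)))    # False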

test_pruning.py

Source: test_pruning.py (GitHub)

#
# Copyright (c) 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import namedtuple
import numpy as np
import logging
import torch
import os
import sys
try:
    import distiller
except ImportError:
    module_path = os.path.abspath(os.path.join('..'))
    sys.path.append(module_path)
    import distiller
import common
import pytest
from models import create_model
from apputils import save_checkpoint, load_checkpoint

# Logging configuration
logging.basicConfig(level=logging.INFO)
fh = logging.FileHandler('test.log')
logger = logging.getLogger()
logger.addHandler(fh)

NetConfig = namedtuple("test_config", "arch dataset bn_name module_pairs")

#
# Model configurations
#
def simplenet(is_parallel):
    if is_parallel:
        return NetConfig(arch="simplenet_cifar", dataset="cifar10",
                         module_pairs=[("module.conv1", "module.conv2")],
                         bn_name=None)
    else:
        return NetConfig(arch="simplenet_cifar", dataset="cifar10",
                         module_pairs=[("conv1", "conv2")],
                         bn_name=None)

def resnet20_cifar(is_parallel):
    if is_parallel:
        return NetConfig(arch="resnet20_cifar", dataset="cifar10",
                         module_pairs=[("module.layer1.0.conv1", "module.layer1.0.conv2")],
                         bn_name="module.layer1.0.bn1")
    else:
        return NetConfig(arch="resnet20_cifar", dataset="cifar10",
                         module_pairs=[("layer1.0.conv1", "layer1.0.conv2")],
                         bn_name="layer1.0.bn1")

def vgg19_imagenet(is_parallel):
    if is_parallel:
        return NetConfig(arch="vgg19", dataset="imagenet",
                         module_pairs=[("features.module.0", "features.module.2"),
                                       ("features.module.21", "features.module.23"),
                                       ("features.module.23", "features.module.25"),
                                       ("features.module.25", "features.module.28"),
                                       ("features.module.28", "features.module.30"),
                                       ("features.module.30", "features.module.32"),
                                       ("features.module.32", "features.module.34")],
                         bn_name=None)
    else:
        return NetConfig(arch="vgg19", dataset="imagenet",
                         module_pairs=[("features.21", "features.23"),
                                       ("features.23", "features.25"),
                                       ("features.25", "features.28"),
                                       ("features.28", "features.30"),
                                       ("features.30", "features.32"),
                                       ("features.32", "features.34")],
                         bn_name=None)

def vgg16_cifar(is_parallel):
    if is_parallel:
        return NetConfig(arch="vgg16_cifar", dataset="cifar10",
                         module_pairs=[("features.module.0", "features.module.2")],
                         bn_name=None)
    else:
        return NetConfig(arch="vgg16_cifar", dataset="cifar10",
                         module_pairs=[("features.0", "features.2")],
                         bn_name=None)

@pytest.fixture(params=[True, False])
def parallel(request):
    return request.param

def test_ranked_filter_pruning(parallel):
    ranked_filter_pruning(resnet20_cifar(parallel), ratio_to_prune=0.1, is_parallel=parallel)
    ranked_filter_pruning(resnet20_cifar(parallel), ratio_to_prune=0.5, is_parallel=parallel)
    ranked_filter_pruning(simplenet(parallel), ratio_to_prune=0.5, is_parallel=parallel)
    ranked_filter_pruning(vgg19_imagenet(parallel), ratio_to_prune=0.1, is_parallel=parallel)
    model, zeros_mask_dict = ranked_filter_pruning(vgg19_imagenet(parallel),
                                                   ratio_to_prune=0.1,
                                                   is_parallel=parallel)
    test_conv_fc_interface(parallel, model, zeros_mask_dict)

def test_prune_all_filters(parallel):
    """Pruning all of the filters in a weights tensor of a Convolution
    is illegal and should raise an exception.
    """
    with pytest.raises(ValueError):
        ranked_filter_pruning(resnet20_cifar(parallel), ratio_to_prune=1.0, is_parallel=parallel)

def ranked_filter_pruning(config, ratio_to_prune, is_parallel):
    """Test L1 ranking and pruning of filters.
    First we rank and prune the filters of a Convolutional layer using
    a L1RankedStructureParameterPruner. Then we physically remove the
    filters from the model (via "thinning" process).
    """
    model, zeros_mask_dict = common.setup_test(config.arch, config.dataset, is_parallel)
    for pair in config.module_pairs:
        # Test that we can access the weights tensor of the first convolution in layer 1
        conv1_p = distiller.model_find_param(model, pair[0] + ".weight")
        assert conv1_p is not None
        num_filters = conv1_p.size(0)
        # Test that there are no zero-filters
        assert distiller.sparsity_3D(conv1_p) == 0.0
        # Create a filter-ranking pruner
        pruner = distiller.pruning.L1RankedStructureParameterPruner("filter_pruner",
                                                                    group_type="Filters",
                                                                    desired_sparsity=ratio_to_prune,
                                                                    weights=pair[0] + ".weight")
        pruner.set_param_mask(conv1_p, pair[0] + ".weight", zeros_mask_dict, meta=None)
        conv1 = common.find_module_by_name(model, pair[0])
        assert conv1 is not None
        # Test that the mask has the correct fraction of filters pruned.
        # We asked for 10%, but there are only 16 filters, so we have to settle for 1/16 filters
        expected_cnt_removed_filters = int(ratio_to_prune * conv1.out_channels)
        expected_pruning = expected_cnt_removed_filters / conv1.out_channels
        masker = zeros_mask_dict[pair[0] + ".weight"]
        assert masker is not None
        assert distiller.sparsity_3D(masker.mask) == expected_pruning
        # Use the mask to prune
        assert distiller.sparsity_3D(conv1_p) == 0
        masker.apply_mask(conv1_p)
        assert distiller.sparsity_3D(conv1_p) == expected_pruning
        # Remove filters
        conv2 = common.find_module_by_name(model, pair[1])
        assert conv2 is not None
        assert conv1.out_channels == num_filters
        assert conv2.in_channels == num_filters
        # Test thinning
        distiller.remove_filters(model, zeros_mask_dict, config.arch, config.dataset, optimizer=None)
        assert conv1.out_channels == num_filters - expected_cnt_removed_filters
        assert conv2.in_channels == num_filters - expected_cnt_removed_filters
    return model, zeros_mask_dict

def test_arbitrary_channel_pruning(parallel):
    arbitrary_channel_pruning(resnet20_cifar(parallel),
                              channels_to_remove=[0, 2],
                              is_parallel=parallel)
    arbitrary_channel_pruning(simplenet(parallel),
                              channels_to_remove=[0, 2],
                              is_parallel=parallel)
    arbitrary_channel_pruning(vgg19_imagenet(parallel),
                              channels_to_remove=[0, 2],
                              is_parallel=parallel)
    arbitrary_channel_pruning(vgg16_cifar(parallel),
                              channels_to_remove=[0, 2],
                              is_parallel=parallel)

def test_prune_all_channels(parallel):
    """Pruning all of the channels in a weights tensor of a Convolution
    is illegal and should raise an exception.
    """
    with pytest.raises(ValueError):
        arbitrary_channel_pruning(resnet20_cifar(parallel),
                                  channels_to_remove=[ch for ch in range(16)],
                                  is_parallel=parallel)

def test_channel_pruning_conv_bias(parallel):
    arbitrary_channel_pruning(simplenet(parallel),
                              channels_to_remove=[0, 1],
                              is_parallel=parallel)

def create_channels_mask(conv_p, channels_to_remove):
    assert conv_p.dim() == 4
    num_filters = conv_p.size(0)
    num_channels = conv_p.size(1)
    kernel_height = conv_p.size(2)
    kernel_width = conv_p.size(3)
    # Let's build our 4D mask.
    # We start with a 1D mask of channels, with all but our specified channels set to one
    channels = torch.ones(num_channels)
    for ch in channels_to_remove:
        channels[ch] = 0
    # Now let's expand back up to a 4D mask
    mask = channels.expand(num_filters, num_channels)
    mask.unsqueeze_(-1)
    mask.unsqueeze_(-1)
    mask = mask.expand(num_filters, num_channels, kernel_height, kernel_width).contiguous().cuda()
    assert mask.shape == conv_p.shape
    return mask

def run_forward_backward(model, optimizer, dummy_input):
    criterion = torch.nn.CrossEntropyLoss().cuda()
    model.train()
    output = model(dummy_input)
    target = torch.LongTensor(1).random_(2).cuda()
    loss = criterion(output, target)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

def arbitrary_channel_pruning(config, channels_to_remove, is_parallel):
    """Test removal of arbitrary channels.
    The test receives a specification of channels to remove.
    Based on this specification, the channels are pruned and then physically
    removed from the model (via a "thinning" process).
    """
    model, zeros_mask_dict = common.setup_test(config.arch, config.dataset, is_parallel)
    pair = config.module_pairs[0]
    conv2 = common.find_module_by_name(model, pair[1])
    assert conv2 is not None
    # Test that we can access the weights tensor of the first convolution in layer 1
    conv2_p = distiller.model_find_param(model, pair[1] + ".weight")
    assert conv2_p is not None
    assert conv2_p.dim() == 4
    num_channels = conv2_p.size(1)
    cnt_nnz_channels = num_channels - len(channels_to_remove)
    mask = create_channels_mask(conv2_p, channels_to_remove)
    assert distiller.density_ch(mask) == (conv2.in_channels - len(channels_to_remove)) / conv2.in_channels
    # Cool, so now we have a mask for pruning our channels.
    # Use the mask to prune
    zeros_mask_dict[pair[1] + ".weight"].mask = mask
    zeros_mask_dict[pair[1] + ".weight"].apply_mask(conv2_p)
    all_channels = set([ch for ch in range(num_channels)])
    nnz_channels = set(distiller.find_nonzero_channels_list(conv2_p, pair[1] + ".weight"))
    channels_removed = all_channels - nnz_channels
    logger.info("Channels removed {}".format(channels_removed))
    # Now, let's do the actual network thinning
    distiller.remove_channels(model, zeros_mask_dict, config.arch, config.dataset, optimizer=None)
    conv1 = common.find_module_by_name(model, pair[0])
    assert conv1
    assert conv1.out_channels == cnt_nnz_channels
    assert conv2.in_channels == cnt_nnz_channels
    assert conv1.weight.size(0) == cnt_nnz_channels
    assert conv2.weight.size(1) == cnt_nnz_channels
    if config.bn_name is not None:
        bn1 = common.find_module_by_name(model, config.bn_name)
        assert bn1.running_var.size(0) == cnt_nnz_channels
        assert bn1.running_mean.size(0) == cnt_nnz_channels
        assert bn1.num_features == cnt_nnz_channels
        assert bn1.bias.size(0) == cnt_nnz_channels
        assert bn1.weight.size(0) == cnt_nnz_channels
    dummy_input = common.get_dummy_input(config.dataset)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.1)
    run_forward_backward(model, optimizer, dummy_input)
    # Let's test saving and loading a thinned model.
    # We save 3 times, and load twice, to make sure to cover some corner cases:
    #  - Make sure that after loading, the model still has hold of the thinning recipes
    #  - Make sure that after a 2nd load, there is no problem loading (in this case, the
    #    tensors are already thin, so this is a new flow)
    # (1)
    save_checkpoint(epoch=0, arch=config.arch, model=model, optimizer=None)
    model_2 = create_model(False, config.dataset, config.arch, parallel=is_parallel)
    model(dummy_input)
    model_2(dummy_input)
    conv2 = common.find_module_by_name(model_2, pair[1])
    assert conv2 is not None
    with pytest.raises(KeyError):
        model_2, compression_scheduler, start_epoch = load_checkpoint(model_2, 'checkpoint.pth.tar')
    compression_scheduler = distiller.CompressionScheduler(model)
    hasattr(model, 'thinning_recipes')
    run_forward_backward(model, optimizer, dummy_input)
    # (2)
    save_checkpoint(epoch=0, arch=config.arch, model=model, optimizer=None, scheduler=compression_scheduler)
    model_2, compression_scheduler, start_epoch = load_checkpoint(model_2, 'checkpoint.pth.tar')
    assert hasattr(model_2, 'thinning_recipes')
    logger.info("test_arbitrary_channel_pruning - Done")
    # (3)
    save_checkpoint(epoch=0, arch=config.arch, model=model_2, optimizer=None, scheduler=compression_scheduler)
    model_2, compression_scheduler, start_epoch = load_checkpoint(model_2, 'checkpoint.pth.tar')
    assert hasattr(model_2, 'thinning_recipes')
    logger.info("test_arbitrary_channel_pruning - Done 2")

def test_conv_fc_interface(is_parallel=parallel, model=None, zeros_mask_dict=None):
    """A special case of convolution filter-pruning occurs when the next layer is
    fully-connected (linear). This test is for this case and uses VGG16.
    """
    arch = "vgg19"
    dataset = "imagenet"
    ratio_to_prune = 0.1
    if is_parallel:
        conv_name = "features.module.34"
    else:
        conv_name = "features.34"
    fc_name = "classifier.0"
    dummy_input = torch.randn(1, 3, 224, 224).cuda()
    if model is None or zeros_mask_dict is None:
        model, zeros_mask_dict = common.setup_test(arch, dataset, is_parallel)
    # Run forward and backward passes, in order to create the gradients and optimizer params
    optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.1)
    run_forward_backward(model, optimizer, dummy_input)
    conv = common.find_module_by_name(model, conv_name)
    assert conv is not None
    conv_p = distiller.model_find_param(model, conv_name + ".weight")
    assert conv_p is not None
    assert conv_p.dim() == 4
    # Create a filter-ranking pruner
    pruner = distiller.pruning.L1RankedStructureParameterPruner("filter_pruner",
                                                                group_type="Filters",
                                                                desired_sparsity=ratio_to_prune,
                                                                weights=conv_name + ".weight")
    pruner.set_param_mask(conv_p, conv_name + ".weight", zeros_mask_dict, meta=None)
    # Use the mask to prune
    masker = zeros_mask_dict[conv_name + ".weight"]
    assert masker is not None
    masker.apply_mask(conv_p)
    num_filters = conv_p.size(0)
    expected_cnt_removed_filters = int(ratio_to_prune * conv.out_channels)
    # Remove filters
    fc = common.find_module_by_name(model, fc_name)
    assert fc is not None
    # Test thinning
    fm_size = fc.in_features // conv.out_channels
    num_nnz_filters = num_filters - expected_cnt_removed_filters
    distiller.remove_filters(model, zeros_mask_dict, arch, dataset, optimizer)
    assert conv.out_channels == num_nnz_filters
    assert fc.in_features == fm_size * num_nnz_filters
    # Run again, to make sure the optimizer and gradients shapes were updated correctly
    run_forward_backward(model, optimizer, dummy_input)
    run_forward_backward(model, optimizer, dummy_input)

def test_threshold_mask():
    # Create a 4-D tensor of 1s
    a = torch.ones(3, 64, 32, 32)
    # Change one element
    a[1, 4, 17, 31] = 0.2
    # Create and apply a mask
    mask = distiller.threshold_mask(a, threshold=0.3)
    assert np.sum(distiller.to_np(mask)) == (distiller.volume(a) - 1)
    assert mask[1, 4, 17, 31] == 0
    assert common.almost_equal(distiller.sparsity(mask), 1/distiller.volume(a))

def test_magnitude_pruning():
    # Create a 4-D tensor of 1s
    a = torch.ones(3, 64, 32, 32)
    # Change one element
    a[1, 4, 17, 31] = 0.2
    # Create a masks dictionary and populate it with one ParameterMasker
    zeros_mask_dict = {}
    masker = distiller.ParameterMasker('a')
    zeros_mask_dict['a'] = masker
    # Try to use a MagnitudeParameterPruner without defining a default threshold
    with pytest.raises(AssertionError):
        pruner = distiller.pruning.MagnitudeParameterPruner("test", None)
    # Now define the default threshold
    thresholds = {"*": 0.4}
    pruner = distiller.pruning.MagnitudeParameterPruner("test", thresholds)
    assert distiller.sparsity(a) == 0
    # Create a mask for parameter 'a'
    pruner.set_param_mask(a, 'a', zeros_mask_dict, None)
    assert common.almost_equal(distiller.sparsity(zeros_mask_dict['a'].mask), 1/distiller.volume(a))
    # Let's now use the masker to prune a parameter
    masker = zeros_mask_dict['a']
    masker.apply_mask(a)
    assert common.almost_equal(distiller.sparsity(a), 1/distiller.volume(a))
    # We can use the masker on other tensors, if we want (and if they have the correct shape).
    # Remember that the mask was created already, so we're not thresholding - we are pruning
    b = torch.ones(3, 64, 32, 32)
    b[:] = 0.3
    masker.apply_mask(b)
    assert common.almost_equal(distiller.sparsity(b), 1/distiller.volume(a))

if __name__ == '__main__':
    for is_parallel in [True, False]:
        test_ranked_filter_pruning(is_parallel)
        test_arbitrary_channel_pruning(is_parallel)
        test_prune_all_channels(is_parallel)
        model, zeros_mask_dict = ranked_filter_pruning(vgg19_imagenet(is_parallel),
                                                       ratio_to_prune=0.1,
                                                       is_parallel=is_parallel)
        test_conv_fc_interface(is_parallel, model, zeros_mask_dict)
        arbitrary_channel_pruning(vgg19_imagenet(parallel),
                                  channels_to_remove=[0, 2],
...

main.py

Source: main.py (GitHub)

# Campus card localization and dynamic correction

import cv2
import numpy as np
from numpy.core.fromnumeric import shape, sort


def my_cross_point(line1, line2, size):
    '''
    Find the intersection of two lines and report whether they are parallel.
    line1, line2: each line is given by the x/y coordinates of two points
    size: the two-dimensional size of the image
    out_x, out_y: coordinates of the intersection; -1 is returned if the intersection falls outside the image
    is_parallel: True - the two lines are nearly parallel and their intersection lies outside the image;
                 False - the two lines intersect inside the image
    '''

    [[x1, y1, x2, y2]] = line1
    [[x3, y3, x4, y4]] = line2
    x_max, y_max = size
    '''
    Handle the cases of zero slope or undefined (vertical) slope first.
    '''
    if(abs(x1 - x2) < 1 or abs(x3 - x4) < 1):  # at least one of the two lines is vertical
        if(abs(x1 - x2) < 1 and abs(x3-x4) > 1):  # only line1 is vertical
            out_x = x1
            out_y = (y3-y4)/(x3-x4)*(x1-x3)+y3  # i.e. k3*(x-x3)+y3
            is_parallel = False
            # return out_x, out_y, is_parallel
        elif(abs(x1 - x2) > 1 and abs(x3-x4) < 1):  # only line2 is vertical
            out_x = x3
            out_y = (y1-y2)/(x1-x2)*(x3-x1)+y1  # i.e. k1*(x-x1)+y1
            is_parallel = False

        else:  # both lines are vertical
            out_x = -1
            out_y = -1
            is_parallel = True
    else:  # neither line is vertical, so both slopes exist; solve the line equations for the intersection
        k1 = (y1-y2)/(x1-x2)
        k3 = (y3-y4)/(x3-x4)
        is_parallel = False  # not parallel by default

        # When k1 is (almost) equal to k3, return "parallel" directly; no further computation is needed
        if(abs(k1-k3) < 1):
            out_x = -1
            out_y = -1
            is_parallel = True

            return out_x, out_y, is_parallel

        # k1 != k3
        out_x = (k1*x1-k3*x3-y1+y3)/(k1-k3)
        out_y = k1*(out_x-x1)+y1

        # If the resulting x/y falls outside the image itself, the two lines are close to parallel
        if(out_x <= 0 or out_y <= 0 or out_x > x_max or out_y > y_max):
            out_x = -1
            out_y = -1
            is_parallel = True
            # return out_x, out_y, is_parallel

    return out_x, out_y, is_parallel


cap = cv2.VideoCapture("1.mp4")
while(cap.isOpened()):
    print("open")
    ret, videoframe = cap.read()
    cv2.imshow('videoframe', videoframe)
    if cv2.waitKey(25) & 0xFF == ord('q'):
        break
    # Process videoframe
    img = videoframe
    # ---- Image processing ----

    # Extract the card by color in HSV space
    mat = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    # Campus cards fade to different degrees, so only the hue range is constrained;
    # value and saturation are left unspecified
    low_blue = np.array([78, 0, 0])
    upper_blue = np.array([124, 255, 255])
    mask = cv2.inRange(mat, low_blue, upper_blue)
    ROI = mask
    SE = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    ROI_open = cv2.morphologyEx(ROI, cv2.MORPH_CLOSE, (5, 5))
    # Erode with a small structuring element, then subtract from the original to get the outer contour
    SE_small = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    ROI_erode = cv2.erode(ROI_open, SE_small)
    ROI_edge = ROI_open-ROI_erode
    # Opening/closing to remove a few isolated points
    ROI_edge = cv2.morphologyEx(ROI_edge, cv2.MORPH_CLOSE, SE)
    ROI_edge = cv2.morphologyEx(
        ROI_edge, cv2.MORPH_OPEN, cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1)))
    # The image may contain other objects, so take the largest of all contours as the card's contour
    contours_none = cv2.findContours(
        ROI_edge, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    biggest = sorted(contours_none, key=lambda x: cv2.contourArea(
        x), reverse=True)  # sort contours by area
    drawing_none = cv2.drawContours(img, biggest, 0, (0, 255, 0), 3)
    # cv2.imshow("processed", img)

    # ---- Find the target vertices ----

    # Fit the card's outer contour with a polygon; epsilon is a parameter that tunes the approximation accuracy
    epsilon = 0.01
    approx = cv2.approxPolyDP(
        biggest[0], epsilon * cv2.arcLength(biggest[0], True), True)
    boxPoints = approx.reshape(-1, 2)
    # Draw the vertices and edges of the fitted polygon
    num = len(approx)  # number of vertices = number of edges
    for i in range(0, num):
        cv2.line(img, boxPoints[i], boxPoints[(i+1) % num], (255, 100, 100), 4)
    for i in range(0, num):
        cv2.circle(img, boxPoints[i], 10, (240, 240, 200), -1)
    # cv2.imshow("polygon", img)
    # The fitted polygon is not necessarily a quadrilateral and may have many edges.
    # Pick the four longest edges as the card's four sides.
    mysidelen = np.ones((num, 1))
    for i in range(0, num):
        mysidelen[i] = np.linalg.norm(boxPoints[(i+1) % num]-boxPoints[i])

    four_side_len = sorted(mysidelen, reverse=True)[0:4]
    # Get the vertex indices of the four longest edges; [index, index+1] are an edge's two endpoints,
    # and %num handles the wrap-around case
    pointID = sorted(range(num), key=lambda k: mysidelen[k], reverse=True)[0:4]
    four_side = np.ones((4, 1, 4))  # each row holds the coordinates of the two points defining one line; used later for intersections
    for i in range(0, 4):
        cv2.line(img, boxPoints[pointID[i]],
                 boxPoints[(pointID[i]+1) % num], (0, 0, 255), 4)
        four_side[i] = np.array([boxPoints[pointID[i]][0], boxPoints[pointID[i]][1], boxPoints[(
            pointID[i]+1) % num][0], boxPoints[(pointID[i]+1) % num][1]])

    # The card's aspect ratio is roughly 1.57, so in most cases the longest edge in the image is one of the card's long sides.
    # four_side is sorted by length in descending order, so four_side[0] is always the longest edge.
    # The other edges cannot be classified by length alone, because under heavy perspective distortion
    # a long side may appear shorter than a short side.
    # Check whether the longest edge is parallel to each of the other three edges. If an edge is not
    # parallel, it is a short side and yields one corner (intersection) point.
    # If an edge is parallel, it is the other long side; intersect it with the two short sides to find
    # the remaining two corners.

    cross_points = np.zeros((4, 2))
    j = 0
    for i in range(1, 4):
        temp_x, temp_y, is_parallel = my_cross_point(
            four_side[0], four_side[i], (1080, 1440))  # (np.shape(img)[1], np.shape(img)[0]))
        if(is_parallel):  # found the parallel edge
            parallel_index = i
            continue
        cross_points[j, :] = temp_x, temp_y
        j += 1
    # parallel_index: index of the edge that is parallel to the longest edge

    # Find the intersections of the parallel edge with the other two edges
    j = 2
    for i in range(1, 4):
        if(i == parallel_index):
            continue
        temp_x, temp_y, is_parallel = my_cross_point(
            four_side[parallel_index], four_side[i], (1080, 1440))  # (np.shape(img)[1], np.shape(img)[0]))
        cross_points[j, :] = temp_x, temp_y
        j += 1

    # Mark the four points on the image
    for i in range(0, 4):
        cv2.circle(img, np.array(
            [cross_points[i][0], cross_points[i][1]], dtype=np.int32), 5, (0, 0, 0), -1)
    # cv2.imshow("box2", img)

    # Determine the relative position of the four corners in the image and arrange them in the order
    # "top-left, bottom-left, top-right, bottom-right".
    # Method: sort by column first to split the corners into left/right groups, then sort each group
    # by row to split top/bottom.
    sorted_cross_points = sorted(cross_points, key=lambda x: x[0])
    sorted_cross_points = sorted(sorted_cross_points[0:2], key=lambda x: x[1]) + sorted(
        sorted_cross_points[2:4], key=lambda x: x[1])

    # The destination points must be in clockwise order: top-left, top-right, bottom-right, bottom-left.
    # However, the card may be standing vertically, so the image's top-left corner is not necessarily
    # the card's top-left corner; the two cases are handled separately.
    # horizontal: the card lies horizontally; vertical: the card stands vertically
    indices_horizontal = [0, 2, 3, 1]  # top-left, top-right, bottom-right, bottom-left (clockwise)
    indices_vertical = [1, 0, 2, 3]

    # Criterion: horizontal if top-left is closer to bottom-left; vertical if top-left is closer to top-right
    h_or_v = np.linalg.norm(sorted_cross_points[0]-sorted_cross_points[1]) < np.linalg.norm(
        sorted_cross_points[0]-sorted_cross_points[2])
    if(h_or_v):  # horizontal
        sorted_cross_points = np.array(sorted_cross_points)[
            indices_horizontal[:]]
        for i in range(0, 4):  # draw the edges
            cv2.line(img, np.array(cross_points[indices_horizontal[i]], dtype=np.int32), np.array(
                cross_points[indices_horizontal[(i+1) % 4]], dtype=np.int32), (0, 0, 255), 4)

    else:  # vertical
        sorted_cross_points = np.array(sorted_cross_points)[
            indices_vertical[:]]
        for i in range(0, 4):  # draw the edges
            cv2.line(img, np.array(cross_points[indices_horizontal[i]], dtype=np.int32), np.array(
                cross_points[indices_horizontal[(i+1) % 4]], dtype=np.int32), (0, 0, 255), 4)

    cv2.imshow("fixed", img)
    # ---- Perspective transform ----

    # The campus card measures roughly 850mm * 540mm
    # Compute the homography and apply the perspective transform
    dstPoints = np.array(
        [[0, 0], [850, 0], [850, 540], [0, 540]], dtype=np.int32)
    M = cv2.findHomography(sorted_cross_points, dstPoints)
    Ma = np.array(M[0], dtype=np.float64)

    # ---- Show the result ----
    dst_img = img
    img_f64 = np.float64(dst_img)
    output = cv2.warpPerspective(dst_img, M[0], (850, 540))

    cv2.imshow("outpu", output)


cap.release()
cv2.destroyAllWindows()
...
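The script above only calls my_cross_point from inside its video loop. For clarity, here is a small standalone call with hypothetical inputs (not part of the original script) that shows both outcomes of the is_parallel flag:

# Hypothetical inputs: each line is [[x1, y1, x2, y2]]; size is (x_max, y_max).
x, y, parallel = my_cross_point([[0, 0, 100, 100]], [[0, 100, 100, 0]], (1080, 1440))
print(x, y, parallel)   # 50.0 50.0 False -> the lines cross inside the image

x, y, parallel = my_cross_point([[0, 0, 100, 1]], [[0, 50, 100, 52]], (1080, 1440))
print(x, y, parallel)   # -1 -1 True -> slope difference below 1, reported as (near) parallel

Note that the slope test abs(k1 - k3) < 1 is quite coarse: any pair of lines whose slopes differ by less than 1 is treated as parallel, which is tuned to this card-detection setting rather than being a general-purpose parallel test.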

vector.py

Source: vector.py (GitHub)

...
        return acos(dot_prod)

    def get_angle_deg(self, other):
        degrees_per_rad = 180. / pi
        return degrees_per_rad * self.get_angle_rad(other)

    def is_parallel(self, other):
        return (self.is_zero() or other.is_zero() or
                self.get_angle_rad(other) in [0, pi])

    def is_orthogonal(self, other):
        return round(self.dot_product(other), 3) == 0

    def get_projected_vector(self, other):
        """
        Gets projection of vector v in b
        """
        b_normalized = other.normalize()
        return b_normalized.times_scalar(self.dot_product(b_normalized))

    def get_orthogonal_vector(self, other):
        return self.minus(self.get_projected_vector(other))

    def cross_product(self, other):
        [x1, y1, z1] = self.coordinates
        [x2, y2, z2] = other.coordinates
        x = (y1 * z2) - (y2 * z1)
        y = -((x1 * z2) - (x2 * z1))
        z = (x1 * y2) - (x2 * y1)
        return Vector([x, y, z])

    def area_parallelogram(self, other):
        return self.cross_product(other).magnitude()

    def area_triangle(self, other):
        return self.cross_product(other).magnitude() / 2

if __name__ == '__main__':
    v = Vector([8.218, -9.341])
    w = Vector([-1.129, 2.111])
    addition = v.plus(w)
    print 'addition: {}'.format(addition)
    v = Vector([7.119, 8.215])
    w = Vector([-8.223, 0.878])
    subtraction = v.minus(w)
    print 'subtraction: {}'.format(subtraction)
    v = Vector([1.671, -1.012, -0.318])
    multiplication = v.times_scalar(7.41)
    print 'multiplication: {}'.format(multiplication)
    # *****************
    v = Vector([-0.221, 7.437])
    first_magintude = v.magnitude()
    print 'first_magintude: {}'.format(round(first_magintude, 3))
    v = Vector([8.813, -1.331, -6.247])
    second_magintude = v.magnitude()
    print 'second_magintude: {}'.format(round(second_magintude, 3))
    v = Vector([5.581, -2.136])
    first_normalization = v.normalize()
    print 'first_normailization: {}'.format(first_normalization)
    v = Vector([1.996, 3.108, -4.554])
    second_normalization = v.normalize()
    print 'second_normailization: {}'.format(second_normalization)
    # *****************
    v = Vector([7.887, 4.138])
    w = Vector([-8.802, 6.776])
    dot_product = v.dot_product(w)
    print 'first_dot_product: {}'.format(round(dot_product, 3))
    v = Vector([-5.955, -4.904, -1.874])
    w = Vector([-4.496, -8.755, 7.103])
    dot_product = v.dot_product(w)
    print 'second_dot_product: {}'.format(round(dot_product, 3))
    # *****************
    v = Vector([3.183, -7.627])
    w = Vector([-2.668, 5.319])
    angle_rads = v.get_angle_rad(w)
    print 'first_angle_rads: {}'.format(angle_rads)
    v = Vector([7.35, 0.221, 5.188])
    w = Vector([2.751, 8.259, 3.985])
    angle_degrees = v.get_angle_deg(w)
    print 'first_angle_rads: {}'.format(angle_degrees)
    # *****************
    v = Vector([-7.579, -7.88])
    w = Vector([22.737, 23.64])
    is_parallel = v.is_parallel(w)
    is_orthogonal = v.is_orthogonal(w)
    print '1 parallel: {}, orthogonal: {}'.format(is_parallel, is_orthogonal)
    v = Vector([-2.029, 9.97, 4.172])
    w = Vector([-9.231, -6.639, -7.245])
    is_parallel = v.is_parallel(w)
    is_orthogonal = v.is_orthogonal(w)
    print '2 parallel: {}, orthogonal: {}'.format(is_parallel, is_orthogonal)
    v = Vector([-2.328, -7.284, -1.214])
    w = Vector([-1.821, 1.072, -2.94])
    is_parallel = v.is_parallel(w)
    is_orthogonal = v.is_orthogonal(w)
    print '3 parallel: {}, orthogonal: {}'.format(is_parallel, is_orthogonal)
    v = Vector([2.118, 4.827])
    w = Vector([0, 0])
    is_parallel = v.is_parallel(w)
    is_orthogonal = v.is_orthogonal(w)
    print '4 parallel: {}, orthogonal: {}'.format(is_parallel, is_orthogonal)
    # *****************
    v = Vector([3.039, 1.879])
    w = Vector([0.825, 2.036])
    projected_vector = v.get_projected_vector(w)
    print 'projected vector is: {}'.format(projected_vector)
    v = Vector([-9.88, -3.264, -8.159])
    w = Vector([-2.155, -9.353, -9.473])
    orthogonal_vector = v.get_orthogonal_vector(w)
    print 'orthogonal vector is: {}'.format(orthogonal_vector)
    v = Vector([3.009, -6.172, 3.692, -2.51])
    w = Vector([6.404, -9.144, 2.759, 8.718])
    projected_vector = v.get_projected_vector(w)
...
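One detail worth noting in this snippet: is_parallel relies on self.get_angle_rad(other) in [0, pi], an exact floating-point comparison that only succeeds when acos returns exactly 0 or exactly pi, so rounding error can make genuinely parallel vectors fail the check. A tolerance-based variant is more forgiving; the sketch below assumes the same Vector interface (is_zero and get_angle_rad) used above:

from math import pi

def is_parallel_tol(v, w, tol=1e-10):
    # Same convention as the snippet: the zero vector counts as parallel to everything.
    if v.is_zero() or w.is_zero():
        return True
    angle = v.get_angle_rad(w)
    # Accept angles close to 0 or close to pi instead of requiring an exact float match.
    return angle < tol or abs(angle - pi) < tol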

test_641_construction_ray.py

Source: test_641_construction_ray.py (GitHub)

...
        ray1 = ConstructionRay((10, 1), (20, 10))
        ray2 = ConstructionRay((17, -7), (-10, 3))
        point = ray1.intersect(ray2)
        assert point.isclose(Vec3(5.7434, -2.8309), abs_tol=1e-4)
        assert ray1.is_parallel(ray2) is False

    def test_ray2d_parallel(self):
        ray1 = ConstructionRay((17, -8), (-10, 2))
        ray2 = ConstructionRay((-10, 3), (17, -7))
        ray3 = ConstructionRay((-10, 4), (17, -6))
        assert ray2.is_parallel(ray3) is True
        assert ray1.is_parallel(ray3) is True
        with pytest.raises(ParallelRaysError):
            _ = ray2.intersect(ray3)

    def test_ray2d_intersect_with_vertical(self):
        ray1 = ConstructionRay((10, 1), (10, -7))
        ray2 = ConstructionRay((-10, 3), (17, -7))
        point = ray1.intersect(ray2)
        assert point.x == 10
        assert point.isclose(Vec3(10.0, -4.4074), abs_tol=1e-4)
        with pytest.raises(ArithmeticError):
            _ = ray1.yof(1)

    def test_ray2d_intersect_with_horizontal(self):
        ray1 = ConstructionRay((-10, 10), (10, 10))
        ray2 = ConstructionRay((-10, 20), (10, 0))
        point = ray1.intersect(ray2)
        assert point.y == 10
        assert point.isclose(Vec3(0.0, 10.0), abs_tol=1e-4)

    def test_ray2d_intersect_with_vertical_and_horizontal(self):
        ray1 = ConstructionRay((-10, 10), (10, 10))
        ray2 = ConstructionRay((5, 0), (5, 20))
        point = ray1.intersect(ray2)
        assert point.y == 10
        assert point.x == 5
        assert point.isclose(Vec3(5.0, 10.0), abs_tol=1e-4)

    def test_ray2d_parallel_vertical(self):
        ray1 = ConstructionRay((10, 1), (10, -7))
        ray2 = ConstructionRay((11, 0), angle=HALF_PI)
        ray3 = ConstructionRay((12, -10), (12, 7))
        ray4 = ConstructionRay((0, 0), (1, 1))
        ray5 = ConstructionRay((0, 0), angle=0)
        with pytest.raises(ParallelRaysError):
            _ = ray1.intersect(ray3)
        assert ray1.is_parallel(ray3) is True
        assert ray1.is_parallel(ray2) is True
        assert ray2.is_parallel(ray2) is True
        assert ray1.is_parallel(ray4) is False
        assert ray2.is_parallel(ray4) is False
        assert ray3.is_parallel(ray4) is False
        assert ray1.is_parallel(ray5) is False
        assert ray2.is_parallel(ray5) is False
        assert ray3.is_parallel(ray5) is False
        # vertical rays can't calc a y-value
        with pytest.raises(ArithmeticError):
            _ = ray1.yof(-1.0)

    def test_ray2d_normal_vertical(self):
        ray = ConstructionRay((10, 1), (10, -7))  # vertical line
        ortho = ray.orthogonal((3, 3))
        point = ray.intersect(ortho)
        assert point.isclose(Vec3(10, 3))

    def test_ray2d_normal(self):
        ray = ConstructionRay((-10, 3), (17, -7))
        ortho = ray.orthogonal((3, 3))
        point = ray.intersect(ortho)
        assert point.isclose(Vec3(1.4318, -1.234), abs_tol=1e-4)

    def test_ray2d_normal_horizontal(self):
        ray = ConstructionRay((10, 10), (20, 10))  # horizontal line
        ortho = ray.orthogonal((3, 3))
        point = ray.intersect(ortho)
        assert point.isclose(Vec3(3, 10))

    def test_ray2d_angle(self):
        ray = ConstructionRay((10, 10), angle=HALF_PI)
        assert ray._is_vertical is True
        ray = ConstructionRay((10, 10), angle=0)
        assert ray._is_horizontal is True
        ray = ConstructionRay((10, 10), angle=math.pi / 4)
        assert math.isclose(ray._slope, 1.0)

    def test_bisectrix(self):
        ray1 = ConstructionRay((10, 10), angle=math.pi / 3)
        ray2 = ConstructionRay((3, -5), angle=math.pi / 2)
        ray3 = ConstructionRay((1, 1), angle=math.pi / 3)
        a = ray1.bisectrix(ray2)
        assert math.isclose(a._angle, 1.309, abs_tol=1e-4)
        assert math.isclose(a.yof(7), 12.80385, abs_tol=1e-4)
        with pytest.raises(ParallelRaysError):
            _ = ray1.bisectrix(ray3)

    def test_two_close_horizontal_rays(self):
        p1 = (39340.75302672016, 32489.73349764998)
        p2 = (39037.75302672119, 32489.73349764978)
        p3 = (38490.75302672015, 32489.73349764997)
        ray1 = ConstructionRay(p1, p2)
        ray2 = ConstructionRay(p2, p3)
        assert ray1.is_horizontal is True
        assert ray2.is_horizontal is True
        assert ray1.is_parallel(ray2) is True
        assert (
            math.isclose(ray1.slope, ray2.slope) is False
...
