How to use windowSize method in fMBT

Best Python code snippet using fMBT_python

main.py

Source:main.py Github

copy

Full Screen

# python 3.9.5
# JPEG-style image compression: BGR -> YCrCb, 4:2:2 chroma subsampling,
# 8x8 block DCT, quantization, zigzag scan, then run-length + Huffman coding.
from math import ceil

import cv2
import numpy as np

from functions import *  # zigzag, run_length_encoding, get_freq_dict, find_huffman

# Quantization tables (JPEG Annex K): QTY for luminance, QTC for chrominance.
QTY = np.array([[16, 11, 10, 16, 24, 40, 51, 61],
                [12, 12, 14, 19, 26, 48, 60, 55],
                [14, 13, 16, 24, 40, 57, 69, 56],
                [14, 17, 22, 29, 51, 87, 80, 62],
                [18, 22, 37, 56, 68, 109, 103, 77],
                [24, 35, 55, 64, 81, 104, 113, 92],
                [49, 64, 78, 87, 103, 121, 120, 101],
                [72, 92, 95, 98, 112, 100, 103, 99]])
QTC = np.array([[17, 18, 24, 47, 99, 99, 99, 99],
                [18, 21, 26, 66, 99, 99, 99, 99],
                [24, 26, 56, 99, 99, 99, 99, 99],
                [47, 66, 99, 99, 99, 99, 99, 99],
                [99, 99, 99, 99, 99, 99, 99, 99],
                [99, 99, 99, 99, 99, 99, 99, 99],
                [99, 99, 99, 99, 99, 99, 99, 99],
                [99, 99, 99, 99, 99, 99, 99, 99]])
# DCT window size (8, the side length of the quantization tables).
windowSize = len(QTY)

# Read the image and convert BGR to YCrCb.
imgOriginal = cv2.imread('marbles.bmp', cv2.IMREAD_COLOR)
img = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2YCR_CB)
width = len(img[0])
height = len(img)
y = np.zeros((height, width), np.float32) + img[:, :, 0]
cr = np.zeros((height, width), np.float32) + img[:, :, 1]
cb = np.zeros((height, width), np.float32) + img[:, :, 2]

# Size of the image in bits before compression (8 bits per sample, 3 channels).
totalNumberOfBitsWithoutCompression = len(y) * len(y[0]) * 8 + len(cb) * len(cb[0]) * 8 + len(cr) * len(cr[0]) * 8

# Centre channel values around zero before the DCT, hence subtract 128.
y = y - 128
cr = cr - 128
cb = cb - 128

# 4:2:2 subsampling of the chrominance channels (other schemes are possible).
SSH, SSV = 2, 2
# Low-pass the chrominance channels with a 2x2 averaging filter before subsampling.
crf = cv2.boxFilter(cr, ddepth=-1, ksize=(2, 2))
cbf = cv2.boxFilter(cb, ddepth=-1, ksize=(2, 2))
crSub = crf[::SSV, ::SSH]
cbSub = cbf[::SSV, ::SSH]

# Zero-pad each channel up to a multiple of windowSize if necessary
# (slice assignment replaces the original element-by-element copy loops).
yWidth, yLength = ceil(len(y[0]) / windowSize) * windowSize, ceil(len(y) / windowSize) * windowSize
if (len(y[0]) % windowSize == 0) and (len(y) % windowSize == 0):
    yPadded = y.copy()
else:
    yPadded = np.zeros((yLength, yWidth))
    yPadded[:len(y), :len(y[0])] = y
# The chrominance channels share dimensions, so both are padded the same way.
cWidth, cLength = ceil(len(cbSub[0]) / windowSize) * windowSize, ceil(len(cbSub) / windowSize) * windowSize
if (len(cbSub[0]) % windowSize == 0) and (len(cbSub) % windowSize == 0):
    crPadded = crSub.copy()
    cbPadded = cbSub.copy()
else:
    crPadded = np.zeros((cLength, cWidth))
    cbPadded = np.zeros((cLength, cWidth))
    crPadded[:len(crSub), :len(crSub[0])] = crSub
    cbPadded[:len(cbSub), :len(cbSub[0])] = cbSub

# DCT and quantized-coefficient holders.
yDct, crDct, cbDct = np.zeros((yLength, yWidth)), np.zeros((cLength, cWidth)), np.zeros((cLength, cWidth))
# Number of 8x8 blocks per axis for luminance and chrominance.
hBlocksForY = int(len(yDct[0]) / windowSize)
vBlocksForY = int(len(yDct) / windowSize)
hBlocksForC = int(len(crDct[0]) / windowSize)
vBlocksForC = int(len(crDct) / windowSize)
yq, crq, cbq = np.zeros((yLength, yWidth)), np.zeros((cLength, cWidth)), np.zeros((cLength, cWidth))
# One zigzag row per block.
yZigzag = np.zeros(((vBlocksForY * hBlocksForY), windowSize * windowSize))
crZigzag = np.zeros(((vBlocksForC * hBlocksForC), windowSize * windowSize))
cbZigzag = np.zeros(((vBlocksForC * hBlocksForC), windowSize * windowSize))

for i in range(vBlocksForY):
    for j in range(hBlocksForY):
        rows = slice(i * windowSize, i * windowSize + windowSize)
        cols = slice(j * windowSize, j * windowSize + windowSize)
        yDct[rows, cols] = cv2.dct(yPadded[rows, cols])
        # NOTE: np.ceil is kept from the original; standard JPEG quantization rounds.
        yq[rows, cols] = np.ceil(yDct[rows, cols] / QTY)
        # BUG FIX: the original indexed the zigzag row with i * j, which collides
        # (e.g. blocks (1, 2) and (2, 1)) and maps the whole first block row to
        # row 0; the correct flat block index is i * hBlocksForY + j.
        yZigzag[i * hBlocksForY + j] = zigzag(yq[rows, cols])
yZigzag = yZigzag.astype(np.int16)

# Both chrominance channels share block counts, so one loop handles both.
for i in range(vBlocksForC):
    for j in range(hBlocksForC):
        rows = slice(i * windowSize, i * windowSize + windowSize)
        cols = slice(j * windowSize, j * windowSize + windowSize)
        crDct[rows, cols] = cv2.dct(crPadded[rows, cols])
        crq[rows, cols] = np.ceil(crDct[rows, cols] / QTC)
        # Same i * j -> i * hBlocksForC + j index fix as for luminance.
        crZigzag[i * hBlocksForC + j] = zigzag(crq[rows, cols])
        cbDct[rows, cols] = cv2.dct(cbPadded[rows, cols])
        cbq[rows, cols] = np.ceil(cbDct[rows, cols] / QTC)
        cbZigzag[i * hBlocksForC + j] = zigzag(cbq[rows, cols])
crZigzag = crZigzag.astype(np.int16)
cbZigzag = cbZigzag.astype(np.int16)

# Run-length encode each channel, then build a Huffman dictionary from the
# symbol frequencies.
yEncoded = run_length_encoding(yZigzag)
yFrequencyTable = get_freq_dict(yEncoded)
yHuffman = find_huffman(yFrequencyTable)
crEncoded = run_length_encoding(crZigzag)
crFrequencyTable = get_freq_dict(crEncoded)
crHuffman = find_huffman(crFrequencyTable)
cbEncoded = run_length_encoding(cbZigzag)
cbFrequencyTable = get_freq_dict(cbEncoded)
cbHuffman = find_huffman(cbFrequencyTable)

# Huffman-encode each channel; str.join avoids the original's quadratic +=.
yBitsToTransmit = ''.join(yHuffman[value] for value in yEncoded)
crBitsToTransmit = ''.join(crHuffman[value] for value in crEncoded)
cbBitsToTransmit = ''.join(cbHuffman[value] for value in cbEncoded)

# Write the three bitstreams; the with-statement closes the file even on error.
with open("CompressedImage.asfh", "w") as file:
    file.write(yBitsToTransmit + "\n" + crBitsToTransmit + "\n" + cbBitsToTransmit)

totalNumberOfBitsAfterCompression = len(yBitsToTransmit) + len(crBitsToTransmit) + len(cbBitsToTransmit)
# NOTE(review): the source was truncated inside this print; bits-before over
# bits-after is the obvious completion — confirm against the original.
print("Compression Ratio is " + str(totalNumberOfBitsWithoutCompression / totalNumberOfBitsAfterCompression))

Full Screen

Full Screen

LBPfuncs.py

Source:LBPfuncs.py Github

copy

Full Screen

import cv2
import numpy as np

import get_LBP_from_Image as LBP


def region_flag(Sub, yt, xt, windowSize, region_thresh):
    """Return True when the windowSize x windowSize patch of ``Sub`` at (yt, xt)
    sums to more than windowSize**2 / region_thresh."""
    return np.sum(Sub[yt:yt + windowSize, xt:xt + windowSize]) > (windowSize ** 2) / region_thresh


def region_flag_out(out, yt, xt, windowSize, region_thresh_out):
    """Return True when the patch of ``out`` at (yt, xt) sums to at most
    ``region_thresh_out`` (i.e. the region has not accumulated a score yet)."""
    return np.sum(out[yt:yt + windowSize, xt:xt + windowSize]) <= region_thresh_out


def MyResize(I, factor):
    """Shrink image ``I`` by ``factor`` using bilinear interpolation.

    BUG FIX: I.shape[0] is the row count (height) and I.shape[1] the column
    count (width), but cv2.resize takes dsize as (width, height); the original
    passed (rows/factor, cols/factor), which distorts non-square images.
    """
    h0 = int(I.shape[0] / factor)  # output rows
    w0 = int(I.shape[1] / factor)  # output columns
    return cv2.resize(I, (w0, h0), interpolation=cv2.INTER_LINEAR)


def calc_regional_LBP(target, background, lbp, out, yt, xt, windowSize):
    """Accumulate into ``out`` the chi-square distance between the uniform-LBP
    histograms of the matching windows of ``target`` and ``background``."""
    t_windowed = target[yt:yt + windowSize, xt:xt + windowSize]
    b_windowed = background[yt:yt + windowSize, xt:xt + windowSize]
    t_LBP_map = lbp.lbp_uniform(t_windowed)  # uniform-pattern LBP feature map
    b_LBP_map = lbp.lbp_uniform(b_windowed)
    t_hist = lbp.get_uniform_hist(t_LBP_map)  # normalized LBP histogram
    b_hist = lbp.get_uniform_hist(b_LBP_map)
    d = LBP.chi2_distance(t_hist, b_hist)
    out[yt:yt + windowSize, xt:xt + windowSize] += np.ones((windowSize, windowSize)) * d


def CompareLBP(target, background, Sub, windowSize=32, step=4, region_thresh=3, decay=0.05):
    """Slide a window over ``target``/``background`` and, wherever ``Sub`` marks
    enough foreground, accumulate the LBP histogram distance into a score map.

    Interior hits also trigger re-scoring of the eight neighbouring windows with
    decayed thresholds; leftover strips at the right/bottom edges narrower than
    windowSize are compared with rotation-invariant LBP features instead.

    NOTE(review): this module was recovered from a whitespace-mangled source;
    the nesting of the neighbour-refinement section and the trailing return were
    reconstructed and should be confirmed against the original.
    """
    h, w = target.shape[0], target.shape[1]
    x_iter = int((w - windowSize) / step) + 1
    y_iter = int((h - windowSize) / step) + 1
    x_left = w - windowSize - step * (x_iter - 1)  # leftover columns on the right
    y_left = h - windowSize - step * (y_iter - 1)  # leftover rows at the bottom
    out = np.zeros((h, w))
    lbp = LBP.LBP()
    flag = 0
    deviation = decay
    deviation1 = deviation * 0.75
    deviation2 = deviation * 0.75
    deviation3 = deviation * 0.75
    deviation4 = deviation * 0.75
    for j in range(y_iter):
        for i in range(x_iter):
            xt = i * step
            yt = j * step
            # This inline sum duplicated region_flag in the original; the helper
            # computes exactly the same expression.
            CalcFlag = region_flag(Sub, yt, xt, windowSize, region_thresh)
            if 1 < j < y_iter - 2 and 1 < i < x_iter - 2:
                # Interior windows: also consider foreground in the lower/right
                # neighbours, but skip ones already scored in ``out``.
                CalcFlag = CalcFlag or region_flag(Sub, yt + windowSize, xt - windowSize, windowSize, region_thresh)
                CalcFlag = CalcFlag or region_flag(Sub, yt + windowSize, xt, windowSize, region_thresh)
                CalcFlag = CalcFlag or region_flag(Sub, yt + windowSize, xt + windowSize, windowSize, region_thresh)
                CalcFlag = CalcFlag or region_flag(Sub, yt, xt + windowSize, windowSize, region_thresh)
                CalcFlag = CalcFlag and region_flag_out(out, yt + windowSize, xt - windowSize, windowSize, 1)
                CalcFlag = CalcFlag and region_flag_out(out, yt + windowSize, xt, windowSize, 1)
                CalcFlag = CalcFlag and region_flag_out(out, yt + windowSize, xt + windowSize, windowSize, 1)
                CalcFlag = CalcFlag and region_flag_out(out, yt, xt + windowSize, windowSize, 1)
            if CalcFlag:
                flag += 3  # a hit keeps scoring alive for the next iterations
            if flag > 0:
                calc_regional_LBP(target, background, lbp, out, yt, xt, windowSize)
                if 1 < j < y_iter - 2 and 1 < i < x_iter - 2:
                    # Re-score the eight neighbours whose accumulated score
                    # already exceeds the decayed threshold.
                    if region_flag(out, yt - windowSize, xt - windowSize, windowSize, region_thresh * deviation1):
                        calc_regional_LBP(target, background, lbp, out, yt - windowSize, xt - windowSize, windowSize)
                    if region_flag(out, yt - windowSize, xt, windowSize, region_thresh * deviation2):
                        calc_regional_LBP(target, background, lbp, out, yt - windowSize, xt, windowSize)
                    if region_flag(out, yt - windowSize, xt + windowSize, windowSize, region_thresh * deviation3):
                        calc_regional_LBP(target, background, lbp, out, yt - windowSize, xt + windowSize, windowSize)
                    if region_flag(out, yt, xt - windowSize, windowSize, region_thresh * deviation4):
                        calc_regional_LBP(target, background, lbp, out, yt, xt - windowSize, windowSize)
                    if region_flag(out, yt, xt + windowSize, windowSize, region_thresh * deviation):
                        calc_regional_LBP(target, background, lbp, out, yt, xt + windowSize, windowSize)
                    if region_flag(out, yt + windowSize, xt - windowSize, windowSize, region_thresh * deviation):
                        calc_regional_LBP(target, background, lbp, out, yt + windowSize, xt - windowSize, windowSize)
                    if region_flag(out, yt + windowSize, xt, windowSize, region_thresh * deviation):
                        calc_regional_LBP(target, background, lbp, out, yt + windowSize, xt, windowSize)
                    if region_flag(out, yt + windowSize, xt + windowSize, windowSize, region_thresh * deviation):
                        calc_regional_LBP(target, background, lbp, out, yt + windowSize, xt + windowSize, windowSize)
                flag -= 1
    # Right-edge strip narrower than windowSize: rotation-invariant LBP.
    if x_left:
        for j in range(y_iter):
            yt = j * step
            t_windowed = target[yt:yt + windowSize, w - x_left:w]
            b_windowed = background[yt:yt + windowSize, w - x_left:w]
            t_LBP_map = lbp.lbp_revolve(t_windowed)  # rotation-invariant LBP features
            b_LBP_map = lbp.lbp_revolve(b_windowed)
            t_hist = lbp.get_revolve_hist(t_LBP_map)  # normalized histogram
            b_hist = lbp.get_revolve_hist(b_LBP_map)
            d = LBP.chi2_distance(t_hist, b_hist)
            out[yt:yt + windowSize, w - x_left:w] += np.ones((windowSize, x_left)) * d
    # Bottom-edge strip.
    if y_left:
        for i in range(x_iter):
            xt = i * step
            t_windowed = target[h - y_left:h, xt:xt + windowSize]
            b_windowed = background[h - y_left:h, xt:xt + windowSize]
            t_LBP_map = lbp.lbp_revolve(t_windowed)
            b_LBP_map = lbp.lbp_revolve(b_windowed)
            t_hist = lbp.get_revolve_hist(t_LBP_map)
            b_hist = lbp.get_revolve_hist(b_LBP_map)
            d = LBP.chi2_distance(t_hist, b_hist)
            out[h - y_left:h, xt:xt + windowSize] += np.ones((y_left, windowSize)) * d
    # Bottom-right corner.
    if y_left and x_left:
        t_windowed = target[h - y_left:h, w - x_left:w]
        b_windowed = background[h - y_left:h, w - x_left:w]
        t_LBP_map = lbp.lbp_revolve(t_windowed)
        b_LBP_map = lbp.lbp_revolve(b_windowed)
        t_hist = lbp.get_revolve_hist(t_LBP_map)
        b_hist = lbp.get_revolve_hist(b_LBP_map)
        d = LBP.chi2_distance(t_hist, b_hist)
        out[h - y_left:h, w - x_left:w] += np.ones((y_left, x_left)) * d
    # out = out / np.max(out)
    # NOTE(review): the source was truncated here; returning the accumulated map
    # is the obvious completion — confirm against the original.
    return out

Full Screen

Full Screen

match_features.py

Source:match_features.py Github

copy

Full Screen

import cv2
import numpy as np


def _best_match(window, image, candidates, windowSize):
    """Return the candidate coordinate whose patch in ``image`` has the highest
    normalized cross-correlation with ``window`` ([0, 0] when no patch fits)."""
    maxVal = -1
    index = [0, 0]
    for j in candidates:
        other = image[j[0] - windowSize:j[0] + windowSize, j[1] - windowSize:j[1] + windowSize]
        # Skip candidates whose window is clipped by the image border.
        if other.shape != (windowSize * 2, windowSize * 2):
            continue
        product = np.mean((window - window.mean()) * (other - other.mean()))
        stds = window.std() * other.std()
        product /= stds
        if maxVal < product:
            maxVal = product
            index = j
    return index


def match_features(feature_coords1, feature_coords2, image1, image2, windowSize=None):
    """Match feature points between two images by normalized cross-correlation
    of (2*windowSize)-square patches, keeping only mutual best matches.

    Fixes vs. the original:
      * ``windowSize == None`` -> ``windowSize is None``;
      * the else-branch was a byte-for-byte duplicate of the if-branch (only the
        default assignment differed) and is collapsed into one code path, with
        the symmetric best-match loops shared via _best_match;
      * the Python 2 ``print len(matches)`` statement is converted to Python 3.
    """
    if windowSize is None:
        windowSize = 20
    dictFeature2To1 = {}
    dictFeature1To2 = {}
    image1 = cv2.cvtColor(image1, cv2.COLOR_RGB2GRAY)
    image2 = cv2.cvtColor(image2, cv2.COLOR_RGB2GRAY)
    # Best match in image2 for every usable feature of image1.
    for i in feature_coords1:
        windowIn1 = image1[i[0] - windowSize:i[0] + windowSize, i[1] - windowSize:i[1] + windowSize]
        if windowIn1.shape != (windowSize * 2, windowSize * 2):
            continue
        dictFeature2To1[i] = _best_match(windowIn1, image2, feature_coords2, windowSize)
    # Best match in image1 for every usable feature of image2.
    for i in feature_coords2:
        windowIn2 = image2[i[0] - windowSize:i[0] + windowSize, i[1] - windowSize:i[1] + windowSize]
        if windowIn2.shape != (windowSize * 2, windowSize * 2):
            continue
        dictFeature1To2[i] = _best_match(windowIn2, image1, feature_coords1, windowSize)
    # Keep only mutual best matches.
    matches = list()
    listInPairs = list()
    for i in dictFeature2To1.keys():
        temp = dictFeature2To1[i]
        # NOTE(review): when no candidate fits, temp is the [0, 0] list, which is
        # unhashable and raises here; temp may also be missing from the other
        # dict — both latent issues are present in the original as well.
        if dictFeature1To2[temp] == i:
            index1 = feature_coords1.index(i)
            index2 = feature_coords2.index(temp)
            listInPairs.append([index1, index2])
            matches.append(i)
            matches.append(temp)
    print(len(matches))
    # NOTE(review): the source was truncated after the print; the original may
    # return matches / listInPairs — confirm before relying on a return value.

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run fMBT automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful