How to use the start_chunk method in Playwright Python

Best Python code snippets using playwright-python. Note that `start_chunk` is not part of the Playwright API itself; in the community snippets below it appears as an ordinary variable, parameter, or DataFrame name.

organizer_copy.py

Source: organizer_copy.py (GitHub)

import numpy as np
from datetime import datetime as dt
from backports.datetime_fromisoformat import MonkeyPatch

MonkeyPatch.patch_fromisoformat()

MAX_PACKET_SIZE = 4096
BYTES_IN_PACKET = 1456

np.set_printoptions(threshold=np.inf, linewidth=325)

class Organizer:
    def __init__(self, all_data, num_chirp_loops, num_rx, num_tx, num_samples):
        self.data = all_data[0]
        self.packet_num = all_data[1]
        self.byte_count = all_data[2]
        self.num_packets = len(self.byte_count)
        self.num_chirps = num_chirp_loops*num_tx
        self.num_rx = num_rx
        self.num_samples = num_samples
        self.BYTES_IN_FRAME = self.num_chirps * self.num_rx * self.num_samples * 2 * 2
        self.BYTES_IN_FRAME_CLIPPED = (self.BYTES_IN_FRAME // BYTES_IN_PACKET) * BYTES_IN_PACKET
        self.UINT16_IN_FRAME = self.BYTES_IN_FRAME // 2
        self.NUM_PACKETS_PER_FRAME = self.BYTES_IN_FRAME // BYTES_IN_PACKET
        self.start_time = all_data[3]
        self.end_time = all_data[4]

    def iq(self, raw_frame):
        """Reorganizes raw ADC data into a full frame

        Args:
            raw_frame (ndarray): Data to format
            num_chirps: Number of chirps included in the frame
            num_rx: Number of receivers used in the frame
            num_samples: Number of ADC samples included in each chirp

        Returns:
            ndarray: Reformatted frame of raw data of shape (num_chirps, num_rx, num_samples)
        """
        ret = np.zeros(len(raw_frame) // 2, dtype=np.csingle)

        # Separate IQ data
        ret[0::2] = raw_frame[0::4] + 1j * raw_frame[2::4]
        ret[1::2] = raw_frame[1::4] + 1j * raw_frame[3::4]
        return ret.reshape((self.num_chirps, self.num_rx, self.num_samples))

    def get_frames(self, start_chunk, end_chunk, bc):
        # if first packet received is not the first byte transmitted
        if bc[start_chunk] == 0:
            bytes_left_in_curr_frame = 0
            start = start_chunk*(BYTES_IN_PACKET // 2)
        else:
            frames_so_far = bc[start_chunk] // self.BYTES_IN_FRAME
            bytes_so_far = frames_so_far * self.BYTES_IN_FRAME
            # bytes_left_in_curr_frame = bc[start_chunk] - bytes_so_far
            bytes_left_in_curr_frame = (frames_so_far+1)*self.BYTES_IN_FRAME - bc[start_chunk]
            start = (bytes_left_in_curr_frame // 2) + start_chunk*(BYTES_IN_PACKET // 2)
        # print(start_chunk, start)

        # find num of frames
        total_bytes = bc[end_chunk] - (bc[start_chunk] + bytes_left_in_curr_frame)
        num_frames = total_bytes // (self.BYTES_IN_FRAME)
        # print(bc[end_chunk])
        # print(num_frames, start_chunk, end_chunk, self.BYTES_IN_FRAME)

        frames = np.zeros((num_frames, self.UINT16_IN_FRAME), dtype=np.int16)
        ret_frames = np.zeros((num_frames, self.num_chirps, self.num_rx, self.num_samples), dtype=complex)

        # compress all received data into one byte stream
        all_uint16 = np.array(self.data).reshape(-1)

        # only choose uint16 starting from a new frame
        all_uint16 = all_uint16[start:]

        # organizing into frames
        for i in range(num_frames):
            frame_start_idx = i*self.UINT16_IN_FRAME
            frame_end_idx = (i+1)*self.UINT16_IN_FRAME
            frame = all_uint16[frame_start_idx:frame_end_idx]
            frames[i][:len(frame)] = frame.astype(np.int16)
            ret_frames[i] = self.iq(frames[i])

        return ret_frames

    def organize(self):
        radar_unix_start_time = dt.timestamp(dt.fromisoformat(self.start_time[:-1]))*1e6
        radar_unix_end_time = dt.timestamp(dt.fromisoformat(self.end_time[:-1]))*1e6
        print('Start time: ', self.start_time)
        print('End time: ', self.end_time)

        self.byte_count = np.array(self.byte_count)
        self.data = np.array(self.data)
        self.packet_num = np.array(self.packet_num)

        # Reordering packets
        # sorted_idx = np.argsort(self.packet_num)
        # print(sorted_idx.dtype)
        # print(len(self.packet_num), len(self.byte_count), len(self.data), sorted_idx.shape)
        # self.packet_num = self.packet_num[sorted_idx]
        # self.data = self.data[sorted_idx]
        # self.byte_count = self.byte_count[sorted_idx]
        # self.packet_num = self.packet_num.tolist()
        # self.byte_count = self.byte_count.tolist()
        # self.data = self.data.tolist()

        bc = np.array(self.byte_count)
        packets_ooo = np.where(np.array(self.packet_num[1:])-np.array(self.packet_num[0:-1]) != 1)[0]
        is_not_monotonic = np.where(np.array(self.packet_num[1:])-np.array(self.packet_num[0:-1]) < 0)[0]
        print('Non monotonic packets: ', is_not_monotonic)

        if len(packets_ooo) == 0:
            print('packets in order')
            start_chunk = 0
            ret_frames = self.get_frames(start_chunk, -1, bc)
        elif len(packets_ooo) == 1:
            print('1 packet not in order')
            start_chunk = packets_ooo[0]+1
            ret_frames = self.get_frames(start_chunk, -1, bc)
            # start_chunk = 0
        else:
            print('Packet num not in order')
            packets_ooo = np.append(packets_ooo, len(self.packet_num)-1)
            # print('Packets ooo', packets_ooo)
            # print(self.NUM_PACKETS_PER_FRAME)

            # diff = [44]
            # for i in range(len(packets_ooo)-1):
            #     # print(i, len(packets_ooo))
            #     diff.append(self.packet_num[packets_ooo[i+1]]-self.packet_num[packets_ooo[i]+1])
            # print('Packets received before atleast 1 loss ', diff)
            # print('Total packets received ', np.sum(np.array(diff)))

            diff = []
            for i in range(len(packets_ooo)-1):
                diff.append(self.packet_num[packets_ooo[i]+1]-self.packet_num[packets_ooo[i]])
            # print('Packets lost before atleast 1 reception ', diff)

            packets_lost = np.sum(np.array(diff))
            packets_expected = self.packet_num[-1]-self.packet_num[0]+1
            print('Total packets lost ', packets_lost)
            print('Total packets expected ', packets_expected)
            print('Fraction lost ', packets_lost/packets_expected)

            new_packets_ooo = []
            start_new_packets_ooo = []
            end_new_packets_ooo = []
            for i in range(1, len(packets_ooo)):
                if (packets_ooo[i] - packets_ooo[i-1]) > self.NUM_PACKETS_PER_FRAME*2:
                    new_packets_ooo.append(packets_ooo[i-1])
                    start_new_packets_ooo.append(packets_ooo[i-1])
                    end_new_packets_ooo.append(packets_ooo[i])
            new_packets_ooo = np.append(new_packets_ooo, -1)
            # print('New packets ooo', new_packets_ooo)
            # print('Start new packets ooo', start_new_packets_ooo)
            # print('End new packets ooo', end_new_packets_ooo)
            # exit()

            for i in range(len(start_new_packets_ooo)):
                # for i in range(len(new_packets_ooo)-1):
                # for i in [len(new_packets_ooo)-2]:
                # start_chunk = new_packets_ooo[i]+1
                # end_chunk = new_packets_ooo[i+1]
                start_chunk = start_new_packets_ooo[i]+1
                end_chunk = end_new_packets_ooo[i]
                # print(self.packet_num[start_chunk],self.packet_num[start_chunk-1])
                # print(self.byte_count[start_chunk],self.byte_count[start_chunk-1])
                curr_frames = self.get_frames(start_chunk, end_chunk, bc)
                if i == 0:
                    ret_frames = curr_frames
                else:
                    ret_frames = np.concatenate((ret_frames, curr_frames), axis=0)

        return ret_frames

        # Old approach
        # frame_start_idx = np.where((bc % self.BYTES_IN_FRAME_CLIPPED == 0) & (bc != 0))[0]
        # num_frames = len(frame_start_idx)-1
        # frames = np.zeros((num_frames, self.UINT16_IN_FRAME), dtype=np.int16)
        # ret_frames = np.zeros((num_frames, self.num_chirps, self.num_rx, self.num_samples), dtype=complex)
        # for i in range(num_frames):
        #     d = np.array(self.data[frame_start_idx[i]:frame_start_idx[i+1]])
        #     frame = d.reshape(-1)
        #     frames[i][:len(frame)] = frame.astype(np.int16)
        #     ret_frames[i] = self.iq(frames[i])...
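
In this snippet, `start_chunk` and `end_chunk` are packet indices that `organize()` derives from gaps in `packet_num` and passes to `get_frames()`. Here is a minimal driver sketch, assuming the `Organizer` class above is importable; the radar dimensions and the synthetic packets are invented purely for illustration:

import numpy as np
# Hypothetical import; assumes the snippet above is saved as organizer_copy.py
from organizer_copy import Organizer, BYTES_IN_PACKET

num_packets = 12

# Fake a perfectly ordered capture: 12 packets of 728 uint16 words each.
data = [np.random.randint(0, 2**12, BYTES_IN_PACKET // 2, dtype=np.uint16)
        for _ in range(num_packets)]
packet_num = list(range(1, num_packets + 1))                    # consecutive -> "packets in order"
byte_count = [i * BYTES_IN_PACKET for i in range(num_packets)]  # byte offset at the start of each packet
all_data = (data, packet_num, byte_count,
            '2022-01-01T00:00:00.000000Z',   # start_time; the trailing 'Z' is stripped before parsing
            '2022-01-01T00:00:01.000000Z')   # end_time

# 2 chirp loops x 2 TX x 4 RX x 91 samples -> BYTES_IN_FRAME = 5824, i.e. 4 packets per frame
org = Organizer(all_data, num_chirp_loops=2, num_rx=4, num_tx=2, num_samples=91)
frames = org.organize()  # in-order packets -> start_chunk = 0, one call to get_frames(0, -1, bc)
print(frames.shape)      # (2, 4, 4, 91); the trailing partial frame is dropped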

pm_rmq.py

Source: pm_rmq.py (GitHub)

import numpy as np
import math
import itertools
import random

#################################################
### Solution 1: O(1) query, O(n log n) space ####
#################################################

def preprocess_naive(arr):
    num_elem = len(arr)
    num_powers_of_2 = math.ceil(math.log(num_elem, 2)) + 1
    solutions = np.full((num_elem, num_powers_of_2), -1)
    # Store answer from every start point...
    for start_idx in range(num_elem):
        # ...for every interval length = power of 2
        for i in range(num_powers_of_2):
            end_idx = start_idx + 2 ** i
            solutions[start_idx, i] = np.argmin(arr[start_idx : end_idx])
    return solutions

def query_naive(arr, solutions, start, end):
    assert(start != end)
    largest_pow = math.floor(math.log(end - start, 2))
    interval_size = 2 ** largest_pow
    start_1 = start
    start_2 = end - interval_size
    interval_1_argmin = start_1 + solutions[start_1, largest_pow]
    interval_2_argmin = start_2 + solutions[start_2, largest_pow]
    if arr[interval_1_argmin] <= arr[interval_2_argmin]:
        return interval_1_argmin
    else:
        return interval_2_argmin

def test_method_1():
    arr = [0, 1, 2, 1, 0, 1, 2, 3, 2, 3, 2, 1, 2, 1, 0, 1]
    print("Array:")
    print(arr)
    soln = preprocess_naive(arr)
    print("--------------")
    print("Preprocessing:")
    print(soln)
    print("--------------")
    print("Query (2, 5):", query_naive(arr, soln, 2, 5))
    for i in range(len(arr)):
        for j in range(i + 1, len(arr)):
            expected = i + np.argmin(arr[i:j])
            actual = query_naive(arr, soln, i, j)
            assert(expected == actual)
    print("All tests passed!")

###########################################
### Solution 2: O(1) query, O(n) space ####
###########################################

# Input: numpy array (a +-1 sequence)
# Output: (arr, chunk_summaries, top_preprocessing, bottom_lookup)
# where chunk_summaries[i] is the min of the ith chunk,
# top_preprocessing is the naive RMQ structure over chunk_summaries,
# and bottom_lookup[i] is a view `l` onto a lookup table
# such that l[start, end] is the argmin of the
# values between `start` and `end` in the ith chunk
def pm_rmq_preprocess(arr):
    arr = np.array(arr)
    # 1) Split array into chunks of 1/2 lg n size
    chunk_size = math.floor(1/2 * math.log(len(arr), 2))
    num_chunks = math.ceil(len(arr)/chunk_size)
    # 2) Construct full lookup table
    #    * Enumerate all possible 2^{chunk_size} +- sequences
    lookup = np.zeros(shape = ((2, ) * (chunk_size - 1)) + (chunk_size, chunk_size + 1), dtype = int)
    for step_sequence in itertools.product([0, 1], repeat = chunk_size - 1):
        sequence = np.zeros(shape = (chunk_size, ), dtype = int)
        for i in range(1, chunk_size):
            sequence[i] = sequence[i - 1] + (-1 if step_sequence[i - 1] == 0 else 1)

        #    * For each, compute the answers to all possible queries
        for start_query in range(0, chunk_size):
            for end_query in range(start_query + 1, chunk_size + 1):
                lookup_index = tuple(step_sequence) + (start_query, end_query)
                lookup[lookup_index] = np.argmin(sequence[start_query : end_query])
    # 3) Construct "top" array by brute force and "bottom" array of pointers
    chunk_summaries = np.zeros(num_chunks, dtype = int)
    bottom_lookup = []  # a list of *views* onto the full lookup table
    for i in range(num_chunks):
        start_chunk = i * chunk_size
        end_chunk = (i + 1) * chunk_size
        chunk = arr[start_chunk : end_chunk] - arr[start_chunk]
        chunk_sequence = [0 if d == -1 else 1 for d in np.diff(chunk)]
        chunk_summaries[i] = arr[start_chunk + np.argmin(chunk)]
        bottom_lookup.append(lookup[tuple(chunk_sequence)])
    # 4) Preprocess "top" array with O(n log n) space approach
    top_preprocessing = preprocess_naive(chunk_summaries)
    return (arr, chunk_summaries, top_preprocessing, bottom_lookup)

def pm_rmq_query(preprocessed_arr, start_index, end_index):
    arr, top, top_soln, bottom_lookup = preprocessed_arr
    chunk_size = math.floor(1/2 * math.log(len(arr), 2))
    num_chunks = math.ceil(len(arr)/chunk_size)
    start_chunk = start_index // chunk_size
    end_chunk = end_index // chunk_size
    start_within_chunk = start_index - start_chunk * chunk_size
    end_within_chunk = end_index - end_chunk * chunk_size
    if start_chunk == end_chunk:
        chunk_lookup = bottom_lookup[start_chunk]
        return start_index + chunk_lookup[start_within_chunk, end_within_chunk]
    else:
        # Start value
        start_lookup = bottom_lookup[start_chunk]
        argmin_start = start_index + start_lookup[start_within_chunk, -1]
        min_start = arr[argmin_start]
        # End value
        if end_within_chunk != 0:
            end_lookup = bottom_lookup[end_chunk]
            argmin_end = end_chunk * chunk_size + end_lookup[0, end_within_chunk]
            min_end = arr[argmin_end]
        else:
            argmin_end = -1
            min_end = np.inf
        # Intermediate values
        if start_chunk + 1 != end_chunk:
            argmin_chunk = query_naive(top, top_soln, start_chunk + 1, end_chunk)
            argmin_intermediate = argmin_chunk * chunk_size + bottom_lookup[argmin_chunk][0, -1]
            min_intermediate = arr[argmin_intermediate]
        else:
            argmin_intermediate = -1
            min_intermediate = np.inf
        # Return minimum value
        argmins = [argmin_start, argmin_intermediate, argmin_end]
        mins = [min_start, min_intermediate, min_end]
        return argmins[np.argmin(mins)]

def test_method_2():
    steps = [random.choice([0, 1]) for _ in range(100)]
    arr = np.cumsum(steps)
    print("Array:")
    print(arr)
    preprocessed_arr = pm_rmq_preprocess(arr)
    q1 = pm_rmq_query(preprocessed_arr, 3, 10)
    print("Query [3, 10): ", q1)
    q2 = pm_rmq_query(preprocessed_arr, 5, 6)
    print("Query [5, 6): ", q2)

    for i in range(len(arr)):
        for j in range(i + 1, len(arr)):
            expected = i + np.argmin(arr[i:j])
            actual = pm_rmq_query(preprocessed_arr, i, j)
            assert(expected == actual)
    print("All tests passed!")

if __name__ == '__main__':
    ...
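
In this file, `start_chunk` is simply the index of the first fixed-size chunk touched by a range-minimum query. The lookup table is built by enumerating ±1 step patterns, so the natural input is a plus-minus-1 sequence. A quick sanity check, assuming the functions above are in scope:

import numpy as np
import random

# Assumes pm_rmq_preprocess and pm_rmq_query from the snippet above.

# Adjacent elements differ by exactly +-1, the precondition the
# chunk lookup table is built around.
steps = [random.choice([-1, 1]) for _ in range(64)]
arr = np.cumsum(steps)

pre = pm_rmq_preprocess(arr)
lo, hi = 3, 10
# The O(1) query should agree with a brute-force argmin over the half-open range.
assert pm_rmq_query(pre, lo, hi) == lo + np.argmin(arr[lo:hi])
print("query [3, 10) ->", pm_rmq_query(pre, lo, hi))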

salsa.py

Source: salsa.py (GitHub)

from samson.utilities.manipulation import left_rotate, get_blocks
from samson.utilities.bytes import Bytes
from samson.core.primitives import StreamCipher, Primitive
from samson.core.metadata import SizeType, SizeSpec, EphemeralSpec, EphemeralType, ConstructionType, FrequencyType
from samson.ace.decorators import register_primitive
from copy import deepcopy
import math

def QUARTER_ROUND(a: int, b: int, c: int, d: int) -> (int, int, int, int):
    """
    Performs a quarter round of Salsa.

    Parameters:
        a (int): Salsa state variable.
        b (int): Salsa state variable.
        c (int): Salsa state variable.
        d (int): Salsa state variable.

    Returns:
        (int, int, int, int): New values for (a, b, c, d).
    """
    b = (b ^ left_rotate((a + d) & 0xFFFFFFFF, 7))
    c = (c ^ left_rotate((b + a) & 0xFFFFFFFF, 9))
    d = (d ^ left_rotate((c + b) & 0xFFFFFFFF, 13))
    a = (a ^ left_rotate((d + c) & 0xFFFFFFFF, 18))
    return a, b, c, d

@register_primitive()
class Salsa(StreamCipher):
    """
    Salsa stream cipher

    Add-rotate-xor (ARX) structure.

    https://en.wikipedia.org/wiki/Salsa20
    """

    CONSTRUCTION_TYPES = [ConstructionType.ADD_ROTATE_XOR]
    EPHEMERAL = EphemeralSpec(ephemeral_type=EphemeralType.NONCE, size=SizeSpec(size_type=SizeType.SINGLE, sizes=96))
    USAGE_FREQUENCY = FrequencyType.UNUSUAL

    def __init__(self, key: bytes, nonce: bytes, rounds: int=20, constant: bytes=b"expand 32-byte k"):
        """
        Parameters:
            key      (bytes): Key (128 or 256 bits).
            nonce    (bytes): Nonce (8 bytes).
            rounds     (int): Number of rounds to perform.
            constant (bytes): Constant used in generating the keystream (16 bytes).
        """
        Primitive.__init__(self)

        # If key is 80 bits, zero pad it (https://cr.yp.to/snuffle/salsafamily-20071225.pdf, 4.1)
        if len(key) == 10:
            key = Bytes.wrap(key).zfill(16)

        # If key is 128 bits, just repeat it
        if len(key) == 16:
            key += key

        self.key = key
        self.nonce = nonce
        self.rounds = rounds
        self.constant = constant
        self.counter = 0

    def full_round(self, block_num: int, state: list=None) -> Bytes:
        """
        Performs a full round of Salsa.

        Parameters:
            block_num (int): Current block number.

        Returns:
            Bytes: Keystream block.
        """
        ctr_bytes = int.to_bytes(block_num, 8, 'little')

        cons_blocks  = [int.from_bytes(block, 'little') for block in get_blocks(self.constant, 4)]
        key_blocks   = [int.from_bytes(block, 'little') for block in get_blocks(self.key, 4)]
        ctr_blocks   = [int.from_bytes(block, 'little') for block in get_blocks(ctr_bytes, 4)]
        nonce_blocks = [int.from_bytes(block, 'little') for block in get_blocks(self.nonce, 4)]

        x = state or [
            cons_blocks[0],  *key_blocks[:4],
            cons_blocks[1],  *nonce_blocks,
            *ctr_blocks,     cons_blocks[2],
            *key_blocks[4:], cons_blocks[3]
        ]

        x = deepcopy(x)
        tmp = deepcopy(x)

        for _ in range(self.rounds // 2):
            # Odd round
            x[ 0], x[ 4], x[ 8], x[12] = QUARTER_ROUND(x[ 0], x[ 4], x[ 8], x[12])
            x[ 5], x[ 9], x[13], x[ 1] = QUARTER_ROUND(x[ 5], x[ 9], x[13], x[ 1])
            x[10], x[14], x[ 2], x[ 6] = QUARTER_ROUND(x[10], x[14], x[ 2], x[ 6])
            x[15], x[ 3], x[ 7], x[11] = QUARTER_ROUND(x[15], x[ 3], x[ 7], x[11])

            # Even round
            x[ 0], x[ 1], x[ 2], x[ 3] = QUARTER_ROUND(x[ 0], x[ 1], x[ 2], x[ 3])
            x[ 5], x[ 6], x[ 7], x[ 4] = QUARTER_ROUND(x[ 5], x[ 6], x[ 7], x[ 4])
            x[10], x[11], x[ 8], x[ 9] = QUARTER_ROUND(x[10], x[11], x[ 8], x[ 9])
            x[15], x[12], x[13], x[14] = QUARTER_ROUND(x[15], x[12], x[13], x[14])

        for i in range(16):
            x[i] += tmp[i]

        return Bytes(b''.join([int.to_bytes(state_int & 0xFFFFFFFF, 4, 'little') for state_int in x]), byteorder='little')

    def yield_state(self, start_chunk: int=0, num_chunks: int=1, state: list=None):
        """
        Generates `num_chunks` chunks of keystream starting from `start_chunk`.

        Parameters:
            num_chunks  (int): Desired number of 64-byte keystream chunks.
            start_chunk (int): Chunk number to start at.
            state      (list): Custom state to be directly injected.

        Returns:
            generator: Keystream chunks.
        """
        for iteration in range(start_chunk, start_chunk + num_chunks):
            yield self.full_round(iteration, state=state)

    def generate(self, length: int) -> Bytes:
        """
        Generates `length` of keystream.

        Parameters:
            length (int): Desired length of keystream in bytes.

        Returns:
            Bytes: Keystream.
        """
        num_chunks = math.ceil(length / 64)
        start_chunk = self.counter // 64
        counter_mod = self.counter % 64

        if counter_mod:
            num_chunks += 1

        keystream = sum(list(self.yield_state(start_chunk=start_chunk, num_chunks=num_chunks)))[counter_mod:counter_mod+length]
        self.counter += length
        ...
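
`start_chunk` is what makes Salsa seekable: each 64-byte keystream block depends only on its block counter, so `yield_state` can jump straight to any block without generating the ones before it. A small sketch, assuming the `Salsa` class above is in scope; the key and nonce values are arbitrary:

from samson.utilities.bytes import Bytes
# Assumes the Salsa class defined above.

key   = Bytes(b'\x01' * 32)  # 256-bit key
nonce = Bytes(b'\x02' * 8)   # 8-byte nonce
cipher = Salsa(key, nonce)

# Keystream blocks 0 and 1, then block 1 alone by seeking with start_chunk.
first_two   = list(cipher.yield_state(start_chunk=0, num_chunks=2))
second_only = list(cipher.yield_state(start_chunk=1, num_chunks=1))

assert len(first_two[1]) == 64         # one 64-byte chunk per block counter
assert first_two[1] == second_only[0]  # seeking reproduces the same block

`generate()` relies on exactly this: it maps `self.counter` to a `start_chunk` plus a byte offset within that chunk, so random-access reads never have to regenerate earlier keystream.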

hpopt1.py

Source: hpopt1.py (GitHub)

# -*- coding: utf-8 -*-
"""
Prepare data
numeric.to_hdf('numeric_b1_b7_nf149.hdf', 'numeric')
"""
import sys
sys.path.insert(0, '../../bosch_helper')
from bosch_helper import *

param_grid = {'max_depth': [13, 14, 15, 16],
              'eta': [0.025, 0.03, 0.035],
              'silent': [1],
              'objective': ['binary:logistic'],
              'nthread': [16],
              'lambda': [3.5, 4, 4.5],
              'alpha': [0, 0.25],
              'subsample': [0.85, 0.9, 0.95],
              'min_child_weight': [4.5, 5, 5.5],
              'booster': ['gbtree', 'dart'],
              'base_score': [0.0058],
              'colsample_bytree': [0.5, 0.55, 0.6, 0.65]}

param_list = list(ParameterSampler(param_grid,
                                   n_iter=100, random_state=285749))

#%% Load data of both train and test sets
# load numeric data
numeric = pd.read_hdf('../../data/data.hdf', 'numeric')

# labels for the train set
y_train = numeric.loc[('train', slice(None)), 'Response']
y_train = y_train.astype(np.int8)

# Import names of important features
important_features = pd.read_csv(
    '../benchmark_1/important_numeric_features.csv',
    index_col=0, header=None)
important_features = list(important_features.values.ravel())
numeric = numeric[important_features]
numeric.index.names = ['set', 'Id']

#%% Load features: station flow
date_train = pd.read_csv(
    '../benchmark_2/train_station_flow.csv.gz',
    index_col=0, header=None)
date_test = pd.read_csv(
    '../benchmark_2/test_station_flow.csv.gz',
    index_col=0, header=None)

# Change index and column names
station_flow = pd.concat((date_train, date_test), keys=['train', 'test'])
station_flow.index.names = ['set', 'Id']
station_flow.columns = ['hash_station_flow0']

# Encode hash
le = LabelEncoder()
station_flow['hash_station_flow0'] = le.fit_transform(station_flow['hash_station_flow0'])

# Join to numeric
numeric = numeric.join(station_flow)
del station_flow
gc.collect()

#%% Load features: benchmark 3, consecutive Id chunk
start_chunk = pd.read_csv('../benchmark_3/start_chunk.csv.gz', index_col=0)

# Group start chunks by train and test sets
start_chunk_train = start_chunk.loc[start_chunk.Response!=-1].drop(
    ['Response'], axis=1)
start_chunk_test = start_chunk.loc[start_chunk.Response==-1].drop(
    ['Response'], axis=1)
start_chunk = pd.concat((start_chunk_train, start_chunk_test),
                        keys=['train', 'test'])
start_chunk.index.names = ['set', 'Id']

# Join to numeric
numeric = numeric.join(start_chunk)
del start_chunk, start_chunk_test, start_chunk_train
gc.collect()

#%% Load features: benchmark 4, neighbor time and response records
n = pd.read_csv('../benchmark_4/benchmark_4_neighbors.csv.gz', index_col=0)

# Group by train and test
neighbor_train = n.loc[n.Response!=-1]
neighbor_train.drop(['Response'], axis=1, inplace=True)
neighbor_test = n.loc[n.Response==-1]
neighbor_test.drop(['Response'], axis=1, inplace=True)
neighbor = pd.concat((neighbor_train, neighbor_test), keys=['train', 'test'])
neighbor.index.names = ['set', 'Id']

# Join to numeric
numeric = numeric.join(neighbor)
del neighbor, neighbor_test, neighbor_train, n
gc.collect()

#%% Load features: benchmark 6, neighbor numeric features
numeric.sort_index(level=['Id'], inplace=True)
numeric = numeric.join(numeric[important_features].shift(),
                       rsuffix='_previous')
numeric = numeric.join(numeric[important_features].shift(-1),
                       rsuffix='_next')

#%% Load features: benchmark 7, time features without MeanTimeDiff
time_features = pd.read_hdf('../benchmark_7/time_features_diff.hdf',
                            'time_features')
time_features.drop(['time_start', 'time_end', 'time_duration', 'Response'],
                   axis=1, inplace=True)
time_features.drop(time_features.columns[-40:], axis=1, inplace=True)
time_features.index.names = ['set', 'Id']

# Join to numeric
numeric = numeric.join(time_features)
del time_features
gc.collect()

#%% Save numeric data to a HDF for later use
for c in tqdm.tqdm(numeric.columns):
    if numeric[c].dtype==np.float64:
        numeric[c] = numeric[c].astype(np.float16)
numeric.to_hdf('numeric_b1_b7_nf149.hdf', 'numeric')
...
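
In this script, `start_chunk` is a pandas DataFrame of precomputed "consecutive Id chunk" features, with Response == -1 marking test rows. The split-on-Response / re-concat pattern it goes through (shared by the neighbor features) is easiest to see on a toy frame; all names and values below are invented for illustration:

import pandas as pd

# Toy stand-in for start_chunk.csv.gz: Response == -1 marks test rows.
df = pd.DataFrame({'chunk_feat': [10, 20, 30, 40],
                   'Response':   [0, 1, -1, -1]},
                  index=[101, 102, 201, 202])

train = df.loc[df.Response != -1].drop(['Response'], axis=1)
test  = df.loc[df.Response == -1].drop(['Response'], axis=1)

# Rebuild one frame keyed by ('train'|'test', Id) so that it matches
# numeric's MultiIndex and a plain .join() lines features up row-for-row.
both = pd.concat((train, test), keys=['train', 'test'])
both.index.names = ['set', 'Id']
print(both)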

Playwright tutorial

LambdaTest’s Playwright tutorial will give you a broad overview of the Playwright automation framework, its unique features, and its use cases, with examples to deepen your understanding of Playwright testing. The tutorial offers A-to-Z guidance, from installing the Playwright framework to best practices and advanced concepts.

Chapters:

  1. What is Playwright : Playwright is comparatively new but has gained considerable popularity. Get to know some of Playwright's history, along with interesting facts connected with it.
  2. How To Install Playwright : Learn in detail which basic configuration and dependencies are required to install Playwright and run a test. Get step-by-step directions for installing the Playwright automation framework.
  3. Playwright Futuristic Features: Launched in 2020, Playwright quickly gained huge popularity because of compelling features such as the Playwright Test Generator and Inspector, the Playwright Reporter, and the Playwright auto-waiting mechanism. Read up on these features to master Playwright testing.
  4. What is Component Testing: Component testing in Playwright is a unique feature that allows a tester to test a single component of a web application without integrating it with other elements. Learn how to perform component testing with the Playwright automation framework.
  5. Inputs And Buttons In Playwright: Every website has input boxes and buttons; learn how to test inputs and buttons across different scenarios, with examples.
  6. Functions and Selectors in Playwright: Learn how to launch the Chromium browser with Playwright. Also, gain a better understanding of important functions like “BrowserContext,” which allows you to run multiple isolated browser sessions, and “newPage,” which opens a page to interact with (see the short sketch after this list).
  7. Handling Alerts and Dropdowns in Playwright : Playwright interacts with different types of alerts and pop-ups (simple, confirmation, and prompt) and different types of dropdowns (single-select and multi-select). Get hands-on with handling alerts and dropdowns in Playwright testing.
  8. Playwright vs Puppeteer: Get to know the differences between the two testing frameworks: how they compare with one another, which browsers they support, and what features they provide.
  9. Run Playwright Tests on LambdaTest: Playwright testing with LambdaTest takes test performance to the next level. You can run multiple Playwright tests in parallel on the LambdaTest cloud. Get a step-by-step guide to running your Playwright tests on the LambdaTest platform.
  10. Playwright Python Tutorial: The Playwright automation framework supports all major languages, including Python, JavaScript, TypeScript, and .NET. Python end-to-end testing with Playwright has particular advantages because of the language's versatility. Get the hang of Playwright Python testing with this chapter.
  11. Playwright End To End Testing Tutorial: Get hands-on with Playwright end-to-end testing and learn to use features such as Trace Viewer, debugging, networking, component testing, visual testing, and many more.
  12. Playwright Video Tutorial: Watch video tutorials on Playwright testing from experts and get an in-depth, sequential explanation of Playwright automation testing.
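
As a taste of the functions covered in chapter 6, here is a minimal Playwright Python sketch using the sync API (the URL is just a placeholder):

from playwright.sync_api import sync_playwright

with sync_playwright() as p:
    browser = p.chromium.launch(headless=True)
    # A BrowserContext is an isolated, incognito-like session; several
    # contexts can share one browser instance.
    context = browser.new_context()
    page = context.new_page()  # a tab inside that session
    page.goto("https://example.com")
    print(page.title())
    browser.close()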

