How to use the start_example method in hypothesis

Best Python code snippet using hypothesis
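In Hypothesis itself, start_example is a method of the internal ConjectureData object: the engine calls it, paired with stop_example, to mark the start and end of a labelled block of draws, which helps shrinking understand an example's structure. A minimal sketch, assuming the internal ConjectureData API of older Hypothesis releases (around 4.x/5.x); this is not part of the public API, and the names and signatures have changed in newer versions:

# Hedged sketch: internal, version-dependent API, not the public Hypothesis API.
from hypothesis.internal.conjecture.data import ConjectureData

data = ConjectureData.for_buffer(bytes(8))  # replay draws from a fixed byte buffer
SOME_LABEL = 1                   # an integer label identifying this kind of block
data.start_example(SOME_LABEL)   # open a labelled block of draws
value = data.draw_bits(8)        # draw one byte inside the block
data.stop_example()              # close the block
print(value)

The snippets below, by contrast, use start_example only as an ordinary variable or function name, so the pattern above does not apply to them.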

datasets.py

Source: datasets.py (GitHub)


#TODO : use a Timeseries Generator of Keras : https://keras.io/preprocessing/sequence/
__author__ = 'AlexBioY'

import numpy as np
import importlib
import os
from collections import namedtuple
from src.data.data_sources import get_combined_cleaned_onecoin_df
from src.data.settings import DATASET_TRANSFORM
import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)


# TODO: do it smarter (use a keras or scipy function) or use matrix multiplication
def _normalize_dataset(X):
    # per example: offset each channel by its final value, scale by the channel's range
    for example in range(X.shape[0]):
        X[example, :, 0] = (X[example, :, 0] - X[example, -1, 0]) / (np.max(X[example, :, 0]) - np.min(X[example, :, 0]))
        X[example, :, 1] = (X[example, :, 1] - X[example, -1, 1]) / (np.max(X[example, :, 1]) - np.min(X[example, :, 1]))
        X[example, :, 2] = (X[example, :, 2] - X[example, -1, 2]) / (np.max(X[example, :, 2]) - np.min(X[example, :, 2]))
        X[example, :, 3] = (X[example, :, 3] - X[example, -1, 3]) / (np.max(X[example, :, 3]) - np.min(X[example, :, 3]))
    return X


def df_to_X_onecoin(data_df, ds_transform):
    '''
    Transform an input ts into an array [examples, time points back (LSTM steps), feature dimension].
    Label computation can be delegated to a different function.
    '''
    win_size = DATASET_TRANSFORM[ds_transform].win_size
    stride = DATASET_TRANSFORM[ds_transform].stride
    label_func = DATASET_TRANSFORM[ds_transform].label_func
    num_classes = DATASET_TRANSFORM[ds_transform].num_classes
    future = DATASET_TRANSFORM[ds_transform].future
    res_period = DATASET_TRANSFORM[ds_transform].res_period
    n = len(data_df)
    if (n - win_size) < 0:
        logger.error(" DATASET is smaller than win_size! we need more data")
    num_examples = int((n - win_size) / stride)  # how many times we can stride through the timeseries (number of possible examples)
    # (4968, 96, 1)
    predictors = data_df.shape[1]  # predict from a multivariate ts: price, volume and derived features
    label_dummy_classes = num_classes
    data_set = np.zeros([num_examples, win_size, predictors])
    labels = np.zeros([num_examples, label_dummy_classes])
    # form training examples by shifting through the dataset
    logger.info(" One coin: Converting dataframe to dataset array, " + str(num_examples) + " examples")
    for start_example in range(0, num_examples):
        end_example = start_example + win_size
        # build X array
        data_set[start_example, :, 0] = data_df[start_example:end_example]['price'].values.reshape([-1, 1])[:, 0]
        data_set[start_example, :, 1] = data_df[start_example:end_example]['volume'].values.reshape([-1, 1])[:, 0]
        data_set[start_example, :, 2] = data_df[start_example:end_example]['price_var'].values.reshape([-1, 1])[:, 0]
        data_set[start_example, :, 3] = data_df[start_example:end_example]['volume_var'].values.reshape([-1, 1])[:, 0]
        data_set[start_example, :, 4] = data_df[start_example:end_example]['price_max'].values.reshape([-1, 1])[:, 0]
        data_set[start_example, :, 5] = data_df[start_example:end_example]['price_min'].values.reshape([-1, 1])[:, 0]
        #TODO: add blockchain info here
        # set the future values to the following price values, or to price_max / price_min values
        # (in case we predict a max or min hit)
        if label_func == 'label_3class_max_hit':
            future_values = data_df[end_example-1 : end_example + future]['price_max']
            threshold_1 = DATASET_TRANSFORM[ds_transform].threshold_1
            threshold_2 = DATASET_TRANSFORM[ds_transform].threshold_2
        elif label_func == 'label_3class_min_hit':
            future_values = data_df[end_example-1 : end_example + future]['price_min']
            threshold_1 = DATASET_TRANSFORM[ds_transform].threshold_1
            threshold_2 = DATASET_TRANSFORM[ds_transform].threshold_2
        elif label_func == 'label_2class_max_hit':
            future_values = data_df[end_example-1 : end_example + future]['price_max']
            threshold_1 = DATASET_TRANSFORM[ds_transform].threshold_1
            threshold_2 = None
        else:
            future_values = data_df[end_example-1 : end_example + future]['price']  # we also need the last price of the example
            threshold_1 = DATASET_TRANSFORM[ds_transform].return_target
            threshold_2 = None
        # build Y array (labels)
        module = importlib.import_module('src.data.datasets')
        func_obj = getattr(module, label_func)
        labels[start_example, :] = func_obj(future_values, threshold_1, threshold_2)
        # assert the array dimensions
        if start_example % 3000 == 0:
            logger.info(" ... df->array examples completed: " + str(start_example))
    logger.info(" One coin: finished.")
    return data_set, labels


def label_3class_return_target(future_prices, threshold_1, threshold_2):
    '''
    calculate a dummy class number out of 90 future prices as 0 - same / 1 - up / 2 - down
    '''
    return_target = threshold_1
    # 0 - same, 1 - up, 2 - down
    label_dummy_classes = 3
    open_price = future_prices[0]
    close_price = future_prices[-1]
    price_return = close_price - open_price
    percentage_return = 1 - (open_price - price_return) / open_price
    label = 0 if (abs(percentage_return) < return_target) else np.sign(percentage_return)
    dummy_labels = np.zeros([1, label_dummy_classes]).astype(int)
    # 0 - same / 1 - up / 2 - down
    if label == 0:
        dummy_labels[0, 0] = 1
    elif label == 1:
        dummy_labels[0, 1] = 1
    elif label == -1:
        dummy_labels[0, 2] = 1
    return dummy_labels


def label_2class_return_target(future_prices, threshold_1, threshold_2):
    # NOTE: the return target is ignored here
    return_target = threshold_1
    # 1 - up, -1 - down
    label_dummy_classes = 2
    open_price = future_prices[0]
    close_price = future_prices[-1]
    price_return = close_price - open_price
    percentage_return = 1 - (open_price - price_return) / open_price
    label = np.sign(percentage_return)
    dummy_labels = np.zeros([1, label_dummy_classes]).astype(int)
    # 0 - up / 1 - down
    if label == 1:
        dummy_labels[0, 0] = 1
    elif label == -1:
        dummy_labels[0, 1] = 1
    return dummy_labels


def label_3class_max_hit(future_prices, threshold_1, threshold_2):
    # 0 - same, 1 - threshold_1 hit, 2 - threshold_2 hit
    label_dummy_classes = 3
    open_price = future_prices[0]
    close_price = future_prices[-1]
    # min_price = np.min(future_prices)
    # percent_min = 1 - (open_price - min_price) / open_price
    max_price = np.max(future_prices)
    percent_max = (max_price - open_price) / open_price
    if percent_max > threshold_2:
        label = 2
    elif percent_max > threshold_1:
        label = 1
    else:
        label = 0
    dummy_labels = np.zeros([1, label_dummy_classes]).astype(int)
    if label == 0:
        dummy_labels[0, 0] = 1
    elif label == 1:
        dummy_labels[0, 1] = 1
    elif label == 2:
        dummy_labels[0, 2] = 1
    return dummy_labels


def label_2class_max_hit(future_prices, threshold_1, threshold_2):
    # create a label set as follows:
    # label 1 (max hit) if anytime within future_prices the price max value exceeded the threshold_1 value
    # (i.e. there was an opportunity to trade)
    # labels: 0 - same, 1 - threshold_1 hit
    label_dummy_classes = 2
    open_price = future_prices[0]
    close_price = future_prices[-1]
    # min_price = np.min(future_prices)
    # percent_min = 1 - (open_price - min_price) / open_price
    max_price = np.max(future_prices)
    percent_max = (max_price - open_price) / open_price
    if percent_max > threshold_1:
        label = 1
    else:
        label = 0
    dummy_labels = np.zeros([1, label_dummy_classes]).astype(int)
    if label == 0:
        dummy_labels[0, 0] = 1
    elif label == 1:
        dummy_labels[0, 1] = 1
    return dummy_labels


def combine_all_coins(COINS_LIST, db_name, ds_transform):
    '''
    Build a full dataset X, Y by fusing the datasets of each coin from COINS_LIST:
    - for each pair get a ts of price and volume, calculate the variance and build a df [time, price, vol, price_var, vol_var]
    - split this ts into pieces of win_size and calculate a label for each
    - pile them up into one dataset
    '''
    res_period = DATASET_TRANSFORM[ds_transform].res_period
    X = []  # (147319, 200, 4) - 4 is price, volume, price_var, volume_var
    Y = []  # (147319, 3) - 3 is the number of classes
    logger.info(" > Form data set X array from a coin list:" + str(COINS_LIST))
    for transaction_coin, counter_coin in COINS_LIST:
        # retrieve a time series df from the DB as [time, price, volume, price_var, volume_var]
        data_df = get_combined_cleaned_onecoin_df(db_name, transaction_coin, counter_coin, res_period)
        # TODO: cut all old data from the period of high volatility
        data_df = data_df.loc['2018-03-01':]
        # convert this df into an array of shape (147319, 200, 4) = (examples, time_back, features)
        # all parameters of the data transformation are in data.settings
        X_train_one, Y_train_one = df_to_X_onecoin(data_df, ds_transform)
        del data_df
        # pile up into one array
        if len(X) == 0:
            X = X_train_one
            Y = Y_train_one
        else:
            X = np.concatenate((X, X_train_one), axis=0)
            Y = np.concatenate((Y, Y_train_one), axis=0)
    # delete all examples with NaN inside
    idx2delete = []
    for n in range(X.shape[0]):
        if np.isnan(X[n, :, :]).any():
            idx2delete.append(n)
    X = np.delete(X, idx2delete, axis=0)
    Y = np.delete(Y, idx2delete, axis=0)
    if DATASET_TRANSFORM[ds_transform].num_classes == 3:
        logger.info("> X,Y Datasets have been built: same= " + str(sum(Y[:, 0])) + ' | UP= ' + str(sum(Y[:, 1])) + ' | DOWN= ' + str(sum(Y[:, 2])))
    elif DATASET_TRANSFORM[ds_transform].num_classes == 2:
        logger.info("> X,Y Datasets have been built: UP= " + str(sum(Y[:, 0])) + ' | DOWN= ' + str(sum(Y[:, 1])))
    else:
        logger.error("UNKNOWN NUMBER OF CLASSES!")
    # normalize
    # TODO: can I do it in-place?
    X = _normalize_dataset(X)
    # sanity check
    logger.info(" ... Sanity Checking for NaN in dataset: check for any nan")
    for n in range(X.shape[0]):
        if np.isnan(X[n, :, :]).any():
            logger.info(n)
    logger.info("=======> final X dataset shape: " + str(X.shape))
    logger.info("=======> final Y dataset shape: " + str(Y.shape))
    # TODO: check if the folder exists
    # np.save("data/processed/"+fname_x, X)
    # np.save("data/processed/"+fname_y, Y)
...
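The heart of df_to_X_onecoin is the sliding-window loop in which start_example and end_example delimit each training example. A self-contained toy version under simplified assumptions (synthetic data, only the price and volume columns, stride of 1) shows the array shape it produces:

import numpy as np
import pandas as pd

# synthetic stand-ins for two of the columns the snippet slices
df = pd.DataFrame({
    'price': np.arange(10, dtype=float),
    'volume': np.arange(10, dtype=float) * 2.0,
})

win_size = 4
num_examples = len(df) - win_size  # how many windows fit with stride 1
X = np.zeros([num_examples, win_size, df.shape[1]])

for start_example in range(num_examples):
    end_example = start_example + win_size
    # each example is a win_size-long slice of every feature column
    X[start_example, :, 0] = df['price'].values[start_example:end_example]
    X[start_example, :, 1] = df['volume'].values[start_example:end_example]
    # a label would be computed from df[end_example - 1 : end_example + future]

print(X.shape)  # (6, 4, 2) -> (examples, time steps back, features)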


run_glrp_grid_mnist.py

Source: run_glrp_grid_mnist.py (GitHub)


#!python
"""
Running the GLRP on a GCNN model trained on MNIST data. Digits are graph signals on an 8-nearest-neighbor graph.
The model can be retrained by uncommenting a part of the code.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.metrics import accuracy_score, f1_score
from tensorflow.examples.tutorials.mnist import input_data
from components import glrp_scipy, visualize_mnist
from lib import models, graph, coarsening
import time

COARSENING_LEVELS = 4  # to satisfy pooling of size 4 two times we need 4 levels
DIR_DATA = "./data/mnist"
METRIC = 'euclidean'
NUMBER_EDGES = 8
M = 28  # size of the digit picture's side
FEATURE_NUM = M * M
EPS = 1e-7  # for the adjacency matrix

if __name__ == "__main__":
    # !!!
    # creating the adjacency matrix with all the non-zero weights equal to 1
    z = graph.grid(M)
    dist, idx = graph.distance_sklearn_metrics(z, k=NUMBER_EDGES, metric=METRIC)
    A = graph.adjacency(dist, idx)
    A[A > EPS] = 1
    graphs, perm = coarsening.coarsen(A, levels=COARSENING_LEVELS, self_connections=False)
    L = [graph.laplacian(A, normalized=True) for A in graphs]

    mnist = input_data.read_data_sets(DIR_DATA, one_hot=False)
    train_data = mnist.train.images.astype(np.float32)
    val_data = mnist.validation.images.astype(np.float32)
    test_data = mnist.test.images.astype(np.float32)
    train_labels = mnist.train.labels
    val_labels = mnist.validation.labels
    test_labels = mnist.test.labels
    train_data = coarsening.perm_data(train_data, perm)
    val_data = coarsening.perm_data(val_data, perm)
    test_data = coarsening.perm_data(test_data, perm)

    common = {}
    common['dir_name'] = 'mnist_grid_ones/'
    common['num_epochs'] = 30
    common['batch_size'] = 100
    common['decay_steps'] = mnist.train.num_examples / common['batch_size']
    common['eval_frequency'] = 30 * common['num_epochs']
    common['brelu'] = 'b1relu'
    common['pool'] = 'mpool1'
    C = max(mnist.train.labels) + 1  # number of classes
    common['regularization'] = 5e-4
    common['dropout'] = 0.5
    common['learning_rate'] = 0.03
    common['decay_rate'] = 0.95
    common['momentum'] = 0.9
    common['F'] = [32, 64]
    common['K'] = [25, 25]
    common['p'] = [4, 4]
    common['M'] = [512, C]

    name = 'cgconv_cgconv_softmax_momentum'
    params = common.copy()
    params['dir_name'] += name
    params['filter'] = 'chebyshev5'
    model = models.cgcnn(L, **params)

    # !!!
    # To train again uncomment this part
    # start = time.time()
    # accuracy, loss, t_step, trained_losses = model.fit(train_data, train_labels, val_data, val_labels)
    # end = time.time()

    probas_ = model.get_probabilities(test_data)
    f1 = 100 * f1_score(test_labels, np.argmax(probas_, axis=1), average='weighted')
    acc = 100 * accuracy_score(test_labels, np.argmax(probas_, axis=1))
    print("\n\tTest F1 weighted: ", f1)
    print("\tTest Accuracy:", acc, "\n")

    data_to_test = val_data[0:common["batch_size"], ]
    probas_ = model.get_probabilities(data_to_test)
    labels_by_network = np.argmax(probas_, axis=1)
    labels_data_to_test = val_labels[0:common["batch_size"], ]
    I = np.eye(10)
    labels_hot_encoded = I[labels_by_network]
    glrp = glrp_scipy.GraphLayerwiseRelevancePropagation(model, data_to_test, labels_hot_encoded)
    rel = glrp.get_relevances()[-1]  # the relevances corresponding to the input layer
    data_to_test = coarsening.perm_data_back(data_to_test, perm, FEATURE_NUM)
    rel = coarsening.perm_data_back(rel, perm, FEATURE_NUM)

    results_dir = './figures/'
    start_example = 9
    end_example = 17
    visualize_mnist.plot_numbers(data_to_test[start_example:end_example, ], rel[start_example:end_example, ],
                                 labels_data_to_test[start_example:end_example, ],
                                 labels_by_network[start_example:end_example, ], results_dir)
    for i in range(start_example, end_example):  # 9, 17
        heatmap = visualize_mnist.get_heatmap(rel[i, ])
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.axis('off')
        ax.imshow(heatmap, cmap='Reds', interpolation='bilinear')
...
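Here start_example and end_example are nothing more than slice bounds choosing which of the 100 validation digits get plotted. The closing loop renders one relevance heatmap per selected digit; a stand-alone sketch with synthetic relevances, where a plain reshape stands in for the project-specific visualize_mnist.get_heatmap:

import numpy as np
from matplotlib import pyplot as plt

# synthetic stand-in for the relevance array `rel` (100 flattened 28x28 digits)
rel = np.random.rand(100, 28 * 28)

start_example, end_example = 9, 17  # same slice bounds as in the snippet
for i in range(start_example, end_example):
    heatmap = rel[i].reshape(28, 28)  # stand-in for visualize_mnist.get_heatmap
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.axis('off')
    ax.imshow(heatmap, cmap='Reds', interpolation='bilinear')
    fig.savefig('heatmap_{}.png'.format(i))
    plt.close(fig)  # free the figure instead of keeping all of them open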


main.py

Source: main.py (GitHub)


...
    os.system('termux-info | grep Linux>f.txt || echo not>f.txt')
    versio = str(open('f.txt').read())
    if 'Linux' in versio:
        startup_termux()
        start_example()
    else:
        startup_linux()
        lol = start_example()
        sleep(5)
        killprocess(lol)

def start_example():
    # Popen starts lo.py without blocking, so the caller gets a handle it can kill
    # (subprocess.run would wait for lo.py to finish before returning)
    starting_command = subprocess.Popen(["python3", "lo.py"])
    print(starting_command)
    return starting_command

def killprocess(starting):
    starting.kill()
...
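One detail worth calling out: subprocess.run blocks until the child exits, so it can only ever return an already-finished process, while subprocess.Popen returns immediately with a killable handle. A minimal sketch of the start-then-kill pattern, with "lo.py" standing in for whatever script the project launches:

import subprocess
from time import sleep

# launch without blocking so the parent controls the child's lifetime
proc = subprocess.Popen(["python3", "lo.py"])
sleep(5)
proc.kill()  # forcibly terminate the child (SIGKILL on POSIX)
proc.wait()  # reap it so no zombie process is left behind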


