How to use test_start method in avocado

Best Python code snippet using avocado_python
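Before looking at the snippets below, here is a minimal, self-contained sketch of an avocado test for orientation. The class and method names are illustrative only; avocado collects any instance method whose name starts with "test", so a method literally named test_start works like any other test. It assumes the avocado-framework package is installed and is run with "avocado run <file>".

from avocado import Test


class StartExample(Test):
    """Illustrative avocado test; any method prefixed with 'test' is collected."""

    def test_start(self):
        # Log a marker so the job log shows when this test begins.
        self.log.info("test_start is running")
        self.assertTrue(True)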

importer.py

Source: importer.py (GitHub)


import pandas as pd
import numpy as np
import statsmodels.api as sm
import statsmodels
import sklearn
import requests
import socket
from sklearn import preprocessing
import scipy
from scipy.stats import pearsonr
from math import sqrt
from pandas.plotting import register_matplotlib_converters
import os

dirname = os.path.dirname(__file__)
scaler = sklearn.preprocessing.MinMaxScaler()

# Define the path to the CSV datasets here:
PATH_TO_DATASET_H = ""
PATH_TO_DATASET_M = ""
PATH_TO_DATASET_L = ""

def create_features(df, resolution):
    # Month sin and cos features:
    df['mnth_sin'] = np.sin((df.index.month - 1) * (2. * np.pi / 12))
    df['mnth_cos'] = np.cos((df.index.month - 1) * (2. * np.pi / 12))
    # Workday feature -> True if not a weekend and not a Swedish public holiday
    df["is_workday"] = (df.index.weekday < 5) & (np.logical_not(np.isin(df.index.date, sweden_holidays))) * 1
    months = [i for i in range(1, 13)]
    weekdays = [i for i in range(0, 7)]
    hours = [i for i in range(0, 24)]
    for month in months:
        df["month_" + str(month)] = (df.index.month == month) * 1  # One-hot encoding of the current month
    for weekday in weekdays:
        df["weekday_" + str(weekday)] = (df.index.weekday == weekday) * 1  # One-hot encoding of the current weekday
    if not resolution == "1d":
        # The hourly one-hot encoded features are only used for the 15min and 1h resolutions, not for daily data:
        for hour in hours:
            df["hour_" + str(hour)] = (df.index.hour == hour) * 1
        df['hr_sin'] = np.sin((df.index.hour + (df.index.minute / 60)) * (2. * np.pi / 24))
        df['hr_cos'] = np.cos((df.index.hour + (df.index.minute / 60)) * (2. * np.pi / 24))
    return df

def load_h(resolution="1h", full_dataset=False, scaler_only=False):
    # Import the dataset values and define the index
    df = pd.read_csv(os.path.join(dirname, PATH_TO_DATASET_H), delimiter=";", index_col="Time")
    df.index = pd.to_datetime(df.index, format="%Y-%m-%d %H:%M")
    df.index.freq = df.index.inferred_freq
    # Rename to English feature names:
    df["Windspeed"] = df["Vindhastighet"]
    df["Temperature"] = df["Lufttemperatur"]
    if full_dataset:
        df = df["2017-01-24":"2019-03-07"]
    else:
        df = df["2017-01-24":"2018-12-07"]
    if resolution == "15min":
        # Resampling to quarter-hourly resolution
        df = df.resample("15min").interpolate()
    if resolution == "1d":
        # Resampling to daily resolution
        df = df.resample("1d").mean()
    # Now we re-add the newly generated features
    df = create_features(df, resolution)
    if not scaler_only:
        # Returns the scaled dataset
        df[["Load", "Windspeed", "Temperature"]] = scaler.fit_transform(df[["Load", "Windspeed", "Temperature"]].to_numpy())
        return df
    else:
        # Returns the scaler only -> used in the economic part for inverse scaling back to the original values
        return scaler.fit(df[["Load"]])

def load_m(resolution="1h", full_dataset=False, scaler_only=False):
    df = pd.read_csv(PATH_TO_DATASET_M, index_col="Time")
    df.index = pd.to_datetime(df.index, format="%Y-%m-%d %H:%M")
    df.index.freq = df.index.inferred_freq
    if full_dataset:
        df = df["2017-01-24":"2019-03-07"]
    else:
        df = df["2017-01-24":"2018-12-07"]
    if resolution == "15min":
        df = df.resample("15min").interpolate()
    if resolution == "1d":
        df = df.resample("1d").mean()
    # Now we re-add the features
    df = create_features(df, resolution)
    if not scaler_only:
        df[["Load", "Windspeed", "Temperature"]] = scaler.fit_transform(df[["Load", "Windspeed", "Temperature"]].to_numpy())
        return df
    else:
        return scaler.fit(df[["Load"]])

def load_customer_l(resolution="1h", full_dataset=False):
    df = pd.read_csv(os.path.join(dirname, PATH_TO_DATASET_L), usecols=["Time", "e507b", "Windspeed", "Temperature"], index_col="Time")
    df.index = pd.to_datetime(df.index)
    df.index.freq = df.index.inferred_freq
    if full_dataset:
        df = df["2017-01-24":"2019-03-07"]
    else:
        df = df["2017-01-24":"2018-12-07"]
    if resolution == "15min":
        df = df.resample("15min").interpolate()
    if resolution == "1d":
        df = df.resample("1d").mean()
    # Now we re-add the features
    df = create_features(df, resolution)
    df["Load"] = scaler.fit_transform(df[["Load"]].to_numpy())
    df["Windspeed"] = scaler.fit_transform(df[["Windspeed"]].to_numpy())
    df["Temperature"] = scaler.fit_transform(df[["Temperature"]].to_numpy())
    return df

# The dataset configs below call load_l(); alias it to the customer loader defined above.
load_l = load_customer_l

sweden_holidays = ["2017-01-01", "2017-01-06", "2017-04-14", "2017-04-17", "2017-05-01", "2017-05-25", "2017-06-04", "2017-06-06", "2017-06-23", "2017-06-24", "2017-11-04", "2017-12-24", "2017-12-25", "2017-12-26", "2017-12-31", "2018-01-01", "2018-01-06", "2018-03-30", "2018-04-01", "2018-04-02", "2018-05-01", "2018-05-10", "2018-05-20", "2018-06-06", "2018-06-22", "2018-06-23", "2018-11-03", "2018-12-24", "2018-12-25", "2018-12-26", "2018-12-31", "2019-01-01", "2019-01-06"]

def get_datasets_with_config():
    return get_datasets_for_final_evaluation(full_dataset=False)

def get_datasets_for_final_evaluation(full_dataset=True):
    return [
        {
            "name": "H@15min",
            "set": load_h(resolution="15min", full_dataset=True),
            "horizon": 4,
            "seasonal": 96,
            "hw": {"smoothing_level": 1, "smoothing_trend": 0.99355, "smoothing_seasonal": 0.07195, "sample_size": 800, "trend": "add", "seasonal": "add", "seasonal_periods": 96},
            "svr": {"C": 0.125, "epsilon": 0.03125},
            "rf": {"n_estimators": 1100, "max_features": 0.5, "min_samples_leaf": 50},
            "knn": {"n_neighbors": 5, "weights": "distance"},
            "arima": {"order": (5, 0, 2), "sample_size": 650},
            "arimax": {"features": ["Temperature", "Windspeed", "hr_cos", "hr_sin"]},
            "ann": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0.1},
            "rnn": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0.1, "cell_type": "gru"}
        },
        {
            "name": "M@15min",
            "set": load_m(resolution="15min", full_dataset=True),
            "horizon": 4,
            "seasonal": 96,
            "hw": {"smoothing_level": 0.35716, "smoothing_trend": 0.62701, "smoothing_seasonal": 0.57316, "sample_size": 500, "trend": "add", "seasonal": "add", "seasonal_periods": 96},
            "svr": {"C": 0.3125, "epsilon": 0.125},
            "rf": {"n_estimators": 800, "max_features": 0.33, "min_samples_leaf": 50},
            "knn": {"n_neighbors": 50, "weights": "uniform"},
            "arima": {"order": (5, 0, 3), "sample_size": 500},
            "arimax": {"features": ["Temperature", "Windspeed", "hr_cos", "mnth_cos", "mnth_sin", "is_workday"]},
            "ann": {"layers": 2, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0.1},
            "rnn": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0.1, "cell_type": "gru"}
        },
        {
            "name": "L@15min",
            "set": load_l(resolution="15min", full_dataset=True),
            "horizon": 4,
            "seasonal": 96,
            "hw": {"smoothing_level": 0.16035, "smoothing_trend": 0.19047, "smoothing_seasonal": 0.58363, "sample_size": 500, "trend": "add", "seasonal": "add", "seasonal_periods": 96},
            "svr": {"C": 0.125, "epsilon": 0.125},
            "rf": {"n_estimators": 1400, "max_features": 0.2, "min_samples_leaf": 50},
            "knn": {"n_neighbors": 150, "weights": "uniform"},
            "arima": {"order": (1, 0, 5), "sample_size": 650},
            "arimax": {"features": ["Temperature", "Windspeed", "hr_cos"]},
            "ann": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0},
            "rnn": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0, "cell_type": "lstm"}
        },
        {
            "name": "H@1h",
            "set": load_h(full_dataset=full_dataset),
            "horizon": 24,
            "seasonal": 24,
            "svr": {"C": 0.5, "epsilon": 0.03125},
            "rf": {"n_estimators": 800, "max_features": 0.5, "min_samples_leaf": 5},
            "knn": {"n_neighbors": 8, "weights": "uniform"},
            "arima": {"order": (4, 0, 5), "sample_size": 800},
            "arimax": {"features": ["Temperature", "Windspeed", "hr_cos", "mnth_cos", "mnth_sin"]},
            "sarima": {"order": (1, 0, 0), "seasonal_order": (2, 0, 0, 24), "sample_size": 800},
            "hw": {"smoothing_level": 0.00836, "smoothing_trend": 0.0145, "smoothing_seasonal": 0.01246, "sample_size": 850, "trend": "add", "seasonal": "add", "seasonal_periods": 24},
            "ann": {"layers": 2, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0.1},
            "rnn": {"layers": 2, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0.1, "cell_type": "gru"}
        },
        {
            "name": "M@1h",
            "set": load_m(full_dataset=full_dataset),
            "horizon": 24,
            "seasonal": 24,
            "svr": {"C": 0.3125, "epsilon": 0.0625},
            "rf": {"n_estimators": 200, "max_features": 0.33, "min_samples_leaf": 25},
            "knn": {"n_neighbors": 15, "weights": "uniform"},
            "arima": {"order": (5, 0, 5), "sample_size": 900},
            "arimax": {"features": ["Temperature", "Windspeed", "hr_cos"]},
            "sarima": {"order": (2, 1, 3), "seasonal_order": (2, 0, 1, 24), "sample_size": 900},
            "hw": {"smoothing_level": 0.894132, "smoothing_trend": 0.31404, "smoothing_seasonal": 0.31394, "sample_size": 600, "trend": "add", "seasonal": "add", "seasonal_periods": 24},
            "ann": {"layers": 2, "learning_rate": 0.001, "neurons_per_layer": 128, "optimizer": "adam", "dropout": 0.1},
            "rnn": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0.1, "cell_type": "gru"}
        },
        {
            "name": "L@1h",
            "set": load_l(full_dataset=full_dataset),
            "horizon": 24,
            "seasonal": 24,
            "svr": {"C": 0.5, "epsilon": 0.125},
            "rf": {"n_estimators": 500, "max_features": 0.5, "min_samples_leaf": 50},
            "knn": {"n_neighbors": 150, "weights": "uniform"},
            "arima": {"order": (2, 1, 1), "sample_size": 500},
            "arimax": {"features": ["Temperature", "Windspeed", "hr_cos"]},
            "sarima": {"order": (3, 1, 0), "seasonal_order": (2, 0, 0, 24), "sample_size": 500},
            "hw": {"smoothing_level": 0.00154, "smoothing_trend": 0.17787, "smoothing_seasonal": 0.17986, "sample_size": 200, "trend": "add", "seasonal": "add", "seasonal_periods": 7},
            "ann": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 32, "optimizer": "adam", "dropout": 0},
            "rnn": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 32, "optimizer": "adam", "dropout": 0, "cell_type": "gru"}
        },
        {
            "name": "H@1d",
            "set": load_h(resolution="1d", full_dataset=full_dataset),
            "horizon": 30,
            "seasonal": 7,
            "svr": {"C": 2, "epsilon": 0.0078125},
            "rf": {"n_estimators": 1100, "max_features": 0.5, "min_samples_leaf": 5},
            "knn": {"n_neighbors": 70, "weights": "distance"},
            "arima": {"order": (5, 1, 3), "sample_size": 250},
            "arimax": {"features": ["Temperature", "Windspeed", "is_workday"]},
            "sarima": {"order": (0, 1, 1), "seasonal_order": (1, 0, 1, 7), "sample_size": 450},
            "hw": {"smoothing_level": 0.03257, "smoothing_trend": 0.00125, "smoothing_seasonal": 0.00937, "sample_size": 500, "trend": "add", "seasonal": "add", "seasonal_periods": 7},
            "ann": {"layers": 2, "learning_rate": 0.001, "neurons_per_layer": 128, "optimizer": "adam", "dropout": 0.1},
            "rnn": {"layers": 2, "learning_rate": 0.001, "neurons_per_layer": 128, "optimizer": "adam", "dropout": 0, "cell_type": "gru"}
        },
        {
            "name": "M@1d",
            "set": load_m(resolution="1d", full_dataset=full_dataset),
            "horizon": 30,
            "seasonal": 7,
            "svr": {"C": 2, "epsilon": 0.125},
            "rf": {"n_estimators": 1100, "max_features": 0.33, "min_samples_leaf": 5},
            "knn": {"n_neighbors": 70, "weights": "distance"},
            "arima": {"order": (4, 1, 2), "sample_size": 450},
            "arimax": {"features": ["Temperature", "Windspeed", "mnth_sin", "is_workday"]},
            "sarima": {"order": (5, 0, 2), "seasonal_order": (1, 0, 0, 7), "sample_size": 450},
            "hw": {"smoothing_level": 0.00009, "smoothing_trend": 0.35317, "smoothing_seasonal": 0.00027, "sample_size": 450, "trend": "add", "seasonal": "add", "seasonal_periods": 7},
            "ann": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 128, "optimizer": "adam", "dropout": 0},
            "rnn": {"layers": 2, "learning_rate": 0.001, "neurons_per_layer": 16, "optimizer": "adam", "dropout": 0.1, "cell_type": "gru"}
        },
        {
            "name": "L@1d",
            "set": load_l(resolution="1d", full_dataset=full_dataset),
            "horizon": 30,
            "seasonal": 7,
            "svr": {"C": 0.125, "epsilon": 0.125},
            "rf": {"n_estimators": 1100, "max_features": 0.5, "min_samples_leaf": 50},
            "knn": {"n_neighbors": 120, "weights": "uniform"},
            "arima": {"order": (5, 0, 5), "sample_size": 450},
            "arimax": {"features": ["Temperature", "Windspeed", "mnth_cos", "is_workday"]},
            "sarima": {"order": (1, 1, 2), "seasonal_order": (2, 0, 2, 7), "sample_size": 450},
            "hw": {"smoothing_level": 0.048975, "smoothing_trend": 0.162792, "smoothing_seasonal": 0.16720, "sample_size": 500, "trend": "add", "seasonal": "add", "seasonal_periods": 7},
            "ann": {"layers": 2, "learning_rate": 0.001, "neurons_per_layer": 128, "optimizer": "rms", "dropout": 0},
            "rnn": {"layers": 1, "learning_rate": 0.001, "neurons_per_layer": 64, "optimizer": "rms", "dropout": 0.1, "cell_type": "lstm"}
        }
    ]

def get_test_windows(resolution="1h"):
    if resolution == "15min":
        return [{'id': 0, 'train_end': '2018-12-07 00:00:00', 'test_start': '2018-12-07 00:15:00'}, {'id': 1, 'train_end': '2018-12-07 00:45:00', 'test_start': '2018-12-07 01:00:00'}, {'id': 2, 'train_end': '2018-12-07 01:30:00', 'test_start': '2018-12-07 01:45:00'}, {'id': 3, 'train_end': '2018-12-07 02:15:00', 'test_start': '2018-12-07 02:30:00'}, {'id': 4, 'train_end': '2018-12-07 03:15:00', 'test_start': '2018-12-07 03:30:00'}, {'id': 5, 'train_end': '2018-12-07 04:00:00', 'test_start': '2018-12-07 04:15:00'}, {'id': 6, 'train_end': '2018-12-07 04:45:00', 'test_start': '2018-12-07 05:00:00'}, {'id': 7, 'train_end': '2018-12-07 05:45:00', 'test_start': '2018-12-07 06:00:00'}, {'id': 8, 'train_end': '2018-12-07 06:30:00', 'test_start': '2018-12-07 06:45:00'}, {'id': 9, 'train_end': '2018-12-07 07:15:00', 'test_start': '2018-12-07 07:30:00'}, {'id': 10, 'train_end': '2018-12-07 08:15:00', 'test_start': '2018-12-07 08:30:00'}, {'id': 11, 'train_end': '2018-12-07 09:00:00', 'test_start': '2018-12-07 09:15:00'}, {'id': 12, 'train_end': '2018-12-07 09:45:00', 'test_start': '2018-12-07 10:00:00'}, {'id': 13, 'train_end': '2018-12-07 10:45:00', 'test_start': '2018-12-07 11:00:00'}, {'id': 14, 'train_end': '2018-12-07 11:30:00', 'test_start': '2018-12-07 11:45:00'}, {'id': 15, 'train_end': '2018-12-07 12:15:00', 'test_start': '2018-12-07 12:30:00'}, {'id': 16, 'train_end': '2018-12-07 13:15:00', 'test_start': '2018-12-07 13:30:00'}, {'id': 17, 'train_end': '2018-12-07 14:00:00', 'test_start': '2018-12-07 14:15:00'}, {'id': 18, 'train_end': '2018-12-07 14:45:00', 'test_start': '2018-12-07 15:00:00'}, {'id': 19, 'train_end': '2018-12-07 15:45:00', 'test_start': '2018-12-07 16:00:00'}]
    if resolution == "1h":
        return [{'id': 0, 'train_end': '2018-12-07 00:00:00', 'test_start': '2018-12-07 01:00:00'}, {'id': 1, 'train_end': '2018-12-11 17:00:00', 'test_start': '2018-12-11 18:00:00'}, {'id': 2, 'train_end': '2018-12-16 10:00:00', 'test_start': '2018-12-16 11:00:00'}, {'id': 3, 'train_end': '2018-12-21 04:00:00', 'test_start': '2018-12-21 05:00:00'}, {'id': 4, 'train_end': '2018-12-25 21:00:00', 'test_start': '2018-12-25 22:00:00'}, {'id': 5, 'train_end': '2018-12-30 14:00:00', 'test_start': '2018-12-30 15:00:00'}, {'id': 6, 'train_end': '2019-01-04 08:00:00', 'test_start': '2019-01-04 09:00:00'}, {'id': 7, 'train_end': '2019-01-09 01:00:00', 'test_start': '2019-01-09 02:00:00'}, {'id': 8, 'train_end': '2019-01-13 18:00:00', 'test_start': '2019-01-13 19:00:00'}, {'id': 9, 'train_end': '2019-01-18 12:00:00', 'test_start': '2019-01-18 13:00:00'}, {'id': 10, 'train_end': '2019-01-23 05:00:00', 'test_start': '2019-01-23 06:00:00'}, {'id': 11, 'train_end': '2019-01-27 23:00:00', 'test_start': '2019-01-28 00:00:00'}, {'id': 12, 'train_end': '2019-02-01 16:00:00', 'test_start': '2019-02-01 17:00:00'}, {'id': 13, 'train_end': '2019-02-06 09:00:00', 'test_start': '2019-02-06 10:00:00'}, {'id': 14, 'train_end': '2019-02-11 03:00:00', 'test_start': '2019-02-11 04:00:00'}, {'id': 15, 'train_end': '2019-02-15 20:00:00', 'test_start': '2019-02-15 21:00:00'}, {'id': 16, 'train_end': '2019-02-20 13:00:00', 'test_start': '2019-02-20 14:00:00'}, {'id': 17, 'train_end': '2019-02-25 07:00:00', 'test_start': '2019-02-25 08:00:00'}, {'id': 18, 'train_end': '2019-03-02 00:00:00', 'test_start': '2019-03-02 01:00:00'}, {'id': 19, 'train_end': '2019-03-06 18:00:00', 'test_start': '2019-03-06 19:00:00'}]
    if resolution == "1d":
        return [{'id': 0, 'train_end': '2018-12-07 00:00:00', 'test_start': '2018-12-08 00:00:00'}, {'id': 1, 'train_end': '2018-12-10 00:00:00', 'test_start': '2018-12-11 00:00:00'}, {'id': 2, 'train_end': '2018-12-13 00:00:00', 'test_start': '2018-12-14 00:00:00'}, {'id': 3, 'train_end': '2018-12-16 00:00:00', 'test_start': '2018-12-17 00:00:00'}, {'id': 4, 'train_end': '2018-12-19 00:00:00', 'test_start': '2018-12-20 00:00:00'}, {'id': 5, 'train_end': '2018-12-23 00:00:00', 'test_start': '2018-12-24 00:00:00'}, {'id': 6, 'train_end': '2018-12-26 00:00:00', 'test_start': '2018-12-27 00:00:00'}, {'id': 7, 'train_end': '2018-12-29 00:00:00', 'test_start': '2018-12-30 00:00:00'}, {'id': 8, 'train_end': '2019-01-01 00:00:00', 'test_start': '2019-01-02 00:00:00'}, {'id': 9, 'train_end': '2019-01-04 00:00:00', 'test_start': '2019-01-05 00:00:00'}, {'id': 10, 'train_end': '2019-01-08 00:00:00', 'test_start': '2019-01-09 00:00:00'}, {'id': 11, 'train_end': '2019-01-11 00:00:00', 'test_start': '2019-01-12 00:00:00'}, {'id': 12, 'train_end': '2019-01-14 00:00:00', 'test_start': '2019-01-15 00:00:00'}, {'id': 13, 'train_end': '2019-01-17 00:00:00', 'test_start': '2019-01-18 00:00:00'}, {'id': 14, 'train_end': '2019-01-20 00:00:00', 'test_start': '2019-01-21 00:00:00'}, {'id': 15, 'train_end': '2019-01-24 00:00:00', 'test_start': '2019-01-25 00:00:00'}, {'id': 16, 'train_end': '2019-01-27 00:00:00', 'test_start': '2019-01-28 00:00:00'}, {'id': 17, 'train_end': '2019-01-30 00:00:00', 'test_start': '2019-01-31 00:00:00'}, {'id': 18, 'train_end': '2019-02-02 00:00:00', 'test_start': '2019-02-03 00:00:00'}, {'id': 19, 'train_end': '2019-02-06 00:00:00', 'test_start': '2019-02-07 00:00:00'}]

def return_pearson_r(x, y):
...
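The dictionaries returned by get_test_windows() pair a train_end timestamp with the test_start timestamp of the window that follows it. A hypothetical usage sketch, where df stands for any of the frames returned by the loaders above (pandas label slicing includes the end label):

windows = get_test_windows(resolution="1h")
for w in windows:
    train = df.loc[:w["train_end"]]   # history available to the model
    test = df.loc[w["test_start"]:]   # the forecast horizon begins here
    # fit on `train`, then evaluate the first `horizon` rows of `test`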


CS535MLClassifier.py

Source: CS535MLClassifier.py (GitHub)


__credits__ = ["Harsh Fatepuria", "Rahul Agrawal"]
__email__ = "fatepuri@usc.edu, rahulagr@usc.edu"
'''
command to execute:
    python CS535MLClassifier.py -svm -svm3 -svmrbf -nb -nn
note: needs sklearn, statistics and numpy python libraries
'''
import statistics
import math
import sys
import numpy as np
from sklearn import svm
from sklearn import metrics
from sklearn.naive_bayes import GaussianNB
from sklearn.model_selection import KFold, cross_val_score
from sklearn.neural_network import MLPClassifier

class CS535MLClassifier:
    def __init__(self, filename):
        self.dataset_file_name = filename

    def get_fold_test_and_train_only(self, typeOfTest, test_start, test_end):
        input_file = open(self.dataset_file_name, 'r')
        dataset = input_file.read().split("\n")
        if typeOfTest == 1:
            # acoustic
            start_column = 3
            end_column = 5
        elif typeOfTest == 0:
            # visual
            start_column = 6
            end_column = 8
        else:
            # multimodal
            start_column = 3
            end_column = 8
        test1_data = list()
        test1_label = list()
        train1_data = list()
        train1_label = list()
        for i in range(test_start, test_end + 1):
            row = list()
            data = dataset[i].split(",")
            for j in range(start_column, end_column + 1):
                row.append(float(data[j]))
            test1_data.append(row)
            test1_label.append(int(data[2]))
        for i in range(1, test_start):
            row = list()
            data = dataset[i].split(",")
            for j in range(start_column, end_column + 1):
                row.append(float(data[j]))
            train1_data.append(row)
            train1_label.append(int(data[2]))
        for i in range(test_end + 1, 281):
            row = list()
            data = dataset[i].split(",")
            for j in range(start_column, end_column + 1):
                row.append(float(data[j]))
            train1_data.append(row)
            train1_label.append(int(data[2]))
        return test1_data, test1_label, train1_data, train1_label

    def get_fold(self, typeOfTest, test_start, test_end, validate_start, validate_end, train_start, train_end):
        input_file = open(self.dataset_file_name, 'r')
        dataset = input_file.read().splitlines()
        if typeOfTest == 1:
            # acoustic
            start_column = 3
            end_column = 5
        elif typeOfTest == 0:
            # visual
            start_column = 6
            end_column = 8
        else:
            # multimodal
            start_column = 3
            end_column = 8
        test1_data = list()
        test1_label = list()
        validate1_data = list()
        validate1_label = list()
        train1_data = list()
        train1_label = list()
        for i in range(test_start, test_end + 1):
            row = list()
            data = dataset[i].split(",")
            for j in range(start_column, end_column + 1):
                row.append(float(data[j]))
            test1_data.append(row)
            test1_label.append(int(data[2]))
        for i in range(validate_start, validate_end + 1):
            row = list()
            data = dataset[i].split(",")
            for j in range(start_column, end_column + 1):
                row.append(float(data[j]))
            validate1_data.append(row)
            validate1_label.append(int(data[2]))
        for i in range(train_start, train_end + 1):
            row = list()
            data = dataset[i].split(",")
            for j in range(start_column, end_column + 1):
                row.append(float(data[j]))
            train1_data.append(row)
            train1_label.append(int(data[2]))
        return test1_data, test1_label, validate1_data, validate1_label, train1_data, train1_label

    def perform_naivebayes(self, typeOfTest, test_start, test_end):
        test1_data, test1_label, train1_data, train1_label = self.get_fold_test_and_train_only(typeOfTest, test_start, test_end)
        clf = GaussianNB()
        test_results = list()
        clf.fit(train1_data, train1_label)
        for line in test1_data:
            predicted = clf.predict([line])[0]
            test_results.append(predicted)
        print "Accuracy in test: ", metrics.accuracy_score(test1_label, test_results)

    def perform_svm(self, typeOfTest, test_start, test_end, validate_start, validate_end, train_start, train_end):
        test1_data, test1_label, validate1_data, validate1_label, train1_data, train1_label = self.get_fold(typeOfTest, test_start, test_end, validate_start, validate_end, train_start, train_end)
        clf = svm.LinearSVC()
        accuracy = list()
        for c in [0.001, 0.01, 0.1, 1, 10, 100]:
            validation_result = list()
            clf.set_params(C=c)
            clf.fit(train1_data, train1_label)
            correct_classification = 0
            i = 0
            for line in validate1_data:
                validation_result.append(clf.predict([line])[0])
            accuracy.append(metrics.accuracy_score(validate1_label, validation_result))
        print "Accuracy in validation (max): ", max(accuracy)
        cMax = math.pow(10, (accuracy.index(max(accuracy)) - 3))
        print "Hyperparameter(c) for max accuracy = ", cMax
        clf = svm.LinearSVC()
        test_results = list()
        train_results = list()
        clf.set_params(C=cMax)
        clf.fit(train1_data, train1_label)
        for line in train1_data:
            train_results.append(clf.predict([line])[0])
        print "Accuracy in training: ", metrics.accuracy_score(train1_label, train_results)
        for line in test1_data:
            predicted = clf.predict([line])[0]
            test_results.append(predicted)
        print "\nAccuracy in test: ", metrics.accuracy_score(test1_label, test_results)

    def perform_svm_n_fold(self, num_folds, typeOfTest, test_start, test_end):
        test1_data, test1_label, train1_data, train1_label = self.get_fold_test_and_train_only(typeOfTest, test_start, test_end)
        k_fold = KFold(n_splits=num_folds)
        accuracy = list()
        for c in [0.001, 0.01, 0.1, 1, 10, 100]:
            clf = svm.LinearSVC(C=c)
            accuracy.append(statistics.mean(cross_val_score(clf, train1_data, train1_label, cv=k_fold, n_jobs=-1)))
        print "Accuracy in validation(max): ", max(accuracy)
        print "Accuracy in validation(average): ", statistics.mean(accuracy)
        cMax = math.pow(10, (accuracy.index(max(accuracy)) - 3))
        print "Hyperparameter(c) for max accuracy = ", cMax
        clf = svm.LinearSVC()
        test_results = list()
        train_results = list()
        clf.set_params(C=cMax)
        clf.fit(train1_data, train1_label)
        for line in train1_data:
            train_results.append(clf.predict([line])[0])
        print "Accuracy in training: ", metrics.accuracy_score(train1_label, train_results)
        for line in test1_data:
            test_results.append(clf.predict([line])[0])
        print "Accuracy in test: ", metrics.accuracy_score(test1_label, test_results)

    def perform_svm_rbf(self, typeOfTest, test_start, test_end, validate_start, validate_end, train_start, train_end):
        test1_data, test1_label, validate1_data, validate1_label, train1_data, train1_label = self.get_fold(typeOfTest, test_start, test_end, validate_start, validate_end, train_start, train_end)
        clf = svm.SVC()
        accuracy = list()
        for c in [0.001, 0.01, 0.1, 1, 10, 100]:
            validation_result = list()
            clf.set_params(C=c, kernel="rbf")
            clf.fit(train1_data, train1_label)
            correct_classification = 0
            i = 0
            for line in validate1_data:
                validation_result.append(clf.predict([line])[0])
            accuracy.append(metrics.accuracy_score(validate1_label, validation_result))
        cMax = math.pow(10, (accuracy.index(max(accuracy)) - 3))
        print "Hyperparameter(c) for max accuracy = ", cMax
        clf = svm.SVC(C=cMax, kernel="rbf")
        test_results = list()
        clf.fit(train1_data, train1_label)
        for line in test1_data:
            test_results.append(clf.predict([line])[0])
        print "Accuracy in test: ", metrics.accuracy_score(test1_label, test_results)

    def perform_nn(self, typeOfTest, test_start, test_end, validate_start, validate_end, train_start, train_end):
        test1_data, test1_label, validate1_data, validate1_label, train1_data, train1_label = self.get_fold(typeOfTest, test_start, test_end, validate_start, validate_end, train_start, train_end)
        accuracy = list()
        hidden_layers = list()
        for ii in range(1, 11):
            hidden_layers.append((ii,))
        for hidden_layer in hidden_layers:
            clf = MLPClassifier(solver='lbfgs', alpha=1, hidden_layer_sizes=hidden_layer, random_state=1)
            clf.fit(train1_data, train1_label)
            correct_classification = 0
            i = 0
            for line in validate1_data:
                if clf.predict([line])[0] == validate1_label[i]:
                    correct_classification = correct_classification + 1
                i = i + 1
            accuracy.append(correct_classification / float(len(validate1_label)))
        hidden_layer_for_highest_accuracy = hidden_layers[accuracy.index(max(accuracy))]
        print "Hyperparameter(c) for max accuracy = ", hidden_layer_for_highest_accuracy
        clf = MLPClassifier(solver='lbfgs', alpha=1, hidden_layer_sizes=hidden_layer_for_highest_accuracy, random_state=1)
        clf.fit(train1_data, train1_label)
        test_results = list()
        for line in test1_data:
            test_results.append(clf.predict([line])[0])
        print "Accuracy in test: ", metrics.accuracy_score(test1_label, test_results)

def naive_bayes(a):
    print "\n\n\nNAIVE BAYES"
    print "-------------------"
    print "\nExperiment 1: "
    print "-----------------"
    test_start = 1
    test_end = 67
    print "Multimodal:"
    a.perform_naivebayes(2, test_start, test_end)
    print "\nAudio:"
    a.perform_naivebayes(1, test_start, test_end)
    print "\nVisual:"
    a.perform_naivebayes(0, test_start, test_end)
    print "\n\nExperiment 2: "
    print "-----------------"
    test_start = 212
    test_end = 280
    print "Multimodal:"
    a.perform_naivebayes(2, test_start, test_end)
    print "\nAudio:"
    a.perform_naivebayes(1, test_start, test_end)
    print "\nVisual:"
    a.perform_naivebayes(0, test_start, test_end)
    print "\n\nExperiment 3: "
    print "-----------------"
    test_start = 141
    test_end = 211
    print "Multimodal:"
    a.perform_naivebayes(2, test_start, test_end)
    print "\nAudio:"
    a.perform_naivebayes(1, test_start, test_end)
    print "\nVisual:"
    a.perform_naivebayes(0, test_start, test_end)
    print "\n\nExperiment 4: "
    print "-----------------"
    test_start = 68
    test_end = 140
    print "Multimodal:"
    a.perform_naivebayes(2, test_start, test_end)
    print "\nAudio:"
    a.perform_naivebayes(1, test_start, test_end)
    print "\nVisual:"
    a.perform_naivebayes(0, test_start, test_end)

def neural_net(a):
    print "\n\n\nNEURAL NETWORK"
    print "------------------"
    print "\nExperiment 1: "
    print "-----------------"
    test_start = 1
    test_end = 67
    validate_start = 68
    validate_end = 140
    train_start = 141
    train_end = 280
    print "Multimodal:"
    a.perform_nn(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_nn(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_nn(0, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\n\nExperiment 2: "
    print "-----------------"
    test_start = 212
    test_end = 280
    validate_start = 141
    validate_end = 211
    train_start = 1
    train_end = 140
    print "Multimodal:"
    a.perform_nn(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_nn(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_nn(0, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\n\nExperiment 3: "
    print "-----------------"
    test_start = 141
    test_end = 211
    validate_start = 212
    validate_end = 280
    train_start = 1
    train_end = 140
    print "Multimodal:"
    a.perform_nn(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_nn(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_nn(0, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\n\nExperiment 4: "
    print "-----------------"
    test_start = 68
    test_end = 140
    validate_start = 1
    validate_end = 67
    train_start = 141
    train_end = 280
    print "Multimodal:"
    a.perform_nn(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_nn(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_nn(0, test_start, test_end, validate_start, validate_end, train_start, train_end)

def svm_n_folds(a, numFolds):
    print "\n\n\nSVM with ", numFolds, " folds and not respecting speaker independence"
    print "---------------------------------------------------------------------------"
    print "\nExperiment 1: "
    print "-----------------"
    test_start = 1
    test_end = 67
    a.perform_svm_n_fold(numFolds, 2, test_start, test_end)
    print "\n\nExperiment 2: "
    print "-----------------"
    test_start = 212
    test_end = 280
    a.perform_svm_n_fold(numFolds, 2, test_start, test_end)
    print "\n\nExperiment 3: "
    print "-----------------"
    test_start = 141
    test_end = 211
    a.perform_svm_n_fold(numFolds, 2, test_start, test_end)
    print "\n\nExperiment 4: "
    print "-----------------"
    test_start = 68
    test_end = 140
    a.perform_svm_n_fold(numFolds, 2, test_start, test_end)

def svm_rbf(a):
    print "\n\n\nSVM with RBF Kernel"
    print "---------------------------"
    print "\nExperiment 1: "
    print "-----------------"
    test_start = 1
    test_end = 67
    validate_start = 68
    validate_end = 140
    train_start = 141
    train_end = 280
    print "Multimodal:"
    a.perform_svm_rbf(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_svm_rbf(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_svm_rbf(0, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\n\nExperiment 2: "
    print "-----------------"
    test_start = 212
    test_end = 280
    validate_start = 141
    validate_end = 211
    train_start = 1
    train_end = 140
    print "Multimodal:"
    a.perform_svm_rbf(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_svm_rbf(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_svm_rbf(0, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\n\nExperiment 3: "
    print "-----------------"
    test_start = 141
    test_end = 211
    validate_start = 212
    validate_end = 280
    train_start = 1
    train_end = 140
    print "Multimodal:"
    a.perform_svm_rbf(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_svm_rbf(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_svm_rbf(0, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\n\nExperiment 4: "
    print "-----------------"
    test_start = 68
    test_end = 140
    validate_start = 1
    validate_end = 67
    train_start = 141
    train_end = 280
    print "Multimodal:"
    a.perform_svm_rbf(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_svm_rbf(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_svm_rbf(0, test_start, test_end, validate_start, validate_end, train_start, train_end)

def svm_linear(a):
    print "\n\n\nLinear SVM"
    print "--------------------"
    print "\nExperiment 1: "
    print "-----------------"
    test_start = 1
    test_end = 67
    validate_start = 68
    validate_end = 140
    train_start = 141
    train_end = 280
    print "Multimodal:"
    a.perform_svm(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_svm(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_svm(0, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\n\nExperiment 2: "
    print "-----------------"
    test_start = 212
    test_end = 280
    validate_start = 141
    validate_end = 211
    train_start = 1
    train_end = 140
    print "Multimodal:"
    a.perform_svm(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_svm(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_svm(0, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\n\nExperiment 3: "
    print "-----------------"
    test_start = 141
    test_end = 211
    validate_start = 212
    validate_end = 280
    train_start = 1
    train_end = 140
    print "Multimodal:"
    a.perform_svm(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_svm(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_svm(0, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\n\nExperiment 4: "
    print "-----------------"
    test_start = 68
    test_end = 140
    validate_start = 1
    validate_end = 67
    train_start = 141
    train_end = 280
    print "Multimodal:"
    a.perform_svm(2, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nAudio:"
    a.perform_svm(1, test_start, test_end, validate_start, validate_end, train_start, train_end)
    print "\nVisual:"
    a.perform_svm(0, test_start, test_end, validate_start, validate_end, train_start, train_end)

if len(sys.argv) == 1:
    print "Please specify the classifier(s) to execute...\n"
else:
    a = CS535MLClassifier('dataset.csv')
    args = sys.argv[1:]
    args = list(set(args))
    for arg in args:
        if arg == "-nb":
            naive_bayes(a)
        if arg == "-svm":
            svm_linear(a)
        if arg == "-svm3":
            svm_n_folds(a, 3)
        if arg == "-svm4":
            svm_n_folds(a, 4)
        if arg == "-nn":
            neural_net(a)
        if arg == "-svmrbf":
...


test_formatters.py

Source: test_formatters.py (GitHub)


...
    fmt = formatters[name](**opts)
    logger = StructuredLogger('test_logger')
    logger.add_handler(StreamHandler(buf, fmt))
    logger.suite_start(['test_foo', 'test_bar', 'test_baz'])
    logger.test_start('test_foo')
    logger.test_end('test_foo', 'OK')
    logger.test_start('test_bar')
    logger.test_status('test_bar', 'a subtest', 'PASS')
    logger.test_end('test_bar', 'OK')
    logger.test_start('test_baz')
    logger.test_end('test_baz', 'FAIL', 'FAIL', 'expected 0 got 1')
    logger.suite_end()
    result = buf.getvalue()
    print("Dumping result for copy/paste:")
    print(result)
    assert result == expected

@pytest.mark.parametrize("name,opts,expected", FORMATS['FAIL'],
                         ids=ids('FAIL'))
def test_fail(name, opts, expected):
    stack = """
    SimpleTest.is@SimpleTest/SimpleTest.js:312:5
    @caps/tests/mochitest/test_bug246699.html:53:1
""".strip('\n')
    buf = BytesIO()
    fmt = formatters[name](**opts)
    logger = StructuredLogger('test_logger')
    logger.add_handler(StreamHandler(buf, fmt))
    logger.suite_start(['test_foo', 'test_bar', 'test_baz'])
    logger.test_start('test_foo')
    logger.test_end('test_foo', 'FAIL', 'PASS', 'expected 0 got 1')
    logger.test_start('test_bar')
    logger.test_status('test_bar', 'a subtest', 'FAIL', 'PASS', 'expected 0 got 1', stack)
    logger.test_status('test_bar', 'another subtest', 'TIMEOUT')
    logger.test_end('test_bar', 'OK')
    logger.test_start('test_baz')
    logger.test_end('test_baz', 'PASS', 'FAIL')
    logger.suite_end()
    result = buf.getvalue()
    print("Dumping result for copy/paste:")
    print(result)
    assert result == expected

if __name__ == '__main__':
...
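The test_start calls in this snippet come from mozlog's StructuredLogger rather than from avocado itself: each test is announced with test_start and closed with test_end, optionally with test_status lines for subtests in between. A minimal sketch of the same protocol, assuming the mozlog package is installed:

import sys
from mozlog.structuredlog import StructuredLogger
from mozlog.handlers import StreamHandler
from mozlog.formatters import MachFormatter

logger = StructuredLogger("demo")
logger.add_handler(StreamHandler(sys.stdout, MachFormatter()))
logger.suite_start(["test_demo"])          # declare the tests in the run
logger.test_start("test_demo")             # mark the beginning of one test
logger.test_status("test_demo", "a subtest", "PASS")
logger.test_end("test_demo", "OK")         # overall result for the test
logger.suite_end()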


peakcache.spec.js

Source: peakcache.spec.js (GitHub)


describe('peakcache', function() {
    var peakcache;
    var test_length = 200;
    var test_length2 = 300;
    var test_start = 50;
    var test_end = 100;
    var test_start2 = 100;
    var test_end2 = 120;
    var test_start3 = 120;
    var test_end3 = 150;
    var window_size = 20;

    function __createPeakCache() {
        peakcache = Object.create(WaveSurfer.PeakCache);
        peakcache.init();
    }

    beforeEach(function (done) {
        __createPeakCache();
        done();
    });

    it('empty cache returns full range', function() {
        var newranges = peakcache.addRangeToPeakCache(test_length, test_start, test_end);
        expect(newranges.length).toEqual(1);
        expect(newranges[0][0]).toEqual(test_start);
        expect(newranges[0][1]).toEqual(test_end);
    });

    it('different length clears cache', function() {
        peakcache.addRangeToPeakCache(test_length, test_start, test_end);
        var newranges = peakcache.addRangeToPeakCache(test_length2, test_start, test_end);
        expect(newranges.length).toEqual(1);
        expect(newranges[0][0]).toEqual(test_start);
        expect(newranges[0][1]).toEqual(test_end);
    });

    it('consecutive calls return no ranges', function() {
        peakcache.addRangeToPeakCache(test_length, test_start, test_end);
        var newranges = peakcache.addRangeToPeakCache(test_length, test_start, test_end);
        expect(newranges.length).toEqual(0);
    });

    it('sliding window returns window sized range', function() {
        var newranges = peakcache.addRangeToPeakCache(test_length, test_start, test_end);
        expect(newranges.length).toEqual(1);
        expect(newranges[0][0]).toEqual(test_start);
        expect(newranges[0][1]).toEqual(test_end);
        var newranges = peakcache.addRangeToPeakCache(test_length, test_start + window_size, test_end + window_size);
        expect(newranges.length).toEqual(1);
        expect(newranges[0][0]).toEqual(test_end);
        expect(newranges[0][1]).toEqual(test_end + window_size);
        var newranges = peakcache.addRangeToPeakCache(test_length, test_start + window_size * 2, test_end + window_size * 2);
        expect(newranges.length).toEqual(1);
        expect(newranges[0][0]).toEqual(test_end + window_size);
        expect(newranges[0][1]).toEqual(test_end + window_size * 2);
    });

    it('disjoint set creates two ranges', function() {
        peakcache.addRangeToPeakCache(test_length, test_start, test_end);
        peakcache.addRangeToPeakCache(test_length, test_start3, test_end3);
        var ranges = peakcache.getCacheRanges();
        expect(ranges.length).toEqual(2);
        expect(ranges[0][0]).toEqual(test_start);
        expect(ranges[0][1]).toEqual(test_end);
        expect(ranges[1][0]).toEqual(test_start3);
        expect(ranges[1][1]).toEqual(test_end3);
    });

    it('filling in disjoint sets coalesces', function() {
        peakcache.addRangeToPeakCache(test_length, test_start, test_end);
        peakcache.addRangeToPeakCache(test_length, test_start3, test_end3);
        var newranges = peakcache.addRangeToPeakCache(test_length, test_start, test_end3);
        expect(newranges.length).toEqual(1);
        expect(newranges[0][0]).toEqual(test_end);
        expect(newranges[0][1]).toEqual(test_start3);
        var ranges = peakcache.getCacheRanges();
        expect(ranges.length).toEqual(1);
        expect(ranges[0][0]).toEqual(test_start);
        expect(ranges[0][1]).toEqual(test_end3);
    });

    it('filling in disjoint sets coalesces / edge cases', function() {
        peakcache.addRangeToPeakCache(test_length, test_start, test_end);
        peakcache.addRangeToPeakCache(test_length, test_start3, test_end3);
        var newranges = peakcache.addRangeToPeakCache(test_length, test_start2, test_end2);
        expect(newranges.length).toEqual(1);
        expect(newranges[0][0]).toEqual(test_end);
        expect(newranges[0][1]).toEqual(test_start3);
        var ranges = peakcache.getCacheRanges();
        expect(ranges.length).toEqual(1);
        expect(ranges[0][0]).toEqual(test_start);
        expect(ranges[0][1]).toEqual(test_end3);
    });
...


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run avocado automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
