How to use the is_needed method in autotest

Best Python code snippet using autotest_python

data_preprocessing.py

Source:data_preprocessing.py Github

copy

Full Screen

...5455 def can_accept(self, data):56 return self.can_accept_a(data)5758 def is_needed(self, data):59 # data = handle_data(data)60 cols = data['X'].columns61 num_cols = data['X']._get_numeric_data().columns62 if not len(cols) == len(num_cols):63 return True64 return False6566 def fit(self, data):67 data = handle_data(data)68 self.num_cols = data['X']._get_numeric_data().columns6970 def produce(self, data):71 output = handle_data(data)72 output['X'] = output['X'][self.num_cols]73 final_output = {0: output}74 return final_output757677class Imputer(primitive):78 def __init__(self, random_state=0):79 super(Imputer, self).__init__(name='imputer')80 self.id = 181 self.hyperparams = []82 self.type = 'data preprocess'83 self.description = "Imputation transformer for completing missing values by mean."84 self.hyperparams_run = {'default': True}85 self.random_state = random_state86 self.imp = SimpleImputer()87 self.num_cols = None88 self.imp_cols = None89 self.accept_type = 'a'9091 def can_accept(self, data):92 return self.can_accept_a(data)9394 def is_needed(self, data):95 # data = handle_data(data)96 if data['X'].isnull().any().any():97 return True98 return False99100 def fit(self, data):101 data = handle_data(data)102 self.num_cols = data['X']._get_numeric_data().columns103 self.imp.fit(data['X'][self.num_cols])104 self.imp_cols = data['X'][self.num_cols].columns[data['X'][self.num_cols].isnull().any()].tolist()105106 def produce(self, data):107 output = handle_data(data)108 # self.imp_cols = output['X'][self.num_cols].columns[output['X'][self.num_cols].isnull().any()].tolist()109 cols = self.num_cols.tolist()110 reg_cols = list(set(cols) - set(self.imp_cols))111 # new_cols = ["{}_imp_mean".format(v) for v in list(imp_cols)]112 for i in range(len(cols)):113 if cols[i] in reg_cols:114 continue115 elif cols[i] in self.imp_cols:116 cols[i] = "{}_imp_mean".format(cols[i])117118 # try:119 output['X'] = pd.DataFrame(self.imp.transform(output['X'][self.num_cols]), 
columns=cols).reset_index(120 drop=True).infer_objects()121 output['X'] = output['X'].ix[:, ~output['X'].columns.duplicated()]122123 # except Exception as e:124 # print(e)125 final_output = {0: output}126 return final_output127128129class ImputerMedian(primitive):130 def __init__(self, random_state=0):131 super(ImputerMedian, self).__init__(name='ImputerMedian')132 self.id = 2133 self.hyperparams = []134 self.type = 'data preprocess'135 self.description = "Imputation transformer for completing missing values by median."136 self.hyperparams_run = {'default': True}137 self.random_state = random_state138 self.imp = SimpleImputer(strategy='median')139 self.num_cols = None140 self.imp_cols = None141 self.accept_type = 'a'142143 def can_accept(self, data):144 return self.can_accept_a(data)145146 def is_needed(self, data):147 # data = handle_data(data)148 if data['X'].isnull().any().any():149 return True150 return False151152 def fit(self, data):153 data = handle_data(data)154 self.num_cols = data['X']._get_numeric_data().columns155 self.imp.fit(data['X'][self.num_cols])156 self.imp_cols = data['X'][self.num_cols].columns[data['X'][self.num_cols].isnull().any()].tolist()157158 def produce(self, data):159 output = handle_data(data)160 # self.imp_cols = output['X'][self.num_cols].columns[output['X'][self.num_cols].isnull().any()].tolist()161 cols = self.num_cols.tolist()162 reg_cols = list(set(cols)-set(self.imp_cols))163 # new_cols = ["{}_imp_mean".format(v) for v in list(imp_cols)]164 for i in range(len(cols)):165 if cols[i] in reg_cols:166 continue167 elif cols[i] in self.imp_cols:168 cols[i] = "{}_imp_median".format(cols[i])169170 # try:171 output['X'] = pd.DataFrame(self.imp.transform(output['X'][self.num_cols]), columns=cols).reset_index(drop=True).infer_objects()172 output['X'] = output['X'].ix[:, ~output['X'].columns.duplicated()]173174 # except Exception as e:175 # print(e)176 final_output = {0: output}177 return final_output178179180class 
ImputerIndicatorPrim(primitive):181 def __init__(self, random_state=0):182 super(ImputerIndicatorPrim, self).__init__(name='imputerIndicator')183 self.id = 3184 self.hyperparams = []185 self.type = 'data preprocess'186 self.description = "All features will be imputed using SimpleImputer, in order to enable classifiers to work with this data. Additionally, it adds the the indicator variables from MissingIndicator."187 self.hyperparams_run = {'default': True}188 self.random_state = random_state189 self.imp = FeatureUnion(transformer_list=[('features', SimpleImputer()), ('indicators', MissingIndicator())])190 self.num_cols = None191 self.imp_cols = None192 self.accept_type = 'b'193194 def can_accept(self, data):195 return self.can_accept_b(data)196197 def is_needed(self, data):198 # data = handle_data(data)199 if data['X'].isnull().any().any():200 return True201 return False202203 def fit(self, data):204 data = handle_data(data)205 self.num_cols = data['X']._get_numeric_data().columns206 self.imp.fit(data['X'][self.num_cols])207 self.imp_cols = data['X'][self.num_cols].columns[data['X'][self.num_cols].isnull().any()].tolist()208209 def produce(self, data):210 output = handle_data(data)211212 cols = self.num_cols.tolist()213 reg_cols = list(set(cols)-set(self.imp_cols))214 # new_cols = ["{}_imp_mean".format(v) for v in list(imp_cols)]215 for i in range(len(cols)):216 if cols[i] in reg_cols:217 continue218 elif cols[i] in self.imp_cols:219 cols[i] = "{}_imp_mean".format(cols[i])220 result = self.imp.transform(output['X'][self.num_cols])221 # extra_cols = list(range(result.shape[1] - len(cols)))222 extra_cols = ["{}_miss_indicator".format(v) for v in self.imp_cols]223 output['X'] = pd.DataFrame(result, columns=cols + extra_cols).reset_index(drop=True).infer_objects()224 output['X'] = output['X'].ix[:,~output['X'].columns.duplicated()]225 final_output = {0: output}226 return final_output227228229class OneHotEncoderPrim(primitive):230 # can handle missing values. 
turns nans to extra category231 def __init__(self, random_state=0):232 super(OneHotEncoderPrim, self).__init__(name='OneHotEncoder')233 self.id = 4234 self.hyperparams = []235 self.type = 'data preprocess'236 self.description = "Encode categorical integer features as a one-hot numeric array. The input to this transformer should be an array-like of integers or strings, denoting the values taken on by categorical (discrete) features. The features are encoded using a one-hot (aka ‘one-of-K’ or ‘dummy’) encoding scheme. This creates a binary column for each category and returns a sparse matrix or dense array. By default, the encoder derives the categories based on the unique values in each feature. Alternatively, you can also specify the categories manually. The OneHotEncoder previously assumed that the input features take on values in the range [0, max(values)). This behaviour is deprecated. This encoding is needed for feeding categorical data to many scikit-learn estimators, notably linear models and SVMs with the standard kernels."237 self.hyperparams_run = {'default': True}238 self.preprocess = None239 self.cat_cols = None240 self.accept_type = 'b'241242 def can_accept(self, data):243 return self.can_accept_b(data)244245 def is_needed(self, data):246 # data = handle_data(data)247 cols = data['X']248 num_cols = data['X']._get_numeric_data().columns249 cat_cols = list(set(cols) - set(num_cols))250 if len(cat_cols) == 0:251 return False252 return True253254 def fit(self, data):255 data = handle_data(data)256 if not self.is_needed(data):257 return258 x = deepcopy(data['X'])259 cols = data['X'].columns260 num_cols = data['X']._get_numeric_data().columns261 self.cat_cols = list(set(cols) - set(num_cols))262 x[self.cat_cols] = x[self.cat_cols].fillna('NaN')263 self.preprocess = ColumnTransformer([("one_hot", OneHotEncoder(handle_unknown='ignore'), self.cat_cols)])264 x[self.cat_cols] = x[self.cat_cols].astype(str)265 self.preprocess.fit(x) # .astype(str)266267 def 
produce(self, data):268 output = handle_data(data)269 if not self.is_needed(output):270 final_output = {0: output}271 return final_output272 output['X'][self.cat_cols] = output['X'][self.cat_cols].fillna('NaN')273 result = self.preprocess.transform(output['X'])274 if isinstance(result, csr_matrix):275 result = result.toarray()276 output['X'] = pd.DataFrame(result, columns=self.preprocess.get_feature_names()).infer_objects()277 output['X'] = output['X'].ix[:,~output['X'].columns.duplicated()]278 final_output = {0: output}279 return final_output280281282class OrdinalEncoderPrim(primitive):283 # can handle missing values. Operates on all categorical features.284 def __init__(self, random_state=0):285 super(OrdinalEncoderPrim, self).__init__(name='OrdinalEncoder')286 self.id = 5287 self.hyperparams = []288 self.type = 'data preprocess'289 self.description = "Encode categorical features as an integer array."290 self.hyperparams_run = {'default': True}291 self.cat_cols = None292 self.preprocess = {}293 self.accept_type = 'b'294295 def can_accept(self, data):296 return self.can_accept_b(data)297298 def is_needed(self, data):299 # data = handle_data(data)300 cols = data['X']301 num_cols = data['X']._get_numeric_data().columns302 cat_cols = list(set(cols) - set(num_cols))303 if len(cat_cols) == 0:304 return False305 return True306307 def fit(self, data):308 data = handle_data(data)309 if not self.is_needed(data):310 return311 x = deepcopy(data['X'])312 cols = data['X'].columns313 num_cols = data['X']._get_numeric_data().columns314 self.cat_cols = list(set(cols) - set(num_cols))315 x[self.cat_cols] = x[self.cat_cols].fillna('NaN')316 for col in self.cat_cols:317 self.preprocess[col] = OrdinalEncoder().fit(x[col].astype(str))318319 def produce(self, data):320 output = handle_data(data)321 if not self.is_needed(output):322 final_output = {0: output}323 return final_output324 x = output['X']325 x[self.cat_cols] = x[self.cat_cols].fillna('NaN')326 final_cols = []327 for col in 
self.cat_cols:328 arr = self.preprocess[col].transform(x[col])329 output['X'][col] = arr330 output['X'] = output['X'].rename(index=str, columns={col: "{}_lbl_enc".format(col)})331 final_cols.append("{}_lbl_enc".format(col))332 # output['X'][col].columns = "{}_lbl_enc".format(col)333 # cols = ["{}_lbl_enc".format(v) for v in list(self.cat_cols)]334 output['X'] = output['X'][final_cols].infer_objects()335 output['X'] = output['X'].ix[:,~output['X'].columns.duplicated()]336 final_output = {0: output}337 return final_output338339340class LabelEncoderPrim(primitive):341 # can handle missing values. Operates on all categorical features.342 def __init__(self, random_state=0):343 super(LabelEncoderPrim, self).__init__(name='LabelEncoder')344 self.id = 5345 self.hyperparams = []346 self.type = 'data preprocess'347 self.description = "Encode labels with value between 0 and n_classes-1."348 self.hyperparams_run = {'default': True}349 self.cat_cols = None350 self.preprocess = {}351 self.accept_type = 'b'352353 def can_accept(self, data):354 return self.can_accept_b(data)355356 def is_needed(self, data):357 # data = handle_data(data)358 cols = data['X']359 num_cols = data['X']._get_numeric_data().columns360 cat_cols = list(set(cols) - set(num_cols))361 if len(cat_cols) == 0:362 return False363 return True364365 def fit(self, data):366 data = handle_data(data)367 if not self.is_needed(data):368 return369 x = deepcopy(data['X'])370 cols = data['X'].columns371 num_cols = data['X']._get_numeric_data().columns372 self.cat_cols = list(set(cols) - set(num_cols))373 x[self.cat_cols] = x[self.cat_cols].fillna('NaN')374 for col in self.cat_cols:375 self.preprocess[col] = LabelEncoder().fit(x[col].astype(str))376377 def produce(self, data):378 output = handle_data(data)379 if not self.is_needed(output):380 final_output = {0: output}381 return final_output382 x = output['X']383 x[self.cat_cols] = x[self.cat_cols].fillna('NaN')384 final_cols = []385 for col in self.cat_cols:386 arr = 
self.preprocess[col].transform(x[col])387 output['X'][col] = arr388 output['X'] = output['X'].rename(index=str, columns={col: "{}_lbl_enc".format(col)})389 final_cols.append("{}_lbl_enc".format(col))390 # output['X'][col].columns = "{}_lbl_enc".format(col)391 # cols = ["{}_lbl_enc".format(v) for v in list(self.cat_cols)]392 output['X'] = output['X'][final_cols].infer_objects()393 output['X'] = output['X'].ix[:,~output['X'].columns.duplicated()]394 final_output = {0: output}395 return final_output396397398class ImputerEncoderPrim(primitive):399 def __init__(self, random_state=0):400 super(ImputerEncoderPrim, self).__init__(name='ImputerEncoderPrim')401 self.id = 6402 self.hyperparams = []403 self.type = 'data preprocess'404 self.description = "Imputation transformer for completing missing values and encode labels with value between 0 and n_classes-1."405 self.hyperparams_run = {'default': True}406 self.random_state = random_state407 self.imp = Imputer()408 self.encoder = LabelEncoderPrim()409 self.accept_type = 'b'410411 def can_accept(self, data):412 return self.can_accept_b(data)413414 def is_needed(self, data):415 # data = handle_data(data)416 if not self.imp.is_needed(data) or not self.encoder.is_needed(data):417 return False418 return True419420 def fit(self, data):421 dt = handle_data(data)422 if self.encoder.is_needed(dt):423 self.needed_enc = True424 self.encoder.fit(data)425 out = self.encoder.produce(data)426 else:427 out = data428 if self.imp.is_needed(handle_data(out)):429 self.needed_imp = True430 self.imp.fit(out)431432 def produce(self, data):433 output = data434 if self.needed_enc:435 output = self.encoder.produce(data)[0]436 if self.needed_imp:437 output = self.imp.produce(output)[0]438439 if not self.needed_imp and not self.needed_enc:440 output = handle_data(output)441 final_output = {0: output}442 return final_output443444445class ImputerOneHotEncoderPrim(primitive):446 def __init__(self, random_state=0):447 super(ImputerOneHotEncoderPrim, 
self).__init__(name='ImputerOneHotEncoderPrim')448 self.id = 6449 self.hyperparams = []450 self.type = 'data preprocess'451 self.description = "Imputation transformer for completing missing values and encode categorical one-hot."452 self.hyperparams_run = {'default': True}453 self.random_state = random_state454 self.imp = Imputer()455 self.encoder = OneHotEncoderPrim()456 self.accept_type = 'b'457 self.needed_imp = False458 self.needed_enc = False459460 def can_accept(self, data):461 return self.can_accept_b(data)462463 def is_needed(self, data):464 # data = handle_data(data)465 if not self.imp.is_needed(data) or not self.encoder.is_needed(data):466 return False467 return True468469 def fit(self, data):470 dt = handle_data(data)471 if self.encoder.is_needed(dt):472 self.needed_enc = True473 self.encoder.fit(data)474 out = self.encoder.produce(data)475 else:476 out = data477 if self.imp.is_needed(handle_data(out)):478 self.needed_imp = True479 self.imp.fit(out)480481 def produce(self, data):482 output = data483 if self.needed_enc:484 output = self.encoder.produce(data)[0]485 if self.needed_imp:486 output = self.imp.produce(output)[0]487488 if not self.needed_imp and not self.needed_enc:489 output = handle_data(output)490 final_output = {0: output}491 return final_output

Full Screen

Full Screen

mixup.py

Source:mixup.py Github

copy

Full Screen

from typing import List
import numpy as np
import torch
from catalyst.dl import IRunner, CriterionCallback, Callback, CallbackOrder

__all__ = ["MixupInputCallback", "MixupCriterionCallback"]


class MixupInputCallback(Callback):
    """
    Callback that applies mixup augmentation to the model inputs.

    Paper: https://arxiv.org/abs/1710.09412

    Note:
        This callback mixes the *inputs* only. Pair it with
        MixupCriterionCallback, which reads the lambda/index this callback
        stores in ``state.input`` and computes the matching mixed loss.
    """

    def __init__(self, fields: List[str] = ("features",), alpha=0.5, on_train_only=True, p=0.5, **kwargs):
        """
        Args:
            fields (List[str]): list of input fields which must be affected.
            alpha (float): beta distribution a=b parameter. Must be >= 0.
                The closer alpha is to zero, the weaker the mixup effect.
            on_train_only (bool): apply to train loaders only. As mixup uses
                proxy inputs, the targets are also proxy; use the standard
                output/metric for validation.
            p (float): probability of applying mixup to a given batch.
        """
        assert len(fields) > 0, "At least one field for MixupCallback is required"
        assert alpha >= 0, "alpha must be>=0"
        super().__init__(CallbackOrder.Internal)
        self.on_train_only = on_train_only
        self.fields = fields
        self.alpha = alpha
        self.is_needed = True
        # NOTE(review): is_batch_needed is never read (on_batch_start uses a
        # local decision instead); kept for backward compatibility.
        self.is_batch_needed = True
        self.p = p

    def on_loader_start(self, state: IRunner):
        # Enable mixing only on train loaders unless on_train_only is False.
        self.is_needed = not self.on_train_only or state.loader_name.startswith("train")

    def on_batch_start(self, state: IRunner):
        if not self.is_needed:
            return
        # With probability p, draw lambda from Beta(alpha, alpha); otherwise
        # lam == 1 leaves the batch effectively unchanged.
        if np.random.random() < self.p:
            lam = np.random.beta(self.alpha, self.alpha)
        else:
            lam = 1
        index = torch.randperm(state.input[self.fields[0]].shape[0]).to(state.device)
        # Expose the permutation and lambda for MixupCriterionCallback.
        state.input["mixup_index"] = index
        state.input["mixup_lambda"] = lam
        for f in self.fields:
            a = lam * state.input[f]
            b = (1 - lam) * state.input[f][index]
            state.input[f] = a + b


class MixupCriterionCallback(CriterionCallback):
    """
    Criterion callback computing the mixup loss:
    ``lam * loss(pred, y_a) + (1 - lam) * loss(pred, y_b)``.

    Paper: https://arxiv.org/abs/1710.09412

    Note:
        Reads "mixup_lambda" and "mixup_index" that MixupInputCallback stores
        in ``state.input``; the two callbacks must be used together.
    """

    def __init__(self, on_train_only=True, **kwargs):
        """
        Args:
            on_train_only (bool): apply to train loaders only; validation
                falls back to the plain (un-mixed) criterion.
        """
        super().__init__(**kwargs)
        self.on_train_only = on_train_only
        self.is_needed = True

    def on_loader_start(self, state: IRunner):
        self.is_needed = not self.on_train_only or state.loader_name.startswith("train")

    def _compute_loss_value(self, state: IRunner, criterion):
        # Fall back to the plain criterion outside train loaders.
        if not self.is_needed:
            return super()._compute_loss_value(state, criterion)
        lam = state.input["mixup_lambda"]
        index = state.input["mixup_index"]
        pred = self._get_output(state.output, self.output_key)
        y_a = self._get_input(state.input, self.input_key)
        y_b = y_a[index]
        loss = lam * criterion(pred, y_a) + (1.0 - lam) * criterion(pred, y_b)
        return loss

    def _compute_loss_key_value(self, state: IRunner, criterion):
        if not self.is_needed:
            return super()._compute_loss_key_value(state, criterion)
        lam = state.input["mixup_lambda"]
        index = state.input["mixup_index"]
        pred = self._get_output(state.output, self.output_key)
        y_a = self._get_input(state.input, self.input_key)
        y_b = y_a[index]
        loss = lam * criterion(pred, y_a) + (1.0 - lam) * criterion(pred, y_b)
        # NOTE(review): the source chunk was truncated here; the return
        # mirrors _compute_loss_value above.
        return loss

Full Screen

Full Screen

UserAgent_Proxy.py

Source:UserAgent_Proxy.py Github

copy

Full Screen

...20 proxy.http = None21 proxy.https = None22 none_ok(proxy.http, 'right proxy')23 none_ok(proxy.https, 'right proxy')24 ok(not proxy.is_needed('dummy.mojolicio.us'), 'no proxy needed')25 ok(proxy.is_needed('icio.us'), 'proxy needed')26 ok(proxy.is_needed('localhost'), 'proxy needed')27 setenv('HTTP_PROXY', None)28 setenv('HTTPS_PROXY', None)29 setenv('NO_PROXY', None)30 setenv('http_proxy', 'proxy.example.com')31 setenv('https_proxy', 'tunnel.example.com')32 setenv('no_proxy', 'localhost,localdomain,foo.com,example.com')33 proxy.detect()34 is_deeply_ok(proxy.no, ['localhost', 'localdomain', 'foo.com', 'example.com'], 'right list')35 is_ok(proxy.http, 'proxy.example.com', 'right proxy')36 is_ok(proxy.https, 'tunnel.example.com', 'right proxy')37 ok(proxy.is_needed('dummy.mojolicio.us'), 'proxy needed')38 ok(proxy.is_needed('icio.us'), 'proxy needed')39 ok(not proxy.is_needed('localhost'), 'proxy needed')40 ok(not proxy.is_needed('localhost.localdomain'), 'no proxy needed')41 ok(not proxy.is_needed('foo.com'), 'no proxy needed')42 ok(not proxy.is_needed('example.com'), 'no proxy needed')43 ok(not proxy.is_needed('www.example.com'), 'no proxy needed')44 ok(proxy.is_needed('www.example.com.com'), 'proxy needed')45 setenv('http_proxy', None)46 setenv('https_proxy', None)47 setenv('no_proxy', None)...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful