How to use the _sanity_check method in autotest

Best Python code snippets using autotest_python. The three examples below show common uses of a _sanity_check helper: guarding method arguments, bundling shared test assertions, and stashing intermediates for debugging.

graph.py

Source: graph.py (GitHub)

...
    def __init__(self, numVertices, directed=False):
        super(AdjacencyMatrixGraph, self).__init__(numVertices, directed)
        self.matrix = np.zeros((numVertices, numVertices))

    def add_edge(self, v1, v2, weight=1):
        self._sanity_check(v1)
        self._sanity_check(v2)
        if v1 == v2:
            raise ValueError("both vertices are the same")
        if weight < 1:
            raise ValueError("weight number is incorrect")
        self.matrix[v1][v2] = weight
        if self.directed is False:
            self.matrix[v2][v1] = weight

    def get_adjacent_vertices(self, v):
        self._sanity_check(v)
        adjacent_vertices = []
        for i in range(self.numVertices):
            if self.matrix[v][i] != 0:
                adjacent_vertices.append(i)
        return adjacent_vertices

    def get_indegree(self, v):
        self._sanity_check(v)
        indegree = 0
        for i in range(self.numVertices):
            if self.matrix[i][v] != 0:
                indegree += 1
        return indegree

    def get_edge_weight(self, v1, v2):
        return self.matrix[v1][v2]

    def display_matrix(self):
        print(self.matrix)

    def display(self):
        for v in range(self.numVertices):
            for v_a in range(self.numVertices):
                if self.matrix[v][v_a] != 0:
                    print(v, "-->", v_a)

    def _sanity_check(self, v):
        if v >= self.numVertices or v < 0:
            raise ValueError("vertex number is incorrect")

# ################################ AdjacencySetGraph ################################
# unweighted (all weights are 1)
class Vertex:
    """graph vertex for AdjacencySetGraph"""
    def __init__(self, vertex_id):
        self.vertex_id = vertex_id
        self.adjacent = set()

    def add_edge(self, v):
        if v == self.vertex_id:
            raise ValueError(f"the vertex {v} can't be adjacent to itself")
        self.adjacent.add(v)

    def get_adjacent_vertices(self):
        return sorted(self.adjacent)

class AdjacencySetGraph(Graph):
    def __init__(self, num_vertices, directed=False):
        super(AdjacencySetGraph, self).__init__(num_vertices, directed)
        self.vertices = []
        for i in range(self.numVertices):
            self.vertices.append(Vertex(i))

    def add_edge(self, v1, v2, weight=1):
        self._sanity_check(v1)
        self._sanity_check(v2)
        if v1 == v2:
            raise ValueError("both vertices are the same")
        if weight != 1:
            raise ValueError("only weight=1 is currently supported")
        self.vertices[v1].add_edge(v2)
        if self.directed is False:
            self.vertices[v2].add_edge(v1)

    def get_adjacent_vertices(self, v):
        return self.vertices[v].get_adjacent_vertices()

    def get_indegree(self, v):
        count = 0
        for vertex in self.vertices:
            if vertex.vertex_id != v:
                if v in vertex.adjacent:
                    count += 1
        return count

    def get_edge_weight(self, v1, v2):
        return 1  # only weight=1 is currently supported

    def display(self):
        for v in self.vertices:
            print(v.vertex_id, "-->", sorted(v.adjacent))

    def _sanity_check(self, v):
        if v >= self.numVertices or v < 0:
...
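In both classes above, _sanity_check is an internal guard: every public method validates its vertex arguments before touching the underlying storage. Below is a minimal usage sketch; the Graph base class shown here is a hypothetical stand-in for the real one, which the "..." truncation above elides.

import numpy as np

# Hypothetical stand-in for the elided Graph base class.
class Graph:
    def __init__(self, numVertices, directed=False):
        self.numVertices = numVertices
        self.directed = directed

# With AdjacencyMatrixGraph defined as above:
g = AdjacencyMatrixGraph(4)          # vertices 0..3, undirected
g.add_edge(0, 1)
g.add_edge(1, 2, weight=3)
print(g.get_adjacent_vertices(1))    # [0, 2]
print(g.get_indegree(2))             # 1
try:
    g.add_edge(0, 7)                 # 7 is out of range
except ValueError as e:
    print(e)                         # vertex number is incorrect

Failing fast here matters: without the v < 0 check, a negative vertex would be accepted silently, because NumPy treats a negative index as counting back from the end of the matrix.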

test_datasets.py

Source: test_datasets.py (GitHub)

...
    assert val.n_ood != 0
    assert val.n_indomain < len(val)
    assert test.n_ood != 0
    assert test.n_indomain < len(test)

def _sanity_check(train_dataset, val_dataset, test_dataset):
    def test_single(dataset):
        assert len(dataset.raw_texts) > 1
        assert len(dataset.raw_texts) == len(dataset.raw_labels)
        assert dataset.vectorized_texts
        for idx in range(0, len(dataset.raw_texts), 1000):
            assert " ".join(dataset.tokenized_texts[idx]) == " ".join(dataset.tokenized_texts[idx]).lower()
    test_single(train_dataset)
    test_single(val_dataset)
    test_single(test_dataset)
    label_vocab_test(train_dataset, val_dataset)
    label_vocab_test(train_dataset, test_dataset)
    ood_test(train_dataset, val_dataset, test_dataset)

def _vectorize_simple(train_dataset, val_dataset, test_dataset):
    vocab = Vocab()
    vocab.build(train_dataset.tokenized_texts + val_dataset.tokenized_texts)
    train_dataset.vectorize_texts(vocab)
    val_dataset.vectorize_texts(vocab)
    test_dataset.vectorize_texts(vocab)

@pytest.mark.parametrize("K,version", itertools.product([25, 75], [0, 1, 2, 3, 4]))
def test_snips_dataset_simple(K, version):
    datasets, vocab = get_dataset_simple(f'snips_{K}', version=version)
    train_dataset, val_dataset, test_dataset = datasets
    _vectorize_simple(train_dataset, val_dataset, test_dataset)
    _sanity_check(train_dataset, val_dataset, test_dataset)

@pytest.mark.parametrize("coarse", [True, False])
def test_rostd_dataset_simple(coarse):
    dataset_name = "rostd_coarse" if coarse else "rostd"
    datasets, vocab = get_dataset_simple(dataset_name=dataset_name)
    train_dataset, val_dataset, test_dataset = datasets
    _vectorize_simple(train_dataset, val_dataset, test_dataset)
    _sanity_check(train_dataset, val_dataset, test_dataset)
    if coarse:
        assert len(train_dataset.label_vocab) == 3
    else:
        assert len(train_dataset.label_vocab) == 12

def test_clinc_dataset_simple():
    datasets, vocab = get_dataset_simple('clinc')
    train_dataset, val_dataset, test_dataset = datasets
    _vectorize_simple(train_dataset, val_dataset, test_dataset)
    _sanity_check(train_dataset, val_dataset, test_dataset)

@pytest.fixture(scope="session")
def bert_tok():
    return BertTokenizer.from_pretrained("bert-base-uncased")

@pytest.mark.transformer
def test_snips_dataset_bert(bert_tok):
    train_dataset, val_dataset, test_dataset = get_dataset_transformers(bert_tok, 'snips_75', version=0)
    _sanity_check(train_dataset, val_dataset, test_dataset)

@pytest.mark.transformer
def test_rostd_dataset_bert(bert_tok):
    train_dataset, val_dataset, test_dataset = get_dataset_transformers(bert_tok, 'rostd')
    _sanity_check(train_dataset, val_dataset, test_dataset)

@pytest.mark.transformer
def test_clinc_dataset_bert(bert_tok):
    train_dataset, val_dataset, test_dataset = get_dataset_transformers(bert_tok, 'clinc')
...
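In this test suite, _sanity_check is not a pytest fixture but a plain helper that bundles the invariants shared by every dataset variant, so each test can exercise them all with a single call. The following self-contained sketch mirrors that pattern; DummyDataset is hypothetical and only mimics the attributes the checks rely on.

class DummyDataset:
    # Hypothetical stand-in for the repository's dataset objects.
    def __init__(self, texts, labels):
        self.raw_texts = texts
        self.raw_labels = labels
        self.tokenized_texts = [t.lower().split() for t in texts]
        self.vectorized_texts = [[len(tok) for tok in toks] for toks in self.tokenized_texts]

def _sanity_check(*datasets):
    # Shared invariants, asserted once per split, as in the tests above.
    for dataset in datasets:
        assert len(dataset.raw_texts) > 1
        assert len(dataset.raw_texts) == len(dataset.raw_labels)
        assert dataset.vectorized_texts  # vectorization must have run
        for toks in dataset.tokenized_texts:
            assert " ".join(toks) == " ".join(toks).lower()  # tokens are lowercased

train = DummyDataset(["Hello world", "Good morning"], [0, 1])
val = DummyDataset(["Good night", "See you soon"], [1, 0])
_sanity_check(train, val)
print("sanity checks passed")

Keeping the assertions in one helper means a new dataset loader only needs one extra line in its test to get the full battery of checks.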

simple_pkpd.py

Source: simple_pkpd.py (GitHub)

import numpy as np
import pandas as pd
from ...data import Dataset, EventSamples, TimeSeriesSamples
from . import _simple_pkpd

_SANITY_CHECK_ON = False
_sanity_check = dict()

def simple_pkpd_dataset(
    n_timesteps: int = 30,
    time_index_treatment_event: int = 25,
    n_control_samples: int = 200,
    n_treated_samples: int = 200,
    seed: int = 100,
):
    print(f"Generating simple PKPD dataset with random seed {seed}...")
    hidden_confounder: int = 0
    (x_full, t_full, mask_full, batch_ind_full, y_full, y_control, y_mask_full, y_full_all,) = _simple_pkpd.generate(
        seed=seed,
        train_step=time_index_treatment_event,
        step=n_timesteps,
        control_sample=n_control_samples,
        treatment_sample=n_treated_samples,
        hidden_confounder=hidden_confounder,
    )
    if _SANITY_CHECK_ON:
        _sanity_check["x_full"] = x_full
        _sanity_check["t_full"] = t_full
        _sanity_check["mask_full"] = mask_full
        _sanity_check["batch_ind_full"] = batch_ind_full
        _sanity_check["y_full"] = y_full
        _sanity_check["y_control"] = y_control
        _sanity_check["y_mask_full"] = y_mask_full
    x_everything = np.concatenate([x_full, y_full_all], axis=0)
    assert (x_everything[:time_index_treatment_event, :, :] == x_full).all()
    assert (x_everything[time_index_treatment_event:, :, [2]] == y_full).all()
    assert (x_everything[time_index_treatment_event:, :n_control_samples, [2]] == y_control).all()
    sample_index = batch_ind_full.astype(int)
    tss = TimeSeriesSamples(
        data=[
            pd.DataFrame(data=x_everything[:, idx, :], columns=["k_in", "p", "y"], index=range(n_timesteps))
            for idx in sample_index
        ]
    )
    treat_event_feature = np.zeros(shape=(n_control_samples + n_treated_samples,), dtype=float)
    treat_event_feature[n_control_samples:] = 1.0
    df = pd.DataFrame(
        data={
            "si": sample_index,
            "ti": [time_index_treatment_event] * (n_control_samples + n_treated_samples),
            "a": treat_event_feature,
        }
    )
    es = EventSamples.from_df(data=df, column_sample_index="si", column_time_index="ti")
...
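Here _sanity_check is used differently again: it is a module-level dict that stashes the raw outputs of _simple_pkpd.generate() whenever the _SANITY_CHECK_ON flag is set, so the reshaping asserted below it can be inspected by hand. A debugging sketch under that assumption; the import path is hypothetical and depends on your package layout.

# Hypothetical import path; adjust to wherever this module lives.
from mypackage.data.datasets import simple_pkpd

simple_pkpd._SANITY_CHECK_ON = True          # enable the stash
simple_pkpd.simple_pkpd_dataset(seed=100)    # return value is elided in the snippet

# Every intermediate array from _simple_pkpd.generate() is now inspectable:
print(sorted(simple_pkpd._sanity_check.keys()))
print(simple_pkpd._sanity_check["x_full"].shape)

Because the flag defaults to False, normal runs pay no memory cost for the stash; only a developer who flips it sees the intermediates.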

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now and get 100 minutes of automation testing for free!
