How to use the test_counts method in Slash

Best Python code snippets using slash. The three snippets below come from open-source test suites on GitHub and show test_counts in context.

test_similarity_deltas.py

Source: test_similarity_deltas.py (GitHub)


import os
import unittest
import numpy as np
from collections import Counter
from compare_trajectories import *
from store_counts import store_counts

TEST_OUTPUT = 'test_output'
SIMILARITY_ORDER = [NOT, SOMEWHAT, MOSTLY, VERY]
DELTA_THRESHOLD = 10


class TestCompareTrajectories(unittest.TestCase):
    golden_keywords = {
        '1-1': 'for if print',
        '1-2': 'for if print'
    }

    def similarity_deltas_multiple_thresholds(self) -> None:
        thresholds = [i / DELTA_THRESHOLD for i in range(DELTA_THRESHOLD)]
        for threshold in thresholds:
            if not os.path.exists(TEST_OUTPUT):
                os.mkdir(TEST_OUTPUT)
            storage = os.path.join(TEST_OUTPUT, str(threshold))
            store_counts(storage=storage, threshold=threshold)
            for pb, keywords in TestCompareTrajectories.golden_keywords.items():
                print(pb)
                counts = count_keywords(keywords)
                sim_strs = []
                for i in range(len(counts)):
                    test_count = list(counts)
                    if counts[i] >= 1:
                        # Try a delta of -1 first, then leave the count at +1
                        # so the shared check below sees the raised variant.
                        test_count[i] -= 1
                        sim_strs.append(get_similarity_string(
                            get_similarity_percent(test_count, pb, storage=storage)))
                        test_count[i] += 2
                    else:
                        test_count[i] += 1
                    sim_strs.append(get_similarity_string(
                        get_similarity_percent(test_count, pb, storage=storage)))
                counts = Counter(sim_strs)

                output_path = os.path.join(storage, f'golden_deltas_{pb}.txt')
                if os.path.exists(output_path):
                    open(output_path, 'w').close()

                with open(output_path, 'a') as f:
                    f.write('keyword, count\n')
                    for similarity_keyword in SIMILARITY_ORDER:
                        f.write(similarity_keyword + ", ")
                        count = counts[similarity_keyword]  # Counter returns 0 for missing keys
                        print(f"  {similarity_keyword}: {count}")
                        f.write(str(count) + '\n')

    def similarity_deltas_multiple_parameters(self) -> None:
        if not os.path.exists(TEST_OUTPUT):
            os.mkdir(TEST_OUTPUT)
        thresholds = np.arange(0.0, 1.0, 0.05)
        scales = np.arange(0.5, 1.5, 0.05)
        for threshold in thresholds:
            for scale in scales:
                store_counts(storage=TEST_OUTPUT, threshold=threshold, std_scale=scale)
                for pb, keywords in TestCompareTrajectories.golden_keywords.items():
                    counts = count_keywords(keywords)
                    sim_counts = {}
                    for n in range(1, 10):
                        all_test_counts = []
                        # Deltas of +/-n on a single keyword.
                        for i in range(len(counts)):
                            test_counts = list(counts)
                            test_counts[i] += n
                            all_test_counts.append(list(test_counts))  # copy, so later edits don't clobber it
                            if counts[i] >= n:
                                test_counts[i] -= n * 2
                                all_test_counts.append(list(test_counts))
                        # Deltas split across two adjacent keywords.
                        for z in range(1, n):
                            for i in range(len(counts) - 1):
                                test_counts = list(counts)
                                test_counts[i] += n - z
                                test_counts[i + 1] += z
                                all_test_counts.append(list(test_counts))
                                if counts[i] >= n - z and counts[i + 1] >= z:
                                    test_counts[i] -= (n - z) * 2
                                    test_counts[i + 1] -= z * 2
                                    all_test_counts.append(list(test_counts))
                            # Deltas split across three adjacent keywords.
                            for k in range(1, z):
                                for i in range(len(counts) - 2):
                                    test_counts = list(counts)
                                    test_counts[i] += n - z - k
                                    test_counts[i + 1] += z - k
                                    test_counts[i + 2] += k
                                    all_test_counts.append(list(test_counts))
                                    if counts[i] >= n - z - k and counts[i + 1] >= z - k and counts[i + 2] >= k:
                                        test_counts[i] -= (n - z - k) * 2
                                        test_counts[i + 1] -= (z - k) * 2
                                        test_counts[i + 2] -= k * 2
                                        all_test_counts.append(list(test_counts))
                        unique_test_counts = [list(x) for x in set(tuple(x) for x in all_test_counts)]
                        sim_strs = [get_similarity_string(get_similarity_percent(c, pb, storage=TEST_OUTPUT))
                                    for c in unique_test_counts]
                        current_counts = dict(Counter(sim_strs))
                        if n == 1 and 'very' not in current_counts:
                            break
                        elif n == 3 and len(current_counts) != 1:
                            break
                        sim_counts[n] = current_counts
                    if sim_counts:
                        print(pb, threshold, scale)
                        print("  " + str(sim_counts) + '\n')

    def similarity_deltas(self, threshold: float = 0.0, scale: float = 1.0) -> None:
        store_counts(storage=TEST_OUTPUT, threshold=threshold, std_scale=scale)
        for pb, keywords in TestCompareTrajectories.golden_keywords.items():
            counts = count_keywords(keywords)
            sim_counts = {}
            for n in range(1, 10):
                all_test_counts = []
                for i in range(len(counts)):
                    test_counts = list(counts)
                    test_counts[i] += n
                    all_test_counts.append(list(test_counts))
                    if counts[i] >= n:
                        test_counts[i] -= n * 2
                        all_test_counts.append(list(test_counts))
                for z in range(1, n):
                    for i in range(len(counts) - 1):
                        test_counts = list(counts)
                        test_counts[i] += n - z
                        test_counts[i + 1] += z
                        all_test_counts.append(list(test_counts))
                        if counts[i] >= n - z and counts[i + 1] >= z:
                            test_counts[i] -= (n - z) * 2
                            test_counts[i + 1] -= z * 2
                            all_test_counts.append(list(test_counts))
                    for k in range(1, z):
                        for i in range(len(counts) - 2):
                            test_counts = list(counts)
                            test_counts[i] += n - z - k
                            test_counts[i + 1] += z - k
                            test_counts[i + 2] += k
                            all_test_counts.append(list(test_counts))
                            if counts[i] >= n - z - k and counts[i + 1] >= z - k and counts[i + 2] >= k:
                                test_counts[i] -= (n - z - k) * 2
                                test_counts[i + 1] -= (z - k) * 2
                                test_counts[i + 2] -= k * 2
                                all_test_counts.append(list(test_counts))
                unique_test_counts = [list(x) for x in set(tuple(x) for x in all_test_counts)]
                sim_strs = [get_similarity_string(get_similarity_percent(c, pb, storage=TEST_OUTPUT))
                            for c in unique_test_counts]
                sim_counts[n] = dict(Counter(sim_strs))
            print(pb, sim_counts)


if __name__ == '__main__':
    tester = TestCompareTrajectories()
    tester.similarity_deltas_multiple_parameters()
    # ...
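All three test methods above lean on the same enumeration trick: build every count vector that differs from the golden counts by a total delta of n, deduplicate, and tally the similarity label each variant receives. A minimal, self-contained sketch of that step, with a stub classifier standing in for the project's get_similarity_string/get_similarity_percent pair (both of which live in compare_trajectories and are not shown here):

from collections import Counter

def perturbations(counts, n=1):
    # Yield each variant of `counts` with one entry raised by n,
    # plus the lowered variant when it would not go negative.
    for i in range(len(counts)):
        up = list(counts)
        up[i] += n
        yield up
        if counts[i] >= n:
            down = list(counts)
            down[i] -= n
            yield down

def delta_labels(counts, classify, n=1):
    # Deduplicate the perturbed vectors, then tally the label each receives.
    unique = {tuple(c) for c in perturbations(counts, n)}
    return dict(Counter(classify(list(c)) for c in unique))

# Hypothetical stand-in for get_similarity_string(get_similarity_percent(...)).
label = lambda c: 'very' if sum(c) >= 3 else 'somewhat'
print(delta_labels([1, 2, 0], label))  # e.g. {'very': 3, 'somewhat': 2}

The stub's threshold is arbitrary; the point is the perturb-deduplicate-tally shape, not the classifier.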


test_chunking.py

Source: test_chunking.py (GitHub)


from htrc_features.transformations import chunk_ends, chunk_even, chunk_last
from htrc_features.resolvers import LocalResolver
from htrc_features import Volume
from pathlib import Path
import random
import numpy as np
from collections import Counter
import pandas as pd


class TestChunking():

    def test_write_to_chunked_parquet_sectionless(self, tmpdir):
        dir = "tests/data"
        vol_in = Volume(id='aeu.ark:/13960/t1rf63t52', dir=str(dir), id_resolver='local')
        output = Volume(id='foo.123', format='parquet', mode='wb', id_resolver='local', dir=tmpdir)
        output.write(vol_in, token_kwargs={"chunk": True, "drop_section": True, "pos": False})
        read = pd.read_parquet(Path(tmpdir, "foo.123.tokens.parquet")).reset_index()
        print(read.columns)
        assert("chunk" in read.columns)

    def test_write_to_chunked_parquet(self, tmpdir):
        dir = "tests/data"
        vol_in = Volume(id='aeu.ark:/13960/t1rf63t52', dir=str(dir), id_resolver='local')
        output = Volume(id='foo.123', dir=tmpdir, format='parquet', mode='wb')
        output.write(vol_in, token_kwargs={"chunk": True})
        read = pd.read_parquet(Path(tmpdir, "foo.123.tokens.parquet")).reset_index()
        assert("chunk" in read.columns)

    def test_even_chunking(self):
        # All methods should solve it when pages are only one thing long.
        for method in chunk_ends, chunk_even, chunk_last:
            test_counts = np.ones(1000)
            target = 100
            c = Counter()
            for chunk, count in zip(method(test_counts, target), test_counts):
                c[chunk] += count
            assert(Counter(c.values()) == Counter({100: 10}))

    def test_assymetric_chunking_end(self):
        # Previously this caused an infinite loop.
        for method in chunk_ends, chunk_even, chunk_last:
            test_counts = np.ones(1000)
            test_counts[-1] = 500
            target = 100
            c = Counter()
            for chunk, count in zip(method(test_counts, target), test_counts):
                c[chunk] += count
            assert(np.max([*c.values()]) == 500)
            assert(np.min([*c.values()]) == 99)

    def test_assymetric_chunking_middle(self):
        # In cases with page lengths like [1, 500, 2] the outer chunks may
        # try to eat the inner chunk at the same time. The test assertion
        # here is unimportant: the goal is really not to raise an error.
        for method in chunk_ends, chunk_even, chunk_last:
            test_counts = np.ones(1000)
            test_counts[500] = 500
            target = 100
            c = Counter()
            for chunk, count in zip(method(test_counts, target), test_counts):
                c[chunk] += count
            assert(np.max([*c.values()]) <= 501)

    def test_tiny_chunk_size(self):
        # What if the chunk size is much smaller than any page?
        # The only reasonable response is
        for method in chunk_ends, chunk_even, chunk_last:
            test_counts = np.array([500] * 10)
            target = 100
            c = Counter()
            for chunk, count in zip(method(test_counts, target), test_counts):
                c[chunk] += count
            assert(np.max([*c.values()]) == 500)
    # ...
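Each chunking function above consumes an array of per-page token counts plus a target chunk size and yields one chunk id per page; the tests then sum page counts per id with a Counter. A rough sketch of a greedy chunker with that same call shape (a stand-in for illustration, not the htrc_features implementation):

import numpy as np
from collections import Counter

def chunk_greedy(page_counts, target):
    # Yield a chunk id per page, opening a new chunk once `target` tokens accumulate.
    chunk, running = 0, 0
    for count in page_counts:
        if running >= target:
            chunk += 1
            running = 0
        running += count
        yield chunk

test_counts = np.ones(1000)
target = 100
c = Counter()
for chunk, count in zip(chunk_greedy(test_counts, target), test_counts):
    c[chunk] += count
print(Counter(c.values()))  # expect Counter({100.0: 10}): ten even chunks

With uniform one-token pages this reproduces the even split that test_even_chunking asserts; the real chunk_ends/chunk_even/chunk_last differ in how they handle the oversized-page edge cases the other tests probe.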


memory_leak_check.py

Source: memory_leak_check.py (GitHub)


1"""Test for memory leaks"""2from minpower import powersystems, solve3from .test_utils import solve_problem, make_loads_times4import objgraph5import inspect6def show_memory_backrefs(name):7 objgraph.show_backrefs(8 objgraph.by_type(name), filename="backrefs-{}.png".format(name)9 )10def show_memory_refs(name):11 try:12 obj = objgraph.by_type(name)[0]13 except IndexError:14 print(("no object of type", name))15 return16 objgraph.show_chain(17 objgraph.find_backref_chain(obj, inspect.ismodule),18 filename="chain-{}.png".format(name),19 )20def show_memory_growth():21 objgraph.show_growth()22def get_counts(prefix=""):23 test_types = [24 "Var",25 "Piecewise",26 "ScenarioTree",27 "ScenarioTreeNode",28 "Scenario",29 "_SetContainer",30 "_ConstraintArray",31 ]32 # use objgraph to check if these pyomo objects still exist33 objects = {}34 test_counts = {}35 for name in test_types:36 objects[name] = objgraph.by_type(name)37 test_counts[name] = len(objects[name])38 if True:39 for name in test_types:40 if test_counts[name] == 0:41 continue42 else:43 obj = objects[name][0]44 fname = prefix + "-" + "objgraph-{}-".format(name)45 objgraph.show_refs([obj], filename=fname + "refs.png") # too_many=50,46 objgraph.show_backrefs([obj], filename=fname + "backref.png")47 objgraph.show_chain(48 objgraph.find_backref_chain(obj, inspect.ismodule),49 filename=fname + "chain.png",50 )51 return test_counts52def leak_on_reset():53 # create a problem54 loads_times = make_loads_times(Pdt=[20, 30, 40, 50])55 generators = [56 powersystems.Generator(name="expensive", costcurveequation="30P+0.01P^2"),57 powersystems.Generator(name="cheap", costcurveequation="20P+0.005P^2"),58 ]59 # solve it60 power_system, times = solve_problem(generators, **loads_times)61 # reset the model - no pyomo components should persist62 power_system.reset_model()63 test_counts = get_counts("uc")64 assert sum(test_counts.values()) == 065def leak_on_stochastic_reset():66 solve.solve_problem(67 datadir="./uc-stochastic",68 shell=False,69 csv=False,70 hours_commitment=24,71 hours_commitment_overlap=12,72 get_duals=False,73 Nscenarios=None,74 )75 test_counts = get_counts("uc-stochastic")76 assert sum(test_counts.values()) == 077if __name__ == "__main__":...


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. From setting up the prerequisites for your first automation test to following best practices and diving into advanced test scenarios, the Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Slash automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
