How to use norm_row method in Testify

Best Python code snippet using Testify_python

motifEnrichment.py

Source:motifEnrichment.py Github

copy

Full Screen

"""ATAC-Seq peaks motif enrichment analysis.

Reads HOMER known-motif enrichment result tables (one .txt per cell type),
keeps the top motifs for each cell type, intersects them to find motifs
significant in every cell type, and plots raw / row-normalized /
column-normalized log P-value heatmaps plus clustered heatmaps.
"""
import glob
import os

import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt

known_results_folder = "C:\\Users\\libin\\UCSF\\motif_analysis\\200_HOCOMOCO_Dec.11\\set1"
size = 200  # peak window size; used only to tag the output file names

list_of_result = glob.glob(known_results_folder + "\\*.txt")
# result file names look like "<13-char prefix><cell_type>.txt" — TODO confirm prefix length
cell_types = [os.path.split(path)[-1][13:-4] for path in list_of_result]
# read everything as str: extreme P-values such as 1e-749 underflow to 0 if parsed as float
results = [pd.read_table(result, sep="\t", dtype=str) for result in list_of_result]

# data cleaning for the HOCOMOCO dataset: keep only the leading TF symbol of the motif name
for res in results:
    res["Motif Name"] = res["Motif Name"].str.extract(r"([A-Za-z0-9]+)_.+", expand=False)

results_cleaned = [
    result.filter(["Motif Name", "Log P-value"], axis=1).drop_duplicates(subset="Motif Name")
    for result in results
]

# extract top motifs per cell type (input tables are assumed sorted by significance)
sig_results_cleaned = [result.iloc[0:51, :] for result in results_cleaned]
for (c, r) in zip(cell_types, sig_results_cleaned):
    # rename the shared "Log P-value" column to the cell type so columns stay distinct after concat
    r.rename(columns={"Log P-value": c}, inplace=True)
    r.to_csv(known_results_folder + "\\" + c + str(size) + ".csv", sep=",", index=False)

# find motifs significant in every cell type (inner join on motif name)
for sig_result in sig_results_cleaned:
    sig_result.set_index("Motif Name", inplace=True)
common_motif = pd.concat(sig_results_cleaned, axis=1, join="inner")
common_motif.to_csv(known_results_folder + "\\common_motif.csv")

common_motif = common_motif.astype(float)

## Below are plotting parts
# z-score normalize per column (cell type) and per row (motif)
norm_col = common_motif.sub(common_motif.mean(axis=0), axis=1)
norm_col = norm_col.div(common_motif.std(axis=0), axis=1)
norm_row = common_motif.sub(common_motif.mean(axis=1), axis=0)
norm_row = norm_row.div(common_motif.std(axis=1), axis=0)
average_row = norm_row.mean(axis=0).mean()
average_col = norm_col.mean(axis=0).mean()
average_all = common_motif.mean(axis=0).mean()

plt.figure(figsize=(11, 15))
ax = sns.heatmap(norm_col, vmin=norm_col.values.min(), vmax=norm_col.values.max(),
                 cmap="RdBu", center=average_col, annot=True)
plt.title("Column-Normalized Log P-value", y=1.01, x=1.1, fontsize=13)
for item in ax.get_xticklabels():
    item.set_rotation(45)

plt.figure(figsize=(11, 15))
ax1 = sns.heatmap(common_motif, vmin=common_motif.values.min(), vmax=common_motif.values.max(),
                  cmap="RdBu", center=average_all, annot=True)
plt.title("Log P-value", y=1.01, x=1.1, fontsize=13)
# BUG FIX: the original rotated ax's tick labels here, so this figure was never rotated
for item in ax1.get_xticklabels():
    item.set_rotation(45)

plt.figure(figsize=(11, 15))
ax2 = sns.heatmap(norm_row, vmin=norm_row.values.min(), vmax=norm_row.values.max(),
                  center=average_row, cmap="RdBu", annot=True)
plt.title("Row-Normalized Log P-value", y=1.01, x=1.1, fontsize=13)
# BUG FIX: same here — original used ax instead of ax2
for item in ax2.get_xticklabels():
    item.set_rotation(45)

# clustered heatmaps, standard-scaled by row (0) and by column (1)
cluster_row = sns.clustermap(common_motif, cmap="RdBu", standard_scale=0, figsize=(11, 15))
plt.title("Row-Normalized cluster", y=1.2, x=1.1, fontsize=13)
cluster_column = sns.clustermap(common_motif, standard_scale=1, figsize=(11, 15))
plt.title("Col-Normalized cluster", y=1.2, x=1.1, fontsize=13)

Full Screen

Full Screen

items.py

Source:items.py Github

copy

Full Screen

"""Define Scrapy Item corresponding to a single WARN notice entry."""
import scrapy
from scrapy.item import Item
from scrapy.loader import ItemLoader


class Entry(Item):
    # Metadata
    state_name = scrapy.Field()
    timestamp = scrapy.Field()
    url = scrapy.Field()
    # WARN notice data provided for the state, as dictionaries
    fields = scrapy.Field()
    normalized_fields = scrapy.Field()  # Will be populated by get_normalized_fields


class UnpackedEntry(dict, Item):
    """Flexible Item whose items can be populated using an item loader.

    This is used to unpack fields and normalized_fields from the Entry item,
    for use in saving these to individual CSV files.

    Note that for the exporting to work, the Fields Items generated by a
    particular state must be consistent in the actual field names!
    """
    pass


def unpack_entry_fields_as_items(item):
    """Unpack dictionaries defined in Entry item (fields and normalized_fields)
    and generate new UnpackedEntry items from them.
    These will be processed by separate pipelines from the Entry items.

    Parameters:
        item: fully loaded Entry item

    Returns:
        rawItem: UnpackedEntry item with each value from item['fields'] in its own Field
        normItem: UnpackedEntry item with each value from item['normalized_fields'] in its own Field
    """
    assert isinstance(item, Entry)
    rawLoader = ItemLoader(UnpackedEntry())
    normLoader = ItemLoader(UnpackedEntry())
    # BUG FIX: scrapy Items do not expose field values as attributes;
    # item.state_name raises AttributeError — values must be read dict-style.
    rawLoader.add_value('state_name', item['state_name'])
    normLoader.add_value('state_name', item['state_name'])
    # BUG FIX: iterating a dict yields keys only, so "for key, value in item.fields"
    # could never unpack; iterate .items() of the dict-valued field instead.
    for key, value in item['fields'].items():
        rawLoader.add_value(key, value)

    for key, value in item['normalized_fields'].items():
        normLoader.add_value(key, value)
    rawItem = rawLoader.load_item()
    normItem = normLoader.load_item()
    return (rawItem, normItem)


def get_normalized_fields(name_dict, row):
    """Grab and rename columns which are to be normalized across states.

    Parameters:
        name_dict: dictionary of normalized_name:original_name pairs
        row: pd.Series containing original_name:value fields
    Returns:
        norm_row: pd.Series containing normalized_name:value fields
    """
    columns_to_keep = [x for x in name_dict.values() if x]  # Some columns may not be present
    norm_row = row[row.index.intersection(columns_to_keep)]
    name_dict_inverse = {v: k for (k, v) in name_dict.items()}
    norm_row.rename(index=name_dict_inverse, inplace=True)
    return norm_row

Full Screen

Full Screen

level_3_doomsday_fuel.py

Source:level_3_doomsday_fuel.py Github

copy

Full Screen

from fractions import Fraction
# BUG FIX: fractions.gcd was deprecated and removed in Python 3.9;
# math.gcd is the supported equivalent (identical for positive inputs).
from math import gcd


def find_lcm(denominator_list):
    """Return the least common multiple of a list of positive integers."""
    lcm = 1
    for i in denominator_list:
        lcm = lcm * i // gcd(lcm, i)
    return lcm


def normalize_matrix(m):
    """Row-normalize integer transition counts into probabilities.

    All-zero rows (terminal states) stay all zeros, so probability mass
    entering a terminal state is not propagated further.
    """
    matrix = []
    for row in m:
        row_sum = sum(row)
        norm_row = []
        for col in row:
            if row_sum > 0:
                norm_row.append(float(col) / float(row_sum))
            else:
                norm_row.append(0)
        matrix.append(norm_row)
    return matrix


def dot(m1, m2):
    """Plain matrix product of two list-of-lists matrices."""
    return [
        [sum(x * y for x, y in zip(m1_r, m2_c)) for m2_c in zip(*m2)] for m1_r in m1
    ]


def solution(m):
    """Google foobar "doomsday fuel": absorption probabilities of a Markov chain.

    m[i][j] is the (unnormalized) observed transition count i -> j; an
    all-zero row marks a terminal state. Returns the probability of ending
    in each terminal state as [numerators..., common denominator].
    """
    matrix = normalize_matrix(m)
    terminal_states = [i for i, row in enumerate(m) if sum(row) == 0]
    # s0 itself terminal: the chain ends there with probability 1
    if terminal_states[0] == 0:
        return [1, 1]
    # Power-iterate the chain from s0, accumulating the mass that *arrives*
    # at each state on every step. Terminal rows are zero, so mass entering
    # a terminal state is counted exactly once and then vanishes.
    # BUG FIX: the original kept the state as an n-by-1 matrix, which the
    # first dot() silently blew up into an n-by-n matrix (O(n^3) per step);
    # a 1-by-n row vector yields identical values at O(n^2) per step.
    state = [0.0] * len(m)
    state[0] = 1.0
    state_acc = [0.0] * len(m)
    for _ in range(1000):
        state = dot([state], matrix)[0]
        state_acc = [state_acc[i] + state[i] for i in range(len(state))]
    # Recover exact rationals from the converged floats.
    terminal_fractions = []
    for terminal_state in terminal_states:
        node_prob = state_acc[terminal_state]
        numerator, denominator = float(node_prob).as_integer_ratio()
        terminal_fractions.append(Fraction(numerator, denominator).limit_denominator())
    least_common_multiple = find_lcm([f.denominator for f in terminal_fractions])
    probabilities = [
        int(f.numerator * (float(least_common_multiple) / float(f.denominator)))
        for f in terminal_fractions
    ]
    probabilities.append(int(least_common_multiple))
    return probabilities


print(
    solution([
        [0, 2, 1, 0, 0],
        [0, 0, 0, 3, 4],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0],
        [0, 0, 0, 0, 0]
    ])
)
print(
    solution([
        [0, 1, 0, 0, 0, 1],  # s0, the initial state, goes to s1 and s5 with equal probability
        [4, 0, 0, 3, 2, 0],  # s1 can become s0, s3, or s4, but with different probabilities
        [0, 0, 0, 0, 0, 0],  # s2 is terminal, and unreachable (never observed in practice)
        [0, 0, 0, 0, 0, 0],  # s3 is terminal
        [0, 0, 0, 0, 0, 0],  # s4 is terminal
        [0, 0, 0, 0, 0, 0],  # s5 is terminal
    ])
)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, through following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run Testify automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful