How to use the get_cluster_count method in lisa

Python code snippets using lisa_python

analyze_serialized.py

Source: analyze_serialized.py (GitHub)


import csv
import pickle
from math import sqrt, degrees, atan

import pytz
import numpy as np
import matplotlib.pyplot as plt
from analysis.minipix import *

cluster_types = ["SMALL_BLOB", "HEAVY_TRACK", "HEAVY_BLOB", "MEDIUM_BLOB", "STRAIGHT_TRACK", "LIGHT_TRACK"]
utc = pytz.UTC

def get_cluster_count(file, data):
    for frame in range(file.num_frames):
        frame_data = data.get(frame, None)
        if frame_data:
            acq_time = data[frame]["acq_time"].replace(tzinfo=utc)
            clusters = data[frame]["clusters"]
            # Count how many clusters of each type appear in this frame
            types = [cluster[5] for cluster in clusters]
            type_freq = {c_type: types.count(c_type) for c_type in cluster_types}
            print(acq_time, end=",")
            print(",".join(map(str, type_freq.values())))
        else:
            # No clusters recorded for this frame: emit a row of zeros
            acq_time = file.timestamps[frame]
            print(acq_time, end=",")
            print("0,0,0,0,0,0")

def get_frame_energy(file, data):
    for frame in range(file.num_frames):
        frame_data = data.get(frame, None)
        if frame_data:
            acq_time = data[frame]["acq_time"].replace(tzinfo=utc)
            clusters = data[frame]["clusters"]
            # Total energy deposited in the frame
            cluster_energy = [cluster[2] for cluster in clusters]
            total_energy = sum(cluster_energy)
            print(acq_time, end=",")
            print(total_energy)
        else:
            acq_time = file.timestamps[frame]
            print(acq_time, end=",")
            print(0)

def get_LET(file, data):
    for frame in range(file.num_frames):
        frame_data = data.get(frame, None)
        if frame_data:
            acq_time = data[frame]["acq_time"].replace(tzinfo=utc)
            clusters = data[frame]["clusters"]
            for cluster in clusters:
                trklen = cluster[7]
                # True track length through the sensor (55 um pixel pitch, 500 um depth)
                truetrkln = sqrt((55 * trklen) ** 2 + 500 ** 2)
                # Angle of incidence in degrees (math.atan; numpy's arctan is not imported here)
                aoi = degrees(atan(500 / (55 * trklen)))
                print("{},{}".format(truetrkln, aoi), end=",")
            print(acq_time)
        else:
            acq_time = file.timestamps[frame]
            print(acq_time, end=",")
            print(0)

def get_cluster_energy(file, data):
    cluster_types = {
        "SMALL_BLOB": [],
        "HEAVY_TRACK": [],
        "HEAVY_BLOB": [],
        "MEDIUM_BLOB": [],
        "STRAIGHT_TRACK": [],
        "LIGHT_TRACK": []
    }
    for frame in range(file.num_frames):
        frame_data = data.get(frame, None)
        if frame_data:
            acq_time = data[frame]["acq_time"].replace(tzinfo=utc)
            clusters = data[frame]["clusters"]
            for cluster in clusters:
                c_type = cluster[5]
                energy = cluster[2]
                cluster_types[c_type].append((str(acq_time), c_type, str(energy)))
    # Write one CSV per cluster type
    for cluster_type in cluster_types:
        filename = cluster_type + ".csv"
        with open(filename, "w", newline="") as csvfile:
            csvwriter = csv.writer(csvfile)
            csvwriter.writerow(["Timestamp", "Type", "Energy"])
            for cluster in cluster_types[cluster_type]:
                csvwriter.writerow(cluster)

def get_energy_distribution(file, data):
    cluster_types = {
        "SMALL_BLOB": [],
        "HEAVY_TRACK": [],
        "HEAVY_BLOB": [],
        "MEDIUM_BLOB": [],
        "STRAIGHT_TRACK": [],
        "LIGHT_TRACK": []
    }
    for frame in range(file.num_frames):
        frame_data = data.get(frame, None)
        if frame_data:
            acq_time = data[frame]["acq_time"].replace(tzinfo=utc)
            clusters = data[frame]["clusters"]
            for cluster in clusters:
                c_type = cluster[5]
                energy = cluster[2]
                cluster_types[c_type].append((str(acq_time), c_type, str(energy)))
    """
    for cluster_type in cluster_types:
        filename = cluster_type + "_dist" + ".csv"
        csvfile = open(filename, "w")
        csvwriter = csv.writer(csvfile)
        energy = [float(cluster[2]) for cluster in cluster_types[cluster_type]]
        energy_distribution = normalized_distribution(energy, 50)
        csvwriter.writerow(["Range", "Count"])
        for r in energy_distribution:
            csvwriter.writerow([r, energy_distribution[r]])
    """
    fig, ax = plt.subplots()
    for cluster_type in cluster_types:
        energy = [float(cluster[2]) for cluster in cluster_types[cluster_type]]
        if not energy:
            continue
        mu = sum(energy) / len(energy)
        sigma = np.std(energy)
        n, bins, patches = ax.hist(energy, bins=np.arange(0, 10000, 5))
        # Overlay a normal curve; mlab.normpdf was removed from matplotlib,
        # so compute the Gaussian density directly
        y = np.exp(-0.5 * ((bins - mu) / sigma) ** 2) / (sigma * np.sqrt(2 * np.pi))
        ax.plot(bins, y, '--')
    ax.set_xlabel('Energy')
    ax.set_ylabel('Probability Density')
    ax.set_title('Light Track Frequency Distribution')
    fig.tight_layout()
    plt.show()

def normalized_distribution(data, window):
    data.sort()
    distributed_data = {}
    bin_start = 0
    for point in data:
        # Advance to the window that contains this point
        while point >= bin_start + window:
            bin_start += window
        distributed_data[bin_start] = distributed_data.get(bin_start, 0) + 1
    # Normalize the distribution
    total = sum(distributed_data.values())
    for key in distributed_data:
        distributed_data[key] /= total
    return distributed_data

if __name__ == "__main__":
    with open('clusters.pkl', 'rb') as serialized:
        data = pickle.load(serialized)
    pmffile = PmfFile("HASPDATA/thurs_test36.pmf")
    pmffile.load_dsc()
    get_cluster_count(pmffile, data)
    # get_energy_distribution(pmffile, data)
    # get_cluster_energy(file, data)
    # get_cluster_count(file, data)
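
get_cluster_count only touches a handful of attributes on its two arguments, so it can be exercised without a real PMF file. The sketch below uses hypothetical stand-ins: a StubPmf class with just num_frames and timestamps, and a hand-built data dict whose cluster tuples put energy at index 2 and type at index 5, matching the indexing used above (the rest of the field layout is assumed).

from datetime import datetime

class StubPmf:
    # Hypothetical stand-in for PmfFile: only the attributes get_cluster_count reads
    num_frames = 2
    timestamps = ["2019-01-01T00:00:00", "2019-01-01T00:00:01"]

data = {
    0: {
        "acq_time": datetime(2019, 1, 1, 0, 0, 0),
        # Positional fields assumed: index 2 = energy, index 5 = type, index 7 = track length
        "clusters": [
            (0, 0, 42.0, 0, 0, "SMALL_BLOB", 0, 3),
            (1, 1, 99.5, 0, 0, "LIGHT_TRACK", 0, 7),
        ],
    },
    # frame 1 deliberately absent -> printed as "timestamp,0,0,0,0,0,0"
}

get_cluster_count(StubPmf(), data)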


external.py

Source: external.py (GitHub)


from math import log

# count the size of a cluster (sum of one column of the contingency table)
def get_cluster_count(data, index):
    num_class = len(data)
    count = 0
    for i in range(0, num_class):
        count += data[i][index]
    return count

# get a list of cluster counts
def get_all_cluster_counts(data):
    counts = []
    num_cluster = len(data[0])
    for i in range(0, num_cluster):
        counts.append(get_cluster_count(data, i))
    return counts

# count the size of each class (sum of one row)
def get_class_count(data, index):
    num_cluster = len(data[0])
    count = 0
    for i in range(0, num_cluster):
        count += data[index][i]
    return count

# get a list of class counts
def get_all_class_counts(data):
    counts = []
    num_class = len(data)
    for i in range(0, num_class):
        counts.append(get_class_count(data, i))
    return counts

# total value in the list
def get_all_counts(count_list):
    total = 0.0
    for i in range(0, len(count_list)):
        total += count_list[i]
    return total

# compute n choose 2
def get_combination_two(n):
    return n * (n - 1) / 2.0

# compute normalized mutual information
def nmi(data):
    num_class = len(data)
    num_cluster = len(data[0])
    class_counts = get_all_class_counts(data)
    cluster_counts = get_all_cluster_counts(data)
    N = get_all_counts(class_counts)
    # Entropy of the class labels
    Hc = 0.0
    for j in range(0, num_class):
        c = class_counts[j]
        Hc -= (c / N) * log(c / N, 2)
    # Entropy of the clustering, and mutual information I between clusters and classes
    Hw = 0.0
    I = 0.0
    for k in range(0, num_cluster):
        w = cluster_counts[k]
        Hw -= (w / N) * log(w / N, 2)
        for j in range(0, num_class):
            i = 0.0
            wc = data[j][k]
            if wc > 0:
                i = (wc / N) * log(N * wc / (class_counts[j] * w), 2)
            I += i
    return I * 2 / (Hw + Hc)

# compute the Rand index, which penalizes both false positive and false
# negative decisions during clustering:
# TP - assigns two similar documents to the same cluster
# TN - assigns two dissimilar documents to different clusters
# FP - assigns two dissimilar documents to the same cluster
# FN - assigns two similar documents to different clusters
def rand_index(data):
    num_class = len(data)
    num_cluster = len(data[0])
    TP_FP = 0.0
    TP = 0.0
    cluster_counts = []
    for k in range(0, num_cluster):
        w = get_cluster_count(data, k)
        cluster_counts.append(w)
        c = get_combination_two(w)  # choose two from each cluster
        TP_FP += c
        for j in range(0, num_class):
            wc = data[j][k]
            if wc > 1:
                TP += get_combination_two(wc)  # choose two from each class within a cluster
    FP = TP_FP - TP
    TN_FN = 0.0
    for i in range(0, num_cluster):
        for j in range(i + 1, num_cluster):
            TN_FN += cluster_counts[i] * cluster_counts[j]  # choose one from different clusters
...
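
These metrics operate on a class-by-cluster contingency table: data[j][k] is the number of items of class j that landed in cluster k. A quick worked example of the counting helpers and nmi, using a hypothetical 3-class, 3-cluster table:

# Hypothetical contingency table: rows are classes, columns are clusters
data = [
    [5, 1, 0],  # class 0
    [1, 4, 1],  # class 1
    [0, 1, 4],  # class 2
]

print(get_cluster_count(data, 0))    # 6 -- items in cluster 0 (column sum: 5 + 1 + 0)
print(get_all_cluster_counts(data))  # [6, 6, 5]
print(get_class_count(data, 0))      # 6 -- items of class 0 (row sum: 5 + 1 + 0)
print(nmi(data))                     # normalized mutual information, in [0, 1]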


kmeans_clustering.py

Source: kmeans_clustering.py (GitHub)


...
# (the elided lines above build `data` and `result_df` and plot the elbow curve saved below)
plt.xlabel('k')
plt.ylim(0, 425)
plt.savefig('kmeans_clustering_error.png')

kmeans = KMeans(n_clusters=4, random_state=0).fit(data)

# number of rows assigned to the given cluster label
def get_cluster_count(cluster_num):
    return list(kmeans.labels_).count(cluster_num)

result_df['cluster'] = list(kmeans.labels_)
result_df['count'] = result_df['cluster'].apply(get_cluster_count)
cluster_df = result_df.groupby(['cluster']).mean()
...
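
The apply(get_cluster_count) call re-scans kmeans.labels_ once per row, which is quadratic in the number of points. A vectorized alternative maps each label to its frequency via value_counts. The sketch below is self-contained; the toy array and column names are hypothetical stand-ins for the snippet's data and result_df:

import numpy as np
import pandas as pd
from sklearn.cluster import KMeans

# Hypothetical toy data: two obvious groups in the plane
data = np.array([[0, 0], [0, 1], [1, 0], [10, 10], [10, 11], [9, 10]])
kmeans = KMeans(n_clusters=2, random_state=0, n_init=10).fit(data)

result_df = pd.DataFrame(data, columns=["x", "y"])
result_df["cluster"] = kmeans.labels_
# Map each cluster label to its frequency instead of re-counting per row
result_df["count"] = result_df["cluster"].map(result_df["cluster"].value_counts())
print(result_df)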

