How to use the analysis2 method in autotest

Best Python code snippet using autotest_python

makedatafile_t2kcc0pi.py

Source: makedatafile_t2kcc0pi.py (GitHub)


from ROOT import *
from array import *
from math import sqrt  # used for the diagonal errors below

# Helpers to parse bin-edge strings like "0.30 - 0.40" from the data tables.
def GetMiddle(mystr):
    lims = mystr.strip().split(" - ")
    val = (float(lims[0]) + float(lims[1]))/2.0
    return val

def GetLowEdge(mystr):
    lims = mystr.strip().split(" - ")
    val = (float(lims[0]) + 0.00001)
    return val

def GetHighEdge(mystr):
    lims = mystr.strip().split(" - ")
    val = (float(lims[1]) - 0.00001)
    return val

def GetIndex(mystr):
    lims = mystr.split("-")
    return int(lims[0]), int(lims[1])

outfile = TFile("T2K_CC0PI_2DPmuCosmu_Data.root", "RECREATE")

# ANALYSIS I
#______________________________
xedge = [0.0, 0.3, 0.4, 0.5, 0.65, 0.8, 0.95, 1.1, 1.25, 1.5, 2.0, 3.0, 5.0, 30.0]
yedge = [-1.0, 0.0, 0.6, 0.7, 0.8, 0.85, 0.9, 0.94, 0.98, 1.0]

datahist = TH2D("analysis1_data", "analysis1_data",
                len(xedge)-1, array('f', xedge),
                len(yedge)-1, array('f', yedge))

maphist = datahist.Clone("analysis1_map")
maphist.SetTitle("analysis1_map")

counthist = datahist.Clone("analysis1_entrycount")

datapoly = TH2Poly("datapoly", "datapoly", 0.0, 30.0, -1.0, 1.0)

hist = None
binedges = []
histedgeslist = []
xsecvals = []
histxseclist = []
binlimits = [3, 8, 15, 22, 30, 39, 47, 58, 67]

# Read the Analysis I cross-section table and fill the 2D histograms.
with open("cross-section_analysisI.txt") as f:
    count = 0
    for line in f:
        count += 1
        if (count < 4): continue
        data = line.strip().split("|")
        if (len(data) < 1): continue

        ibin = int( data[0] ) + 1

        xval = round(float(GetLowEdge( data[2] )), 4)
        yval = round(float(GetLowEdge( data[1] )), 4)
        xhig = round(float(GetHighEdge( data[2] )), 4)
        yhig = round(float(GetHighEdge( data[1] )), 4)

        xsec = float( data[3] ) * 1E-38

        datapoly.AddBin( xval, yval, xhig, yhig )
        datapoly.SetBinContent( datapoly.GetNumberOfBins(), xsec )

        binedges.append( xval )
        xsecvals.append( xsec )
        if ibin in binlimits:
            binedges.append( xhig )
            histedgeslist.append(binedges)
            histxseclist.append(xsecvals)
            binedges = []
            xsecvals = []

        datahist.Fill(xval, yval, xsec)
        counthist.Fill(xval, yval, 1.0)

        for i in range(maphist.GetNbinsX()):
            for j in range(maphist.GetNbinsY()):
                xcent = maphist.GetXaxis().GetBinCenter(i+1)
                ycent = maphist.GetYaxis().GetBinCenter(j+1)
                if (xcent > xval and xcent < xhig and
                    ycent > yval and ycent < yhig):
                    maphist.SetBinContent(i+1, j+1, ibin)

# Get Covariances (keep in 1E-38 cm^2)
nbins = 67
statcov = TH2D("analysis1_statcov", "analysis1_statcov", nbins, 0.0, float(nbins), nbins, 0.0, float(nbins))
systcov = TH2D("analysis1_systcov", "analysis1_systcov", nbins, 0.0, float(nbins), nbins, 0.0, float(nbins))
normcov = TH2D("analysis1_normcov", "analysis1_normcov", nbins, 0.0, float(nbins), nbins, 0.0, float(nbins))
totcov = TH2D("analysis1_totcov", "analysis1_totcov", nbins, 0.0, float(nbins), nbins, 0.0, float(nbins))

with open("covariance_statisticUncertainty_analysisI.txt") as f:
    count = 0
    for line in f:
        count += 1
        if (count < 4): continue
        data = line.strip().split("|")
        if (len(data) < 1): continue
        xi, yi = GetIndex(data[0])
        cov = float(data[1])
        statcov.SetBinContent(xi + 1, yi + 1, cov)

with open("covariance_shapeSystematics_analysisI.txt") as f:
    count = 0
    for line in f:
        count += 1
        if (count < 4): continue
        data = line.strip().split("|")
        if (len(data) < 1): continue
        xi, yi = GetIndex(data[0])
        cov = float(data[1])
        systcov.SetBinContent(xi + 1, yi + 1, cov)

with open("covariance_fluxNormalizationSystematics_analysisI.txt") as f:
    count = 0
    for line in f:
        count += 1
        if (count < 4): continue
        data = line.strip().split("|")
        if (len(data) < 1): continue
        xi, yi = GetIndex(data[0])
        cov = float(data[1])
        normcov.SetBinContent(xi + 1, yi + 1, cov)

totcov.Add(systcov)
totcov.Add(statcov)
totcov.Add(normcov)

# Unrolled 1D data histogram with errors from the diagonal of the total covariance.
data1D = TH1D("datahist", "datahist", datapoly.GetNumberOfBins(), 0.0, float(datapoly.GetNumberOfBins()))
for i in range(datapoly.GetNumberOfBins()):
    data1D.SetBinContent(i+1, datapoly.GetBinContent(i+1))
    data1D.SetBinError(i+1, sqrt(totcov.GetBinContent(i+1, i+1))*1E-38)

outfile.cd()
for i, obj in enumerate(histedgeslist):
    print(obj)
    hist = TH1D("dataslice_" + str(i), "dataslice_" + str(i), len(obj)-1, array('f', obj))
    for j in range(hist.GetNbinsX()):
        hist.SetBinContent(j+1, histxseclist[i][j])
    hist.GetXaxis().SetRangeUser(obj[0], obj[len(obj)-2])
    hist.Draw("HIST")
    gPad.Update()
    hist.SetNameTitle("dataslice_" + str(i), "dataslice_" + str(i))
    hist.Write()

outfile.cd()
datahist.Write()
counthist.Write()
maphist.Write()
datapoly.Write()
data1D.Write()
statcov.Write()
systcov.Write()
totcov.Write()
normcov.Write()

# ANALYSIS II
#______________________________
xedge = [0.2, 0.35, 0.5, 0.65, 0.8, 0.95, 1.1, 1.25, 1.5, 2.0, 3.0, 5.0, 30.0]
yedge = [0.6, 0.7, 0.8, 0.85, 0.9, 0.925, 0.95, 0.975, 1.0]

datahist = TH2D("analysis2_data", "analysis2_data",
                len(xedge)-1, array('f', xedge),
                len(yedge)-1, array('f', yedge))

maphist = datahist.Clone("analysis2_map")
maphist.SetTitle("analysis2_map")
counthist = datahist.Clone("analysis2_entrycount")

# Get Data Entries
entries = []
count = 0
with open("rps_crossSection_analysis2.txt") as f:
    for line in f:
        count += 1
        if (count < 4): continue
        data = line.strip().split("|")
        if (len(data) < 1): continue
        ibin = int( data[0] ) + 1
        xval = GetMiddle( data[2] )
        yval = GetMiddle( data[1] )
        xsec = float( data[3] ) * 1E-38
        datahist.Fill(xval, yval, xsec)
        maphist.Fill(xval, yval, ibin)
        counthist.Fill(xval, yval, 1.0)
        # print(ibin, "Map Value")

# Get N Bins
nbins = int(maphist.GetMaximum())
print("NBins I = ", nbins)

# Get Covariances (keep in 1E-38 cm^2)
statcov = TH2D("analysis2_statcov", "analysis2_statcov", nbins, 0.0, float(nbins), nbins, 0.0, float(nbins))
systcov = TH2D("analysis2_systcov", "analysis2_systcov", nbins, 0.0, float(nbins), nbins, 0.0, float(nbins))
normcov = TH2D("analysis2_normcov", "analysis2_normcov", nbins, 0.0, float(nbins), nbins, 0.0, float(nbins))
totcov = TH2D("analysis2_totcov", "analysis2_totcov", nbins, 0.0, float(nbins), nbins, 0.0, float(nbins))

with open("rps_statsCov_analysis2.txt") as f:
    count = 0
    for line in f:
        count += 1
        if (count < 4): continue
        data = line.strip().split("|")
        if (len(data) < 1): continue
        xi, yi = GetIndex(data[0])
        cov = float(data[1])
        statcov.SetBinContent(xi + 1, yi + 1, cov)

with open("rps_systCov_analysis2.txt") as f:
    count = 0
    for line in f:
        count += 1
        if (count < 4): continue
        data = line.strip().split("|")
        if (len(data) < 1): continue
        xi, yi = GetIndex(data[0])
        cov = float(data[1])
        systcov.SetBinContent(xi + 1, yi + 1, cov)

with open("rps_fluxNormCov_analysis2.txt") as f:
    count = 0
    for line in f:
        count += 1
        if (count < 4): continue
        data = line.strip().split("|")
        if (len(data) < 1): continue
        xi, yi = GetIndex(data[0])
        cov = float(data[1])
        normcov.SetBinContent(xi + 1, yi + 1, cov)

totcov.Add(systcov)
totcov.Add(statcov)
totcov.Add(normcov)

outfile.cd()
datahist.Write()
maphist.Write()
counthist.Write()
statcov.Write()
systcov.Write()
totcov.Write()
normcov.Write()
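After the script has run, the objects it wrote, including the analysis2 histograms, can be read back from the output file for a quick check. The snippet below is a minimal read-back sketch: the file and object names are taken from the script above, while the canvas name, draw option, and PNG file name are illustrative choices, not part of the original code.

# Minimal read-back sketch for the file produced above (illustrative names noted in comments).
from ROOT import TFile, TCanvas

f = TFile("T2K_CC0PI_2DPmuCosmu_Data.root", "READ")
analysis2_data = f.Get("analysis2_data")      # TH2D of the measured cross section
analysis2_totcov = f.Get("analysis2_totcov")  # total covariance (stat + syst + norm)

print("Analysis II integral:", analysis2_data.Integral())

c = TCanvas("c_analysis2", "analysis2_data")  # canvas name is an arbitrary choice
analysis2_data.Draw("COLZ")
c.SaveAs("analysis2_data.png")                # output file name is an arbitrary choice

f.Close()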


finalproject.py

Source: finalproject.py (GitHub)


# Sentiment analysis of YouTube video titles and descriptions with TextBlob.
import pandas as pd
import numpy as np
import os

# Same pass for the US dataset (commented out):
# df = pd.read_csv('USvideos_new.csv', engine='python')
# print(df.head(5))
# from textblob import TextBlob
# pola = []
# polas = []
# subj = []
# subjs = []
# for index, row in df.iterrows():
#     analysis = TextBlob(row['title'])
#     pola.append(analysis.sentiment[0])
#     subj.append(analysis.sentiment[1])
#     if type(row['description']) == type('str'):
#         analysis2 = TextBlob(row['description'])
#         polas.append(analysis2.sentiment[0])
#         subjs.append(analysis2.sentiment[1])
#     else:
#         polas.append(0)
#         subjs.append(0)
# df['polarity'] = pola
# df['subjectivity'] = subj
# df['polarity_description'] = polas
# df['subjectivity_description'] = subjs
# print(df.head(5))
# df.to_csv('out.csv')

# Same pass for the UK dataset (commented out):
# df = pd.read_csv('UKvideos_new.csv', engine='python')
# print(df.head(5))
# from textblob import TextBlob
# pola = []
# polas = []
# subj = []
# subjs = []
# for index, row in df.iterrows():
#     analysis = TextBlob(row['title'])
#     pola.append(analysis.sentiment[0])
#     subj.append(analysis.sentiment[1])
#     if type(row['description']) == type('str'):
#         analysis2 = TextBlob(row['description'])
#         polas.append(analysis2.sentiment[0])
#         subjs.append(analysis2.sentiment[1])
#     else:
#         polas.append(0)
#         subjs.append(0)
# df['polarity'] = pola
# df['subjectivity'] = subj
# df['polarity_description'] = polas
# df['subjectivity_description'] = subjs
# print(df.head(5))
# df.to_csv('outuk.csv')

# Active pass: the Canadian dataset.
df = pd.read_csv('CAvideos_new.csv', engine='python')
print(df.head(5))

from textblob import TextBlob

pola = []    # title polarity
polas = []   # description polarity
subj = []    # title subjectivity
subjs = []   # description subjectivity
for index, row in df.iterrows():
    analysis = TextBlob(row['title'])
    pola.append(analysis.sentiment[0])
    subj.append(analysis.sentiment[1])
    # Only score descriptions that are actual strings (missing descriptions come in as NaN floats).
    if type(row['description']) == type('str'):
        analysis2 = TextBlob(row['description'])
        polas.append(analysis2.sentiment[0])
        subjs.append(analysis2.sentiment[1])
    else:
        polas.append(0)
        subjs.append(0)
df['polarity'] = pola
df['subjectivity'] = subj
df['polarity_description'] = polas
df['subjectivity_description'] = subjs
print(df.head(5))
# ...
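The key call in this script is TextBlob(...).sentiment, which returns a (polarity, subjectivity) pair; indexing it with [0] and [1], as done with analysis and analysis2 above, picks out those two values. A small standalone check might look like the sketch below; the example strings are made up, only the .sentiment API is the point.

# Standalone check of the TextBlob sentiment call used above (example strings are invented).
from textblob import TextBlob

title = "This new camera is absolutely amazing"
description = "A short, honest review of the camera and its lenses."

analysis = TextBlob(title)
analysis2 = TextBlob(description)

print(analysis.sentiment)        # Sentiment(polarity=..., subjectivity=...)
print(analysis2.sentiment[0])    # polarity in [-1, 1]
print(analysis2.sentiment[1])    # subjectivity in [0, 1]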


analysis2_csv_tweets_genre_yrmnth.py

Source: analysis2_csv_tweets_genre_yrmnth.py (GitHub)


# Analysis-2 starting here: average (retweets + favourites) per genre, year, and release month.
import pandas as pd
import datetime

gen = pd.read_csv('genre.csv')
tweets = pd.read_csv('processed.csv')
mv = pd.read_csv('processed_movies.csv')

# Tweet-level counts; .copy() avoids a SettingWithCopyWarning when adding the derived column.
analysis2 = tweets[['imdbID', 'created_year', 'created_month', 'retweet_count', 'favorite_count']].copy()
analysis2['calc_ret_fav_count'] = analysis2['retweet_count'] + analysis2['favorite_count']

# Movie release dates, split into year / month number / month name.
mv = mv[['Released', 'imdbID']]
mv = mv[~(mv['Released'].isnull())]
mv['Released_DateTime'] = mv['Released'].apply(lambda x: pd.to_datetime(str(x), format='%d %b %Y'))
mv['Released_Year'] = mv['Released'].apply(lambda x: pd.to_datetime(str(x), format='%d %b %Y').year)
mv['Released_Year'] = pd.to_numeric(mv['Released_Year']).round()
mv['Released_Month'] = mv['Released'].apply(lambda x: pd.to_datetime(str(x), format='%d %b %Y').month)
mv['Released_Monthname'] = mv['Released'].apply(lambda x: pd.to_datetime(str(x), format='%d %b %Y').strftime('%b'))
# mv['Released_Month'] = mv['Released'].month

mv = mv.merge(gen, left_on=['imdbID'], right_on=['imdbID'], how='inner')
# print(mv.head(5))

# Keep only tweets created in the movie's release year, then average the combined count per group.
analysis2 = analysis2.merge(mv, left_on=['imdbID', 'created_year'], right_on=['imdbID', 'Released_Year'], how='inner')
analysis2 = analysis2[['Genre', 'calc_ret_fav_count', 'created_year', 'Released_Monthname', 'Released_Month']]
analysis2 = analysis2.groupby(['Genre', 'created_year', 'Released_Monthname', 'Released_Month'], as_index=False)['calc_ret_fav_count'].mean()
# ...
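The final line is the core of the analysis: after merging tweets with genre and release information, it averages the combined retweet + favourite count per (Genre, year, month) group. The toy example below, with an invented DataFrame and numbers, shows the same groupby(...).mean() pattern in isolation.

# Toy illustration of the grouping step; the data values are invented.
import pandas as pd

toy = pd.DataFrame({
    'Genre': ['Drama', 'Drama', 'Comedy'],
    'created_year': [2015, 2015, 2015],
    'Released_Monthname': ['Jan', 'Jan', 'Feb'],
    'Released_Month': [1, 1, 2],
    'calc_ret_fav_count': [10, 20, 5],
})

result = toy.groupby(
    ['Genre', 'created_year', 'Released_Monthname', 'Released_Month'],
    as_index=False)['calc_ret_fav_count'].mean()
print(result)   # Drama/Jan averages to 15.0, Comedy/Feb stays at 5.0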


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run autotest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

