How to use the percentage method in Kiwi

Best Python code snippets using Kiwi_python. The examples below are taken from open-source projects on GitHub and show common ways of working with percentages and percentiles, from numpy/scipy/pandas calculations to percentage-based grid layouts.
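For orientation, the snippets below lean on a handful of standard calls: numpy's np.percentile returns the value at a given percentile, scipy's stats.percentileofscore returns the percentile rank of a value, and pandas' Series.quantile is the DataFrame-friendly equivalent. A minimal sketch with made-up numbers (not taken from any of the snippets):

import numpy as np
import pandas as pd
from scipy import stats

values = [12.0, 7.5, 30.2, 3.1, 18.9]           # hypothetical sample data

p95 = np.percentile(values, 95)                 # value at the 95th percentile
rank = stats.percentileofscore(values, 18.9)    # percentile rank of a single value
q20 = pd.Series(values).quantile(0.2)           # pandas equivalent, on a 0-1 scale

print(p95, rank, q20)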

paint.py

Source: paint.py (GitHub)

# encoding: utf-8
# Plots factor-of-improvement bars and CDFs of weighted completion times
# for several coflow schedulers, read from per-scheduler trace files.
import numpy as np
import matplotlib.pyplot as plt
import sys
from scipy import stats
from matplotlib import rcParams

rcParams.update({'font.size': 10, 'font.weight': 'bold'})
patterns = ('/', '//', '-', '+', 'x', '\\', '\\\\', '*', 'o', 'O', '.')

# first read from file
pFabric = "pFabric"
Varys = "Varys"
Barrat = "Barrat"
Yosemite = "Yosemite"
Fair = "FAIR"
DARK = 'DARK'
SHORT = 20 * 1024 * 1024 * 1024
NARROW = 140


def getAverage(arraylist):
    return np.mean(arraylist)


def getRange(arraylist, element):
    # percentile rank of `element` within `arraylist`
    return stats.percentileofscore(arraylist, element)


def getElements(arraylist, percentage):
    # keep only the elements at or below the given percentile rank
    result = []
    for element in arraylist:
        pos = getRange(arraylist, element)
        if pos <= percentage:
            result.append(element)
    return result


def getPercentageResult(path, percentage):
    # sum weighted durations per job class, restricted to the given percentile
    f = open(path, "r")
    totaline = f.readlines()
    wc1 = []
    wc2 = []
    wc3 = []
    wc4 = []
    wc = []
    for line in totaline:
        if line[0] == 'J':
            arrayline = line.split()
            # analyze job
            jobname = arrayline[0]
            starttime = float(arrayline[1])
            finishtime = float(arrayline[2])
            mappers = int(arrayline[3])
            reducers = int(arrayline[4])
            totalshuffle = float(arrayline[5])
            maxshuffle = float(arrayline[6])
            duration = float(arrayline[7])
            deadlineduration = float(arrayline[8])
            shufflesum = float(arrayline[9])
            weight = float(arrayline[10])
            width = mappers
            if mappers < reducers:
                width = reducers
            else:
                width = mappers
            if maxshuffle < SHORT and width < NARROW:
                wc1.append(weight * duration)
            elif maxshuffle >= SHORT and width < NARROW:
                wc2.append(weight * duration)
            elif maxshuffle < SHORT and width > NARROW:
                wc3.append(weight * duration)
            else:
                wc4.append(weight * duration)
    # wc = wc1 + wc2 + wc3 + wc4
    f.close()
    wc1add = 0
    wc2add = 0
    wc3add = 0
    wc4add = 0
    wcadd = 0
    wc1 = getElements(wc1, percentage)
    wc2 = getElements(wc2, percentage)
    wc3 = getElements(wc3, percentage)
    wc4 = getElements(wc4, percentage)
    for element in wc1:
        wc1add += element
    for element in wc2:
        wc2add += element
    for element in wc3:
        wc3add += element
    for element in wc4:
        wc4add += element
    return [wc1add, wc2add, wc3add, wc4add, wc1add + wc2add + wc3add + wc4add]


def getPercentile(arraylist, percentage):
    a = np.array(arraylist)
    p = np.percentile(a, percentage)
    return p


def getWcResult(path):
    # per-job weighted completion times, in seconds
    f = open(path, "r")
    totaline = f.readlines()
    wc = []
    for line in totaline:
        if line[0] == 'J':
            arrayline = line.split()
            # analyze job
            jobname = arrayline[0]
            starttime = float(arrayline[1])
            finishtime = float(arrayline[2])
            mappers = int(arrayline[3])
            reducers = int(arrayline[4])
            totalshuffle = float(arrayline[5])
            maxshuffle = float(arrayline[6])
            duration = float(arrayline[7])
            deadlineduration = float(arrayline[8])
            shufflesum = float(arrayline[9])
            weight = float(arrayline[10])
            width = mappers
            wc.append(weight * duration / 1000)
    f.close()
    return wc


def getResult(path):
    # sum weighted durations per job class over the whole trace
    f = open(path, "r")
    totaline = f.readlines()
    bin1 = 0
    bin2 = 0
    bin3 = 0
    bin4 = 0
    wc1 = 0
    wc2 = 0
    wc3 = 0
    wc4 = 0
    wc = 0
    for line in totaline:
        if line[0] == 'J':
            arrayline = line.split()
            # analyze job
            jobname = arrayline[0]
            starttime = float(arrayline[1])
            finishtime = float(arrayline[2])
            mappers = int(arrayline[3])
            reducers = int(arrayline[4])
            totalshuffle = float(arrayline[5])
            maxshuffle = float(arrayline[6])
            duration = float(arrayline[7])
            deadlineduration = float(arrayline[8])
            shufflesum = float(arrayline[9])
            weight = float(arrayline[10])
            width = mappers
            if mappers < reducers:
                width = reducers
            else:
                width = mappers
            if maxshuffle < SHORT and width < NARROW:
                wc1 += weight * duration
                bin1 += 1
            elif maxshuffle >= SHORT and width < NARROW:
                wc2 += weight * duration
                bin2 += 1
            elif maxshuffle < SHORT and width > NARROW:
                wc3 += weight * duration
                bin3 += 1
            else:
                wc4 += weight * duration
                bin4 += 1
    wc = wc1 + wc2 + wc3 + wc4
    f.close()
    return [wc1, wc2, wc3, wc4, wc]


def frac(v, x):
    # fraction of values in v below x (empirical CDF at x)
    n = 0
    for i in v:
        if i < x:
            n = n + 1
    return float(n) / float(len(v))


if __name__ == '__main__':
    Barratwc = getResult(Barrat)
    Varyswc = getResult(Varys)
    Yosemitewc = getResult(Yosemite)
    pFabricwc = getResult(pFabric)
    Fairwc = getResult(Fair)
    Darkwc = getResult(DARK)

    VarysResult = []
    YosemiteResult = []
    BarratResult = []
    pFabricResult = []
    FairResult = []
    DarkResult = []
    percentageVaryswc = getPercentageResult(Varys, 95)
    percentageYosemitewc = getPercentageResult(Yosemite, 95)
    percentageBarratwc = getPercentageResult(Barrat, 95)
    percentagepFabricwc = getPercentageResult(pFabric, 95)
    percentageFairwc = getPercentageResult(Fair, 95)
    percentageDarkwc = getPercentageResult(DARK, 95)
    percentageVarysResult = []
    percentageYosemiteResult = []
    percentageBarratResult = []
    percentagepFabricResult = []
    percentageFairResult = []
    percentageDarkResult = []
    for i in range(0, 5):
        VarysResult.append(percentageFairwc[i] / Varyswc[i])
        percentageVarysResult.append(percentageFairwc[i] / percentageVaryswc[i])
        YosemiteResult.append(percentageFairwc[i] / Yosemitewc[i])
        percentageYosemiteResult.append(percentageFairwc[i] / percentageYosemitewc[i])
        BarratResult.append(percentageFairwc[i] / Barratwc[i])
        percentageBarratResult.append(percentageFairwc[i] / percentageBarratwc[i])
        DarkResult.append(percentageFairwc[i] / Darkwc[i])
        percentageDarkResult.append(percentageFairwc[i] / percentageDarkwc[i])
    N = 5
    ind = np.arange(N)  # the x locations for the groups
    width = 0.1         # the width of the bars
    fig, ax = plt.subplots(figsize=(12, 6))
    rects1 = ax.bar(ind, BarratResult, width, hatch="+", color='r', ecolor='k')
    rects2 = ax.bar(ind + width, DarkResult, width, hatch="+", color='g', ecolor='k')
    rects3 = ax.bar(ind + 2 * width, VarysResult, width, hatch='-', color='white', ecolor='k')
    rects4 = ax.bar(ind + 3 * width, YosemiteResult, width, hatch='+', color='k', ecolor='k')
    rects5 = ax.bar(ind + 4 * width, percentageBarratResult, width, hatch="+", color='#FF7256', ecolor='k')
    rects6 = ax.bar(ind + 5 * width, percentageDarkResult, width, hatch="+", color='#00FF00', ecolor='k')
    rects7 = ax.bar(ind + 6 * width, percentageVarysResult, width, hatch='-', color='#EEE9E9', ecolor='k')
    rects8 = ax.bar(ind + 7 * width, percentageYosemiteResult, width, hatch='+', color='#696969', ecolor='k')
    ax.set_xticks(ind + width)
    ax.set_xticklabels(('SHORT & NARROW', 'LONG & NARROW', 'SHORT & WIDTH', 'LONG & WIDTH', 'ALL'))
    ax.legend((rects1[0], rects2[0], rects3[0], rects4[0], rects5[0], rects6[0], rects7[0], rects8[0]),
              ('Barrat', 'Aalo', 'Vary', 'Yosemite', 'Barrat(95th)', 'Aalo(95th)', 'Vary(95th)', 'Yosemite(95th)'),
              loc=0)
    ax.set_ylabel('Factor of Improvement', fontsize=12, fontweight='bold')
    ax.set_ylim([0, 5])
    ax.set_xlabel('coflow types', fontsize=12, fontweight='bold')
    # plt.figure(figsize=(12,3))
    # plt.show()
    fig.savefig("weight_real_type.eps")
    fig, ax = plt.subplots(figsize=(4.5, 6))
    x = np.linspace(0, 1000, 10)
    Yosemitewc = getWcResult(Yosemite)
    Varyswc = getWcResult(Varys)
    Fairwc = getWcResult(Fair)
    pFabricwc = getWcResult(pFabric)
    Barratwc = getWcResult(Barrat)
    Darkwc = getWcResult(DARK)
    YosemiteCDF = []
    VarysCDF = []
    FairCDF = []
    pFabricCDF = []
    BarratCDF = []
    AaloCDF = []
    for v in x:
        YosemiteCDF.append(frac(Yosemitewc, v))
        VarysCDF.append(frac(Varyswc, v))
        FairCDF.append(frac(Fairwc, v))
        pFabricCDF.append(frac(pFabricwc, v))
        BarratCDF.append(frac(Barratwc, v))
        AaloCDF.append(frac(Darkwc, v))
    ax.plot(x, BarratCDF, linewidth=3, color='b', label='Aalo')
    ax.plot(x, FairCDF, linewidth=3, color='r', label='Fair')
    ax.plot(x, AaloCDF, linewidth=3, color='g', label='Barrat')
    ax.plot(x, YosemiteCDF, linewidth=3, color='k', label='Yosemite')

    ax.legend(loc='lower right')
    plt.ylabel('CDF', fontsize=12, fontweight='bold')
    plt.xlabel('weight completion time(s)', fontsize=12, fontweight='bold')
    plt.show()
...
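paint.py trims each workload class to its bottom 95 percent (by percentile rank) before summing, via getElements() and getPercentile(). The same filtering can be reproduced in isolation; a small sketch with invented values, not the script's trace data:

import numpy as np
from scipy import stats

# Hypothetical weighted completion times; the script above reads these from trace files.
values = [4.0, 9.5, 2.2, 40.0, 7.7, 15.3, 120.0, 6.1]

# Same idea as getElements(values, 95): keep values whose percentile rank is at most 95.
trimmed = [v for v in values if stats.percentileofscore(values, v) <= 95]

# Same idea as getPercentile(values, 95): the value at the 95th percentile.
p95 = np.percentile(values, 95)

print(trimmed, p95)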

grid.default.config.js

Source: grid.default.config.js (GitHub)

// TODO: What is this file? Is it used?? I don't think so
var uSkyGridConfig = [
    {
        style: [
            {
                label: "Set a background image",
                description: "Set a row background",
                key: "background-image",
                view: "imagepicker",
                modifier: "url({0})"
            },
            {
                label: "Set a font color",
                description: "Pick a color",
                key: "color",
                view: "colorpicker"
            }
        ],
        config: [
            {
                label: "Preview",
                description: "Display a live preview",
                key: "preview",
                view: "boolean"
            },
            {
                label: "Class",
                description: "Set a css class",
                key: "class",
                view: "textstring"
            }
        ],
        layouts: [
            {
                grid: 12,
                percentage: 100,
                rows: [
                    { name: "Single column", columns: [{ grid: 12, percentage: 100 }] },
                    { name: "Article", models: [{ grid: 4, percentage: 33.3, allowed: ["media", "quote"] }, { grid: 8, percentage: 66.6, allowed: ["rte"] }] },
                    { name: "Article, reverse", models: [{ grid: 8, percentage: 66.6, allowed: ["rte", "macro"] }, { grid: 4, percentage: 33.3, allowed: ["media", "quote", "embed"] }] },
                    { name: "Profile page", models: [{ grid: 4, percentage: 33.3, allowed: ["media"] }, { grid: 8, percentage: 66.6, allowed: ["rte"] }] },
                    { name: "Headline", models: [{ grid: 12, percentage: 100, max: 1, allowed: ["headline"] }] },
                    { name: "Three columns", models: [{ grid: 4, percentage: 33.3, allowed: ["rte"] }, { grid: 4, percentage: 33.3, allowed: ["rte"] }, { grid: 4, percentage: 33.3, allowed: ["rte"] }] }
                ]
            }
        ]
    },
    {
        columns: [
            {
                grid: 9,
                percentage: 70,
                cellModels: [
                    { models: [{ grid: 12, percentage: 100 }] },
                    { models: [{ grid: 6, percentage: 50 }, { grid: 6, percentage: 50 }] },
                    { models: [{ grid: 4, percentage: 33.3 }, { grid: 4, percentage: 33.3 }, { grid: 4, percentage: 33.3 }] },
                    { models: [{ grid: 3, percentage: 25 }, { grid: 3, percentage: 25 }, { grid: 3, percentage: 25 }, { grid: 3, percentage: 25 }] },
                    { models: [{ grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }] },
                    { models: [{ grid: 8, percentage: 60 }, { grid: 4, percentage: 40 }] },
                    { models: [{ grid: 4, percentage: 40 }, { grid: 8, percentage: 60 }] }
                ]
            },
            {
                grid: 3,
                percentage: 30,
                cellModels: [
                    { models: [{ grid: 12, percentage: 100 }] }
                ]
            }
        ]
    },
    {
        columns: [
            {
                grid: 3,
                percentage: 30,
                cellModels: [
                    { models: [{ grid: 12, percentage: 100 }] }
                ]
            },
            {
                grid: 9,
                percentage: 70,
                cellModels: [
                    { models: [{ grid: 12, percentage: 100 }] },
                    { models: [{ grid: 6, percentage: 50 }, { grid: 6, percentage: 50 }] },
                    { models: [{ grid: 4, percentage: 33.3 }, { grid: 4, percentage: 33.3 }, { grid: 4, percentage: 33.3 }] },
                    { models: [{ grid: 3, percentage: 25 }, { grid: 3, percentage: 25 }, { grid: 3, percentage: 25 }, { grid: 3, percentage: 25 }] },
                    { models: [{ grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }, { grid: 2, percentage: 16.6 }] },
                    { models: [{ grid: 8, percentage: 60 }, { grid: 4, percentage: 40 }] },
                    { models: [{ grid: 4, percentage: 40 }, { grid: 8, percentage: 60 }] }
                ]
            }
        ]
    },
    {
        columns: [
            { grid: 4, percentage: 33.3, cellModels: [{ models: [{ grid: 12, percentage: 100 }] }] },
            { grid: 4, percentage: 33.3, cellModels: [{ models: [{ grid: 12, percentage: 100 }] }] },
            { grid: 4, percentage: 33.3, cellModels: [{ models: [{ grid: 12, percentage: 100 }] }] }
        ]
    }
...
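Most percentage values in this grid config are simply the column's share of a 12-unit grid, truncated to one decimal (12 to 100, 4 to 33.3, 2 to 16.6), while a few pairs are hand-tuned (9/3 as 70/30, 8/4 as 60/40). If you generate such layouts rather than hand-write them, the default mapping can be computed; a small Python sketch under that assumption:

import math

def grid_percentage(units, total_units=12):
    # Share of a `total_units`-wide grid, truncated to one decimal place
    # so that 8 -> 66.6 and 2 -> 16.6, matching the config above.
    return math.floor(units / total_units * 1000) / 10

for units in (12, 8, 6, 4, 3, 2):
    print(units, grid_percentage(units))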

4_InstagramRanking.py

Source: 4_InstagramRanking.py (GitHub)

import pandas as pd
import numpy as np

df = pd.read_csv("usersInstagramPercentages.csv")
df1 = df.loc[df['micro'] == 1]  # only micro influencers dataset to evaluate their percentiles
scores = []
for i in range(0, len(df['id'])):
    score = 0
    # score by followers
    if 5000 <= df['followers'].iloc[i] <= df1['followers'].quantile(0.2):
        score += 2.5
    elif df1['followers'].quantile(0.2) < df['followers'].iloc[i] <= df1['followers'].quantile(0.4):
        score += 5
    elif df1['followers'].quantile(0.4) < df['followers'].iloc[i] <= df1['followers'].quantile(0.6):
        score += 7.5
    elif df1['followers'].quantile(0.6) < df['followers'].iloc[i] <= df1['followers'].quantile(0.8):
        score += 10
    elif df1['followers'].quantile(0.8) < df['followers'].iloc[i] <= 100000:
        score += 12.5
    # score by followers following ratio
    if 2 <= df['followers_following_ratio'].iloc[i] <= df1['followers_following_ratio'].quantile(0.2):
        score += 2.5
    elif df1['followers_following_ratio'].quantile(0.2) < df['followers_following_ratio'].iloc[i] <= df1['followers_following_ratio'].quantile(0.4):
        score += 5
    elif df1['followers_following_ratio'].quantile(0.4) < df['followers_following_ratio'].iloc[i] <= df1['followers_following_ratio'].quantile(0.6):
        score += 7.5
    elif df1['followers_following_ratio'].quantile(0.6) < df['followers_following_ratio'].iloc[i] <= df1['followers_following_ratio'].quantile(0.8):
        score += 10
    elif df['followers_following_ratio'].iloc[i] > df1['followers_following_ratio'].quantile(0.8):
        score += 12.5
    # score by followers per media
    if 2 <= df['followers_per_media'].iloc[i] <= df1['followers_per_media'].quantile(0.2):
        score += 2.5
    elif df1['followers_per_media'].quantile(0.2) < df['followers_per_media'].iloc[i] <= df1['followers_per_media'].quantile(0.4):
        score += 5
    elif df1['followers_per_media'].quantile(0.4) < df['followers_per_media'].iloc[i] <= df1['followers_per_media'].quantile(0.6):
        score += 7.5
    elif df1['followers_per_media'].quantile(0.6) < df['followers_per_media'].iloc[i] <= df1['followers_per_media'].quantile(0.8):
        score += 10
    elif df['followers_per_media'].iloc[i] > df1['followers_per_media'].quantile(0.8):
        score += 12.5
    # score by interactions
    if 0 <= df['interactions'].iloc[i] <= df1['interactions'].quantile(0.2):
        score += 2.5
    elif df1['interactions'].quantile(0.2) < df['interactions'].iloc[i] <= df1['interactions'].quantile(0.4):
        score += 5
    elif df1['interactions'].quantile(0.4) < df['interactions'].iloc[i] <= df1['interactions'].quantile(0.6):
        score += 7.5
    elif df1['interactions'].quantile(0.6) < df['interactions'].iloc[i] <= df1['interactions'].quantile(0.8):
        score += 10
    elif df['interactions'].iloc[i] > df1['interactions'].quantile(0.8):
        score += 12.5
    # score by topic % in captions
    if 0 <= df['topicInCaptionsPercentage'].iloc[i] <= df1['topicInCaptionsPercentage'].quantile(0.2):
        score += 2.5
    elif df1['topicInCaptionsPercentage'].quantile(0.2) < df['topicInCaptionsPercentage'].iloc[i] <= df1['topicInCaptionsPercentage'].quantile(0.4):
        score += 5
    elif df1['topicInCaptionsPercentage'].quantile(0.4) < df['topicInCaptionsPercentage'].iloc[i] <= df1['topicInCaptionsPercentage'].quantile(0.6):
        score += 7.5
    elif df1['topicInCaptionsPercentage'].quantile(0.6) < df['topicInCaptionsPercentage'].iloc[i] <= df1['topicInCaptionsPercentage'].quantile(0.8):
        score += 10
    elif df['topicInCaptionsPercentage'].iloc[i] > df1['topicInCaptionsPercentage'].quantile(0.8):
        score += 12.5
    # score by topic % in words
    if 0 <= df['topicInWordsPercentage'].iloc[i] <= df1['topicInWordsPercentage'].quantile(0.2):
        score += 2.5
    elif df1['topicInWordsPercentage'].quantile(0.2) < df['topicInWordsPercentage'].iloc[i] <= df1['topicInWordsPercentage'].quantile(0.4):
        score += 5
    elif df1['topicInWordsPercentage'].quantile(0.4) < df['topicInWordsPercentage'].iloc[i] <= df1['topicInWordsPercentage'].quantile(0.6):
        score += 7.5
    elif df1['topicInWordsPercentage'].quantile(0.6) < df['topicInWordsPercentage'].iloc[i] <= df1['topicInWordsPercentage'].quantile(0.8):
        score += 10
    elif df['topicInWordsPercentage'].iloc[i] > df1['topicInWordsPercentage'].quantile(0.8):
        score += 12.5
    # score by topic % in pics
    if 0 <= df['topicInPicsPercentage'].iloc[i] <= df1['topicInPicsPercentage'].quantile(0.92):
        score += 2.5
    elif df1['topicInPicsPercentage'].quantile(0.92) < df['topicInPicsPercentage'].iloc[i] <= df1['topicInPicsPercentage'].quantile(0.94):
        score += 5
    elif df1['topicInPicsPercentage'].quantile(0.94) < df['topicInPicsPercentage'].iloc[i] <= df1['topicInPicsPercentage'].quantile(0.96):
        score += 7.5
    elif df1['topicInPicsPercentage'].quantile(0.96) < df['topicInPicsPercentage'].iloc[i] <= df1['topicInPicsPercentage'].quantile(0.98):
        score += 10
    elif df['topicInPicsPercentage'].iloc[i] > df1['topicInPicsPercentage'].quantile(0.98):
        score += 12.5
    # score by topic % in pic words
    if 0 <= df['topicInPicsWordsPercentage'].iloc[i] <= df1['topicInPicsWordsPercentage'].quantile(0.92):
        score += 2.5
    elif df1['topicInPicsWordsPercentage'].quantile(0.92) < df['topicInPicsWordsPercentage'].iloc[i] <= df1['topicInPicsWordsPercentage'].quantile(0.94):
        score += 5
    elif df1['topicInPicsWordsPercentage'].quantile(0.94) < df['topicInPicsWordsPercentage'].iloc[i] <= df1['topicInPicsWordsPercentage'].quantile(0.96):
        score += 7.5
    elif df1['topicInPicsWordsPercentage'].quantile(0.96) < df['topicInPicsWordsPercentage'].iloc[i] <= df1['topicInPicsWordsPercentage'].quantile(0.98):
        score += 10
    elif df['topicInPicsWordsPercentage'].iloc[i] > df1['topicInPicsWordsPercentage'].quantile(0.98):
        score += 12.5
    scores.append(score)
df['scores'] = scores
half = df['scores'].quantile(0.5)
print(half)
df['microTopic'] = np.where(df['scores'] >= half, 1, 0)  # set microTopic to 1 if the score reaches the median (0.5 quantile) of the scores column
df.to_csv('usersInstagramMicroTopicCC.csv', encoding='UTF8', index=False)
...
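Each block above assigns one of five point values by comparing a column against the 0.2/0.4/0.6/0.8 quantiles of the micro-influencer subset (or 0.92 through 0.98 for the picture-topic columns). The same banding can be written once with pandas.cut; a sketch, assuming the band edges and point values mirror the original (behavior at the exact boundaries and at the hard limits such as 5000 and 100000 may differ slightly):

import pandas as pd

def quantile_score(values, reference, points=(2.5, 5, 7.5, 10, 12.5),
                   quantiles=(0.2, 0.4, 0.6, 0.8)):
    # Band edges come from the reference distribution, open at both ends.
    edges = [-float('inf')] + [reference.quantile(q) for q in quantiles] + [float('inf')]
    return pd.cut(values, bins=edges, labels=list(points)).astype(float)

# Hypothetical usage, mirroring the 'followers' block above:
# df['followers_score'] = quantile_score(df['followers'], df1['followers'])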

demographic_data_analyzer.py

Source: demographic_data_analyzer.py (GitHub)

import pandas as pd


def calculate_demographic_data(print_data=True):
    # Read data from file
    df = pd.read_csv('adult.data.csv')

    # How many of each race are represented in this dataset?
    # This should be a Pandas series with race names as the index labels.
    race_count = df['race'].value_counts()

    # What is the average age of men?
    average_age_men = round(df[df['sex'] == 'Male']['age'].mean(), ndigits=1)

    # What is the percentage of people who have a Bachelor's degree?
    percentage_bachelors = round((df[df['education'] == 'Bachelors'].shape[0] / df.shape[0]) * 100, ndigits=1)

    # What percentage of people with advanced education (`Bachelors`, `Masters`, or `Doctorate`) make more than 50K?
    # What percentage of people without advanced education make more than 50K?

    # with and without `Bachelors`, `Masters`, or `Doctorate`
    higher_education = df[df['education'].isin(['Bachelors', 'Masters', 'Doctorate'])]
    lower_education = df[~df['education'].isin(['Bachelors', 'Masters', 'Doctorate'])]

    # percentage with salary >50K
    higher_education_rich = round((higher_education[higher_education['salary'] == '>50K'].shape[0] / higher_education.shape[0]) * 100, ndigits=1)
    lower_education_rich = round((lower_education[lower_education['salary'] == '>50K'].shape[0] / lower_education.shape[0]) * 100, ndigits=1)

    # What is the minimum number of hours a person works per week (hours-per-week feature)?
    min_work_hours = df['hours-per-week'].min()

    # What percentage of the people who work the minimum number of hours per week have a salary of >50K?
    num_min_workers = df[df['hours-per-week'] == min_work_hours]
    rich_percentage = round(num_min_workers[num_min_workers['salary'] == '>50K'].shape[0] / num_min_workers.shape[0] * 100, ndigits=1)

    # What country has the highest percentage of people that earn >50K?
    people = df['native-country'].value_counts()
    rich = df[df['salary'] == '>50K']['native-country'].value_counts()
    highest_earning_country = (rich / people).sort_values(ascending=False).keys()[0]

    people_in_highest = df[df['native-country'] == highest_earning_country]
    rich_in_highest = people_in_highest[people_in_highest['salary'] == '>50K']
    highest_earning_country_percentage = round(rich_in_highest.shape[0] / people_in_highest.shape[0] * 100, ndigits=1)

    # Identify the most popular occupation for those who earn >50K in India.
    top_IN_occupation = df[df['salary'] == '>50K']
    top_IN_occupation = top_IN_occupation[top_IN_occupation['native-country'] == 'India']
    top_IN_occupation = top_IN_occupation['occupation'].value_counts().index[0]

    # DO NOT MODIFY BELOW THIS LINE
    if print_data:
        print("Number of each race:\n", race_count)
        print("Average age of men:", average_age_men)
        print(f"Percentage with Bachelors degrees: {percentage_bachelors}%")
        print(f"Percentage with higher education that earn >50K: {higher_education_rich}%")
        print(f"Percentage without higher education that earn >50K: {lower_education_rich}%")
        print(f"Min work time: {min_work_hours} hours/week")
        print(f"Percentage of rich among those who work fewest hours: {rich_percentage}%")
        print("Country with highest percentage of rich:", highest_earning_country)
        print(f"Highest percentage of rich people in country: {highest_earning_country_percentage}%")
        print("Top occupations in India:", top_IN_occupation)

    return {
        'race_count': race_count,
        'average_age_men': average_age_men,
        'percentage_bachelors': percentage_bachelors,
        'higher_education_rich': higher_education_rich,
        'lower_education_rich': lower_education_rich,
        'min_work_hours': min_work_hours,
        'rich_percentage': rich_percentage,
        'highest_earning_country': highest_earning_country,
        'highest_earning_country_percentage': highest_earning_country_percentage,
        'top_IN_occupation': top_IN_occupation
    }
...
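A recurring pattern in this snippet is "percentage of rows matching a condition", computed as filtered.shape[0] / total.shape[0] * 100. The same figure can also be read off a boolean mask, since the mean of a mask is the fraction of True values; a short sketch reusing the snippet's column names and CSV path:

import pandas as pd

df = pd.read_csv('adult.data.csv')

# Percentage of people with a Bachelors degree.
percentage_bachelors = round((df['education'] == 'Bachelors').mean() * 100, 1)

# Percentage of >50K earners among people with advanced education, same idea.
advanced = df['education'].isin(['Bachelors', 'Masters', 'Doctorate'])
higher_education_rich = round((df.loc[advanced, 'salary'] == '>50K').mean() * 100, 1)

print(percentage_bachelors, higher_education_rich)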

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Kiwi automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
