How to use record_endtime method in Radish

Best Python code snippet using radish

checking_time_series_data_fro_suspect_infect_time_+1_hour_and_outtime_1_hour.py

Source:checking_time_series_data_fro_suspect_infect_time_+1_hour_and_outtime_1_hour.py Github

copy

Full Screen

# -*- coding: utf-8 -*-
"""Checking time series data fro suspect_infect_time +1 hour and outtime - 1 hour.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1P5QAV8iLYCK6UA42OR4whtc2mzhKI-x4
"""
# Commented out IPython magic to ensure Python compatibility.
# NOTE(review): the bare `!pip install wfdb` magic was left uncommented in the
# original, which is a SyntaxError outside IPython/Colab. Run it manually in
# the notebook (or `pip install wfdb` in a shell) before executing this script.
# !pip install wfdb
import io
import pandas as pd
from IPython.display import display
import matplotlib.pyplot as plt
# %matplotlib inline
import numpy as np
import os
import shutil
import posixpath
import wfdb
import urllib.request
import datetime
from collections import namedtuple
from google.colab import files

# Upload the cohort CSV through the Colab file picker and load it into a
# DataFrame; the rest of the script keys off its columns (subject_id,
# icustay_id, sepsis_onsettime, intime, outtime, ...).
uploaded = files.upload()
df_waveform_exists = pd.read_csv(io.BytesIO(uploaded['new_df_waveform_exists.csv']))
#df_housrly_csvdata = pd.read_excel('df_TS_exists_withoutTEMP_hourlycounts.xls',)
# Dataset is now stored in a Pandas Dataframe
print('shape of the new df_Waveform_exists : ')
print(df_waveform_exists)

# Flag column: set to 1 later when a record contains all six required signals.
df_waveform_exists['6_sig_exists'] = ''

# One column triple per (event, hour-offset) window. The prefixes encode:
#   SI+h     : suspected infection time + h hours
#   OUT-h    : ICU outtime - h hours
#   SEPSIS+h / SEPSIS-h : sepsis onset time +/- h hours
#   SHOCK-h / SHOCK+h   : septic shock onset time -/+ h hours
# The '_Number_of_overlaping_records' misspelling is intentional: downstream
# code indexes these exact column names, so it must be preserved.
_event_windows = (
    ['SI+%d' % h for h in range(1, 5)]
    + ['OUT-%d' % h for h in range(1, 5)]
    + ['SEPSIS+%d' % h for h in range(1, 5)]
    + ['SEPSIS-%d' % h for h in range(1, 5)]
    + ['SHOCK-%d' % h for h in range(1, 5)]
    + ['SHOCK+%d' % h for h in range(1, 5)]
)
for _prefix in _event_windows:
    for _suffix in ('_timeoverlap_exists',
                    '_overlap_duration',
                    '_Number_of_overlaping_records'):
        # Same empty-string initialization the original applied column by column.
        df_waveform_exists[_prefix + _suffix] = ''

print(df_waveform_exists.columns)

# Widen pandas display limits so the full (wide) frame prints in the notebook.
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
print(df_waveform_exists)

import numpy
from collections import namedtuple

# Quick diagnostic pass: echo the key timestamps per ICU stay and flag rows
# whose sepsis onset time is missing (NaN stringifies to 'nan').
for index, row in df_waveform_exists.iterrows():
    print(row['subject_id'], row['icustay_id'], row['sepsis_onsettime'], row['intime'], row['outtime'])
    if str(row['sepsis_onsettime']) == 'nan':
        print('NULL SEPSIS ONNSET TIME')

#### to check SI TIME + 1, 2, 3, 4 HOURs
3, 4 HOURs119#### to check ICU outtime - 1, 2, 3, 4 HOURs120from collections import namedtuple121for index, row in df_waveform_exists.iterrows():122 #print(row['subject_id'], row['icustay_id'], row['sepsis_onsettime'],row['intime'],row['outtime'])123 records_toRead=[]124 wdb_dir_path = 'mimic3wdb/matched/p'+ str(row['subject_id']).zfill(6)[:2] + '/p' + str(row['subject_id']).zfill(6) + '/';125 wdb_path_toAllRecords = 'https://archive.physionet.org/physiobank/database/'+ wdb_dir_path + 'RECORDS';126 wdb_records = urllib.request.urlopen(wdb_path_toAllRecords); 127 count_overlaping_records_1 = 0 128 overlap_duration_1 = '';129 count_overlaping_records_2 = 0 130 overlap_duration_2 = '';131 count_overlaping_records_3 = 0 132 overlap_duration_3 = '';133 count_overlaping_records_4 = 0 134 overlap_duration_4 = '';135 ###############################136 count_overlaping_records_outtime_1 = 0 137 overlap_duration_outtime_1 = '';138 count_overlaping_records_outtime_2 = 0 139 overlap_duration_outtime_2 = '';140 count_overlaping_records_outtime_3 = 0 141 overlap_duration_outtime_3 = '';142 count_overlaping_records_outtime_4 = 0 143 overlap_duration_outtime_4 = '' ;144 ########################################145 overlap_sepsis_plus_duration_1 = '';146 count_overlaping_records_sepsis_plus_1 = 0 147 overlap_sepsis_plus_duration_2 = '';148 count_overlaping_records_sepsis_plus_2 = 0 149 overlap_sepsis_plus_duration_3 = '';150 count_overlaping_records_sepsis_plus_3 = 0 151 overlap_sepsis_plus_duration_4 = '';152 count_overlaping_records_sepsis_plus_4 = 0 153 ##########################################154 overlap_sepsis_minus_duration_1 = '';155 count_overlaping_records_sepsis_minus_1 = 0 156 overlap_sepsis_minus_duration_2= '';157 count_overlaping_records_sepsis_minus_2 = 0 158 overlap_sepsis_minus_duration_3= '';159 count_overlaping_records_sepsis_minus_3 = 0 160 overlap_sepsis_minus_duration_4= '';161 count_overlaping_records_sepsis_minus_4 = 0 162 
###############################################163 overlap_shock_plus_duration_1 = '';164 count_overlaping_records_shock_plus_1 = 0 165 overlap_shock_plus_duration_2 = '';166 count_overlaping_records_shock_plus_2 = 0 167 overlap_shock_plus_duration_3 = '';168 count_overlaping_records_shock_plus_3 = 0 169 overlap_shock_plus_duration_4 = '';170 count_overlaping_records_shock_plus_4 = 0 171 ########################################172 overlap_shock_minus_duration_1 = '';173 count_overlaping_records_shock_minus_1 = 0 174 overlap_shock_minus_duration_2 = '';175 count_overlaping_records_shock_minus_2 = 0 176 overlap_shock_minus_duration_3 = '';177 count_overlaping_records_shock_minus_3 = 0 178 overlap_shock_minus_duration_4 = '';179 count_overlaping_records_shock_minus_4 = 0 180 for lines in wdb_records.readlines():181 record = lines.decode("utf-8"); 182 record = str(record).rstrip()183 #print (record[-1:])184 if record[-1:] == 'n':185 #print(record);186 #print (wdb_dir_path);187 record = str(record).rstrip()188 try:189 signals,fields = wfdb.rdsamp(record, pn_dir=wdb_dir_path) ; 190 #wfdb.plot_items(signal=signals, fs=fields['fs'])191 #display(signals)192 #display(fields)193 #print ('fs' , fields['fs']);194 #print ('signal length',fields['sig_len']);195 #print ('date' ,fields['base_date'] ); 196 #print ('time' ,fields['base_time'] );197 #print ('%.3f'%(fields['fs']))198 199 list_sig_name = [item.upper().replace(' ','') for item in fields['sig_name']]200 sig_exist_1 = all(x in list_sig_name for x in ['HR', 'SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP']); #%SpO2201 sig_exist_2 = all(x in list_sig_name for x in ['HR', '%SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP']); 202 if ((sig_exist_1 == True) or (sig_exist_2 == True)) :203 df_waveform_exists.loc[index,'6_sig_exists'] = 1;204 record_starttime = datetime.datetime.combine(fields['base_date'] ,fields['base_time'] ) ;205 206 if '%.3f'%(fields['fs']) == '1.000' :207 record_endtime = record_starttime + datetime.timedelta(seconds= 
(fields['sig_len']-1)) ;208 elif '%.3f'%(fields['fs'])== '0.017' :209 record_endtime = record_starttime + datetime.timedelta(minutes = (fields['sig_len']-1)) ;210 else : 211 print('ERROR IN SAMPLING');212 print(record);213 print (wdb_dir_path);214 #Caculate if we have a recording for the time of icu stay215 print('record start time: ', record_starttime)216 print('record end time: ', record_endtime)217 print('suspected infection time: ' , row['suspected_infection_time_poe'])218 print('ICU outtime: ' , row['outtime'])219 if str(row['suspected_infection_time_poe']) != 'nan' : 220 #for suspected innfection time + 1 hour221 Range='';222 r1='';223 r2='';224 Range = namedtuple('Range', ['start', 'end'])225 r1 = Range(start= datetime.datetime.strptime(row['suspected_infection_time_poe'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['suspected_infection_time_poe'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=1) ) )226 r2 = Range(start= record_starttime, end = record_endtime)227 latest_start = max(r1.start, r2.start)228 earliest_end = min(r1.end, r2.end)229 print('latest_start: ' ,latest_start )230 print('earliest_end: ' ,earliest_end )231 232 if (r1.start <= r2.end) and (r2.start <= r1.end) :233 print('SI PLUS 1 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])234 delta=0;235 delta = ((earliest_end - latest_start).seconds )/60 236 overlap_duration_1 = overlap_duration_1 + ',' + str(delta)237 df_waveform_exists.loc[index,'SI+1_timeoverlap_exists'] = 1;238 count_overlaping_records_1 = count_overlaping_records_1 +1 ;239 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency240 else: 241 print('SI PLUS 1 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])242 #df_csvdata.loc[index,'timeoverlap'] = 0;243 #for suspected innfection time + 2 hour244 Range='';245 r1='';246 
r2='';247 Range = namedtuple('Range', ['start', 'end'])248 #datetime.timedelta(hours=24)249 r1 = Range(start= datetime.datetime.strptime(row['suspected_infection_time_poe'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['suspected_infection_time_poe'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=2) ) )250 r2 = Range(start= record_starttime, end = record_endtime)251 latest_start = max(r1.start, r2.start)252 earliest_end = min(r1.end, r2.end)253 print('latest_start: ' ,latest_start )254 print('earliest_end: ' ,earliest_end )255 256 if (r1.start <= r2.end) and (r2.start <= r1.end) :257 print('SI PLUS 2 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])258 delta = 0;259 delta = ((earliest_end - latest_start).seconds )/60260 overlap_duration_2 = overlap_duration_2 + ',' + str(delta)261 df_waveform_exists.loc[index,'SI+2_timeoverlap_exists'] = 1;262 count_overlaping_records_2 = count_overlaping_records_2 +1 ;263 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency264 else: 265 print('SI PLUS 2 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])266 #df_csvdata.loc[index,'timeoverlap'] = 0;267 268 #for suspected innfection time + 3 hour269 Range='';270 r1='';271 r2='';272 Range = namedtuple('Range', ['start', 'end'])273 r1 = Range(start= datetime.datetime.strptime(row['suspected_infection_time_poe'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['suspected_infection_time_poe'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=3) ) )274 r2 = Range(start= record_starttime, end = record_endtime)275 latest_start = max(r1.start, r2.start)276 earliest_end = min(r1.end, r2.end)277 print('latest_start: ' ,latest_start )278 print('earliest_end: ' ,earliest_end )279 280 if (r1.start <= r2.end) and (r2.start <= r1.end) :281 print('SI PLUS 3 : RECORD EXISTS FOR THE ICU 
STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])282 delta = 0 ;283 delta = ((earliest_end - latest_start).seconds )/60284 overlap_duration_3 = overlap_duration_3 + ',' + str(delta)285 df_waveform_exists.loc[index,'SI+3_timeoverlap_exists'] = 1;286 count_overlaping_records_3 = count_overlaping_records_3 +1 ;287 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency288 else: 289 print('SI PLUS 3 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])290 #df_csvdata.loc[index,'timeoverlap'] = 0;291 292 #for suspected innfection time + 4 hour293 Range='';294 r1='';295 r2='';296 Range = namedtuple('Range', ['start', 'end'])297 r1 = Range(start= datetime.datetime.strptime(row['suspected_infection_time_poe'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['suspected_infection_time_poe'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=4) ) )298 r2 = Range(start= record_starttime, end = record_endtime)299 latest_start = max(r1.start, r2.start)300 earliest_end = min(r1.end, r2.end)301 print('latest_start: ' ,latest_start )302 print('earliest_end: ' ,earliest_end )303 304 if (r1.start <= r2.end) and (r2.start <= r1.end) :305 print('SI PLUS 4 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])306 delta = 0;307 delta = ((earliest_end - latest_start).seconds )/60308 overlap_duration_4 = overlap_duration_4 + ',' + str(delta)309 df_waveform_exists.loc[index,'SI+4_timeoverlap_exists'] = 1;310 count_overlaping_records_4 = count_overlaping_records_4 +1 ;311 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency312 else: 313 print('SI PLUS 4 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])314 #df_csvdata.loc[index,'timeoverlap'] 
= 0;315 #########################################################################################################################316 if str(row['sepsis_onsettime'] ) != 'nan' : 317 #for sepsis onset time + 1 hour318 Range='';319 r1='';320 r2='';321 Range = namedtuple('Range', ['start', 'end'])322 r1 = Range(start= datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=1) ) )323 r2 = Range(start= record_starttime, end = record_endtime)324 latest_start = max(r1.start, r2.start)325 earliest_end = min(r1.end, r2.end)326 print('latest_start: ' ,latest_start )327 print('earliest_end: ' ,earliest_end )328 329 if (r1.start <= r2.end) and (r2.start <= r1.end) :330 print('SO PLUS 1 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])331 delta=0;332 delta = ((earliest_end - latest_start).seconds )/60 333 overlap_sepsis_plus_duration_1 = overlap_sepsis_plus_duration_1 + ',' + str(delta)334 df_waveform_exists.loc[index,'SEPSIS+1_timeoverlap_exists'] = 1;335 count_overlaping_records_sepsis_plus_1 = count_overlaping_records_sepsis_plus_1 +1 ;336 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency337 else: 338 print('SO PLUS 1 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])339 #df_csvdata.loc[index,'timeoverlap'] = 0;340 #for sepsis_onsettime + 2 hour341 Range='';342 r1='';343 r2='';344 Range = namedtuple('Range', ['start', 'end'])345 #datetime.timedelta(hours=24)346 r1 = Range(start= datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=2) ) )347 r2 = Range(start= record_starttime, end = record_endtime)348 latest_start = max(r1.start, 
r2.start)349 earliest_end = min(r1.end, r2.end)350 print('latest_start: ' ,latest_start )351 print('earliest_end: ' ,earliest_end )352 353 if (r1.start <= r2.end) and (r2.start <= r1.end) :354 print('SO PLUS 2 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])355 delta = 0;356 delta = ((earliest_end - latest_start).seconds )/60357 overlap_sepsis_plus_duration_2 = overlap_sepsis_plus_duration_2 + ',' + str(delta)358 df_waveform_exists.loc[index,'SEPSIS+2_timeoverlap_exists'] = 1;359 count_overlaping_records_sepsis_plus_2 = count_overlaping_records_sepsis_plus_2 +1 ;360 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency361 else: 362 print('SO PLUS 2 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])363 #df_csvdata.loc[index,'timeoverlap'] = 0;364 365 #for sepsis_onsettime+ 3 hour366 Range='';367 r1='';368 r2='';369 Range = namedtuple('Range', ['start', 'end'])370 r1 = Range(start= datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=3) ) )371 r2 = Range(start= record_starttime, end = record_endtime)372 latest_start = max(r1.start, r2.start)373 earliest_end = min(r1.end, r2.end)374 print('latest_start: ' ,latest_start )375 print('earliest_end: ' ,earliest_end )376 377 if (r1.start <= r2.end) and (r2.start <= r1.end) :378 print('SO PLUS 3 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])379 delta = 0 ;380 delta = ((earliest_end - latest_start).seconds )/60381 overlap_sepsis_plus_duration_3 = overlap_sepsis_plus_duration_3 + ',' + str(delta)382 df_waveform_exists.loc[index,'SEPSIS+3_timeoverlap_exists'] = 1;383 count_overlaping_records_sepsis_plus_3 = count_overlaping_records_sepsis_plus_3 +1 ;384 #todo : 
adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency385 else: 386 print('SO PLUS 3 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])387 #df_csvdata.loc[index,'timeoverlap'] = 0;388 389 #for ssepsis_onsettime + 4 hour390 Range='';391 r1='';392 r2='';393 Range = namedtuple('Range', ['start', 'end'])394 r1 = Range(start= datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=4) ) )395 r2 = Range(start= record_starttime, end = record_endtime)396 latest_start = max(r1.start, r2.start)397 earliest_end = min(r1.end, r2.end)398 print('latest_start: ' ,latest_start )399 print('earliest_end: ' ,earliest_end )400 401 if (r1.start <= r2.end) and (r2.start <= r1.end) :402 print('SO PLUS 4 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])403 delta = 0;404 delta = ((earliest_end - latest_start).seconds )/60405 overlap_sepsis_plus_duration_4 = overlap_sepsis_plus_duration_4 + ',' + str(delta)406 df_waveform_exists.loc[index,'SEPSIS+4_timeoverlap_exists'] = 1;407 count_overlaping_records_sepsis_plus_4 = count_overlaping_records_sepsis_plus_4 +1 ;408 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency409 else: 410 print('SO PLUS 4 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])411 #df_csvdata.loc[index,'timeoverlap'] = 0;412#############################################################################413 #for sepsis onset time - 1 hour414 Range='';415 r1='';416 r2='';417 Range = namedtuple('Range', ['start', 'end'])418 r1 = Range(start= (datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') - 
datetime.timedelta(hours=1) ) , end= datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') )419 r2 = Range(start= record_starttime, end = record_endtime)420 latest_start = max(r1.start, r2.start)421 earliest_end = min(r1.end, r2.end)422 print('latest_start: ' ,latest_start )423 print('earliest_end: ' ,earliest_end )424 425 if (r1.start <= r2.end) and (r2.start <= r1.end) :426 print('SO minus 1 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])427 delta=0;428 delta = ((earliest_end - latest_start).seconds )/60 429 overlap_sepsis_minus_duration_1 = overlap_sepsis_minus_duration_1 + ',' + str(delta)430 df_waveform_exists.loc[index,'SEPSIS-1_timeoverlap_exists'] = 1;431 count_overlaping_records_sepsis_minus_1 = count_overlaping_records_sepsis_minus_1 + 1 ;432 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency433 else: 434 print('SO minus 1 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])435 #df_csvdata.loc[index,'timeoverlap'] = 0;436 #for sepsis_onsettime - 2 hour437 Range='';438 r1='';439 r2='';440 Range = namedtuple('Range', ['start', 'end'])441 #datetime.timedelta(hours=24)442 r1 = Range(start= (datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=2) ) , end= datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') )443 r2 = Range(start= record_starttime, end = record_endtime)444 latest_start = max(r1.start, r2.start)445 earliest_end = min(r1.end, r2.end)446 print('latest_start: ' ,latest_start )447 print('earliest_end: ' ,earliest_end )448 449 if (r1.start <= r2.end) and (r2.start <= r1.end) :450 print('SO minus 2 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])451 delta = 0;452 delta = ((earliest_end - latest_start).seconds )/60453 
overlap_sepsis_minus_duration_2 = overlap_sepsis_minus_duration_2 + ',' + str(delta)454 df_waveform_exists.loc[index,'SEPSIS-2_timeoverlap_exists'] = 1;455 count_overlaping_records_sepsis_minus_2 = count_overlaping_records_sepsis_minus_2 +1 ;456 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency457 else: 458 print('SO minus 2 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])459 #df_csvdata.loc[index,'timeoverlap'] = 0;460 461 #for sepsis_onsettime - 3 hour462 Range='';463 r1='';464 r2='';465 Range = namedtuple('Range', ['start', 'end'])466 r1 = Range(start= (datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=3) ) , end= datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') )467 r2 = Range(start= record_starttime, end = record_endtime)468 latest_start = max(r1.start, r2.start)469 earliest_end = min(r1.end, r2.end)470 print('latest_start: ' ,latest_start )471 print('earliest_end: ' ,earliest_end )472 473 if (r1.start <= r2.end) and (r2.start <= r1.end) :474 print('SO minus 3 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])475 delta = 0 ;476 delta = ((earliest_end - latest_start).seconds )/60477 overlap_sepsis_minus_duration_3 = overlap_sepsis_minus_duration_3 + ',' + str(delta)478 df_waveform_exists.loc[index,'SEPSIS-3_timeoverlap_exists'] = 1;479 count_overlaping_records_sepsis_minus_3 = count_overlaping_records_sepsis_minus_3 +1 ;480 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency481 else: 482 print('SO minus 3 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])483 #df_csvdata.loc[index,'timeoverlap'] = 0;484 485 #for ssepsis_onsettime - 4 hour486 
Range='';487 r1='';488 r2='';489 Range = namedtuple('Range', ['start', 'end'])490 r1 = Range(start= (datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=4) ) , end= datetime.datetime.strptime(row['sepsis_onsettime'],'%Y-%m-%d %H:%M:%S') )491 r2 = Range(start= record_starttime, end = record_endtime)492 latest_start = max(r1.start, r2.start)493 earliest_end = min(r1.end, r2.end)494 print('latest_start: ' ,latest_start )495 print('earliest_end: ' ,earliest_end )496 497 if (r1.start <= r2.end) and (r2.start <= r1.end) :498 print('SO minus 4 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])499 delta = 0;500 delta = ((earliest_end - latest_start).seconds )/60501 overlap_sepsis_minus_duration_4 = overlap_sepsis_minus_duration_4 + ',' + str(delta)502 df_waveform_exists.loc[index,'SEPSIS-4_timeoverlap_exists'] = 1;503 count_overlaping_records_sepsis_minus_4 = count_overlaping_records_sepsis_minus_4 +1 ;504 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency505 else: 506 print('SO minus 4 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])507 #df_csvdata.loc[index,'timeoverlap'] = 0;508 509 #################################################################################################################510 if str(row['sepstic_shock_onsettime']) != 'nan' : 511 #for ssepstic_shock_onsettime + 1 hour512 Range='';513 r1='';514 r2='';515 Range = namedtuple('Range', ['start', 'end'])516 r1 = Range(start= datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=1) ) )517 r2 = Range(start= record_starttime, end = record_endtime)518 latest_start = max(r1.start, r2.start)519 earliest_end = min(r1.end, r2.end)520 
print('latest_start: ' ,latest_start )521 print('earliest_end: ' ,earliest_end )522 523 if (r1.start <= r2.end) and (r2.start <= r1.end) :524 print('SHOCK PLUS 1 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])525 delta=0;526 delta = ((earliest_end - latest_start).seconds )/60 527 overlap_shock_plus_duration_1 = overlap_shock_plus_duration_1 + ',' + str(delta)528 df_waveform_exists.loc[index,'SHOCK+1_timeoverlap_exists'] = 1;529 count_overlaping_records_shock_plus_1 = count_overlaping_records_shock_plus_1 +1 ;530 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency531 else: 532 print('SHOCK PLUS 1 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])533 #df_csvdata.loc[index,'timeoverlap'] = 0;534 #for sepstic_shock_onsettime + 2 hour535 Range='';536 r1='';537 r2='';538 Range = namedtuple('Range', ['start', 'end'])539 #datetime.timedelta(hours=24)540 r1 = Range(start= datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=2) ) )541 r2 = Range(start= record_starttime, end = record_endtime)542 latest_start = max(r1.start, r2.start)543 earliest_end = min(r1.end, r2.end)544 print('latest_start: ' ,latest_start )545 print('earliest_end: ' ,earliest_end )546 547 if (r1.start <= r2.end) and (r2.start <= r1.end) :548 print('SHOCK PLUS 2 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])549 delta = 0;550 delta = ((earliest_end - latest_start).seconds )/60551 overlap_shock_plus_duration_2 = overlap_shock_plus_duration_2 + ',' + str(delta)552 df_waveform_exists.loc[index,'SHOCK+2_timeoverlap_exists'] = 1;553 count_overlaping_records_shock_plus_2 = count_overlaping_records_shock_plus_2 +1 ;554 #todo : adding 
new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency555 else: 556 print('SHOCK PLUS 2 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])557 #df_csvdata.loc[index,'timeoverlap'] = 0;558 559 #for sepstic_shock_onsettime + 3 hour560 Range='';561 r1='';562 r2='';563 Range = namedtuple('Range', ['start', 'end'])564 r1 = Range(start= datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S'), end= (datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=3) ) )565 r2 = Range(start= record_starttime, end = record_endtime)566 latest_start = max(r1.start, r2.start)567 earliest_end = min(r1.end, r2.end)568 print('latest_start: ' ,latest_start )569 print('earliest_end: ' ,earliest_end )570 571 if (r1.start <= r2.end) and (r2.start <= r1.end) :572 print('SHOCK PLUS 3 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])573 delta = 0 ;574 delta = ((earliest_end - latest_start).seconds )/60575 overlap_shock_plus_duration_3 = overlap_shock_plus_duration_3 + ',' + str(delta)576 df_waveform_exists.loc[index,'SHOCK+3_timeoverlap_exists'] = 1;577 count_overlaping_records_shock_plus_3 = count_overlaping_records_shock_plus_3 +1 ;578 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency579 else: 580 print('SHOCK PLUS 3 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])581 #df_csvdata.loc[index,'timeoverlap'] = 0;582 583 #for sepstic_shock_onsettime + 4 hour584 Range='';585 r1='';586 r2='';587 Range = namedtuple('Range', ['start', 'end'])588 r1 = Range(start= datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S'), end= 
(datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') + datetime.timedelta(hours=4) ) )589 r2 = Range(start= record_starttime, end = record_endtime)590 latest_start = max(r1.start, r2.start)591 earliest_end = min(r1.end, r2.end)592 print('latest_start: ' ,latest_start )593 print('earliest_end: ' ,earliest_end )594 595 if (r1.start <= r2.end) and (r2.start <= r1.end) :596 print('SHOCK PLUS 4 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])597 delta = 0;598 delta = ((earliest_end - latest_start).seconds )/60599 overlap_shock_plus_duration_4 = overlap_shock_plus_duration_4 + ',' + str(delta)600 df_waveform_exists.loc[index,'SHOCK+4_timeoverlap_exists'] = 1;601 count_overlaping_records_shock_plus_4 = count_overlaping_records_shock_plus_4 +1 ;602 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency603 else: 604 print('SHOCK PLUS 4 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])605 #df_csvdata.loc[index,'timeoverlap'] = 0;606#############################################################################607 #for sepstic_shock_onsettime - 1 hour608 Range='';609 r1='';610 r2='';611 Range = namedtuple('Range', ['start', 'end'])612 r1 = Range(start= (datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=1) ) , end= datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') )613 r2 = Range(start= record_starttime, end = record_endtime)614 latest_start = max(r1.start, r2.start)615 earliest_end = min(r1.end, r2.end)616 print('latest_start: ' ,latest_start )617 print('earliest_end: ' ,earliest_end )618 619 if (r1.start <= r2.end) and (r2.start <= r1.end) :620 print('SHOCK MINUS 1 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])621 delta=0;622 
delta = ((earliest_end - latest_start).seconds )/60 623 overlap_shock_minus_duration_1 = overlap_shock_minus_duration_1 + ',' + str(delta)624 df_waveform_exists.loc[index,'SHOCK-1_timeoverlap_exists'] = 1;625 count_overlaping_records_shock_minus_1 = count_overlaping_records_shock_minus_1 + 1 ;626 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency627 else: 628 print('SHOCK MINUS 1 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])629 #df_csvdata.loc[index,'timeoverlap'] = 0;630 #for sepstic_shock_onsettime - 2 hour631 Range='';632 r1='';633 r2='';634 Range = namedtuple('Range', ['start', 'end'])635 #datetime.timedelta(hours=24)636 r1 = Range(start= (datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=2) ) , end= datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') )637 r2 = Range(start= record_starttime, end = record_endtime)638 latest_start = max(r1.start, r2.start)639 earliest_end = min(r1.end, r2.end)640 print('latest_start: ' ,latest_start )641 print('earliest_end: ' ,earliest_end )642 643 if (r1.start <= r2.end) and (r2.start <= r1.end) :644 print('SHOCK MINUS 2 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])645 delta = 0;646 delta = ((earliest_end - latest_start).seconds )/60647 overlap_shock_minus_duration_2 = overlap_shock_minus_duration_2 + ',' + str(delta)648 df_waveform_exists.loc[index,'SHOCK-2_timeoverlap_exists'] = 1;649 count_overlaping_records_shock_minus_2 = count_overlaping_records_shock_minus_2 +1 ;650 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency651 else: 652 print('SHOCK MINUS 2 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', 
row['subject_id'])653 #df_csvdata.loc[index,'timeoverlap'] = 0;654 655 #for sepstic_shock_onsettime - 3 hour656 Range='';657 r1='';658 r2='';659 Range = namedtuple('Range', ['start', 'end'])660 r1 = Range(start= (datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=3) ) , end= datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') )661 r2 = Range(start= record_starttime, end = record_endtime)662 latest_start = max(r1.start, r2.start)663 earliest_end = min(r1.end, r2.end)664 print('latest_start: ' ,latest_start )665 print('earliest_end: ' ,earliest_end )666 667 if (r1.start <= r2.end) and (r2.start <= r1.end) :668 print('SHOCK MINUS 3 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])669 delta = 0 ;670 delta = ((earliest_end - latest_start).seconds )/60671 overlap_shock_minus_duration_3 = overlap_shock_minus_duration_3 + ',' + str(delta)672 df_waveform_exists.loc[index,'SHOCK-3_timeoverlap_exists'] = 1;673 count_overlaping_records_shock_minus_3 = count_overlaping_records_shock_minus_3 +1 ;674 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency675 else: 676 print('SHOCK MINUS 3 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])677 #df_csvdata.loc[index,'timeoverlap'] = 0;678 679 #for sepstic_shock_onsettime - 4 hour680 Range='';681 r1='';682 r2='';683 Range = namedtuple('Range', ['start', 'end'])684 r1 = Range(start= (datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=4) ) , end= datetime.datetime.strptime(row['sepstic_shock_onsettime'],'%Y-%m-%d %H:%M:%S') )685 r2 = Range(start= record_starttime, end = record_endtime)686 latest_start = max(r1.start, r2.start)687 earliest_end = min(r1.end, r2.end)688 print('latest_start: ' ,latest_start )689 
print('earliest_end: ' ,earliest_end )690 691 if (r1.start <= r2.end) and (r2.start <= r1.end) :692 print('SHOCK MINUS 4 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])693 delta = 0;694 delta = ((earliest_end - latest_start).seconds )/60695 overlap_shock_minus_duration_4 = overlap_shock_minus_duration_4 + ',' + str(delta)696 df_waveform_exists.loc[index,'SHOCK-4_timeoverlap_exists'] = 1;697 count_overlaping_records_shock_minus_4 = count_overlaping_records_shock_minus_4 +1 ;698 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency699 else: 700 print('SHOCK MINUS 4 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])701 #df_csvdata.loc[index,'timeoverlap'] = 0;702 703 ###########################################################704 #for ICU OUTTIME - 1 hour705 Range='';706 r1='';707 r2='';708 Range = namedtuple('Range', ['start', 'end'])709 r1 = Range(start= (datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=1) ) , end = datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') )710 r2 = Range(start= record_starttime, end = record_endtime)711 latest_start = max(r1.start, r2.start)712 earliest_end = min(r1.end, r2.end)713 print('latest_start: ' ,latest_start )714 print('earliest_end: ' ,earliest_end )715 716 if (r1.start <= r2.end) and (r2.start <= r1.end) :717 print('OUT - 1 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])718 delta=0;719 delta = ((earliest_end - latest_start).seconds )/60 720 overlap_duration_outtime_1 = overlap_duration_outtime_1 + ',' + str(delta)721 df_waveform_exists.loc[index,'OUT-1_timeoverlap_exists'] = 1;722 count_overlaping_records_outtime_1 = count_overlaping_records_outtime_1 +1 ;723 #todo : adding new dataframe, exatracting required signals, computing 
avergage for per sminute values in case of per second sampling frequency724 else: 725 print('OUT - 1 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])726 #df_csvdata.loc[index,'timeoverlap'] = 0;727 #for ICU OUTTIME - 2 hourS728 Range='';729 r1='';730 r2='';731 Range = namedtuple('Range', ['start', 'end'])732 #datetime.timedelta(hours=24)733 r1 = Range(start= (datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=2) ) , end = datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') )734 r2 = Range(start= record_starttime, end = record_endtime)735 latest_start = max(r1.start, r2.start)736 earliest_end = min(r1.end, r2.end)737 print('latest_start: ' ,latest_start )738 print('earliest_end: ' ,earliest_end )739 740 if (r1.start <= r2.end) and (r2.start <= r1.end) :741 print('OUT - 2 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])742 delta = 0;743 delta = ((earliest_end - latest_start).seconds )/60744 overlap_duration_outtime_2 = overlap_duration_outtime_2 + ',' + str(delta)745 df_waveform_exists.loc[index,'OUT-2_timeoverlap_exists'] = 1;746 count_overlaping_records_outtime_2 = count_overlaping_records_outtime_2 +1 ;747 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency748 else: 749 print('OUT - 2 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])750 #df_csvdata.loc[index,'timeoverlap'] = 0;751 752 #for ICU OUTTIME - 3 HOURS753 Range='';754 r1='';755 r2='';756 Range = namedtuple('Range', ['start', 'end'])757 print('record icu time - 3: ', (datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=3) ))758 r1 = Range(start= (datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=3) ) , end = 
datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') )759 r2 = Range(start= record_starttime, end = record_endtime)760 latest_start = max(r1.start, r2.start)761 earliest_end = min(r1.end, r2.end)762 print('latest_start: ' ,latest_start )763 print('earliest_end: ' ,earliest_end )764 765 if (r1.start <= r2.end) and (r2.start <= r1.end) :766 print('OUT - 3 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])767 delta = 0 ;768 delta = ((earliest_end - latest_start).seconds )/60769 overlap_duration_outtime_3 = overlap_duration_outtime_3 + ',' + str(delta)770 df_waveform_exists.loc[index,'OUT-3_timeoverlap_exists'] = 1;771 count_overlaping_records_outtime_3 = count_overlaping_records_outtime_3 +1 ;772 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency773 else: 774 print('OUT - 3 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])775 #df_csvdata.loc[index,'timeoverlap'] = 0;776 777 #for OUT - 4 hour778 Range='';779 r1='';780 r2='';781 Range = namedtuple('Range', ['start', 'end'])782 print('record icu time - 4: ', (datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=4) ))783 r1 = Range(start= (datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') - datetime.timedelta(hours=4) ) , end = datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') )784 r2 = Range(start= record_starttime, end = record_endtime)785 latest_start = max(r1.start, r2.start)786 earliest_end = min(r1.end, r2.end)787 print('latest_start: ' ,latest_start )788 print('earliest_end: ' ,earliest_end )789 790 if (r1.start <= r2.end) and (r2.start <= r1.end) :791 print('OUT - 4 : RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])792 delta = 0;793 delta = ((earliest_end - latest_start).seconds )/60794 
overlap_duration_outtime_4 = overlap_duration_outtime_4 + ',' + str(delta)795 df_waveform_exists.loc[index,'OUT-4_timeoverlap_exists'] = 1;796 count_overlaping_records_outtime_4 = count_overlaping_records_outtime_4 +1 ;797 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency798 else: 799 print('OUT - 4 : RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED AND TIME OVERLAP: ', row['subject_id'])800 #df_csvdata.loc[index,'timeoverlap'] = 0;801 else:802 #df_csvdata.loc[index,'sig_exists'] = 0 ;803 print('DO NOT SELECT THIS RECORD', row['subject_id'])804 except ValueError:805 print('Error occured while reading waveform: ', record);806 df_waveform_exists.loc[index,'SI+1_Number_of_overlaping_records'] = count_overlaping_records_1;807 df_waveform_exists.loc[index,'SI+1_overlap_duration'] = overlap_duration_1 # overlap between suspected infection + 1 hour and record time808 df_waveform_exists.loc[index,'SI+2_Number_of_overlaping_records'] = count_overlaping_records_2;809 df_waveform_exists.loc[index,'SI+2_overlap_duration'] = overlap_duration_2 # overlap between suspected infection + 1 hour and record time810 df_waveform_exists.loc[index,'SI+3_Number_of_overlaping_records'] = count_overlaping_records_3;811 df_waveform_exists.loc[index,'SI+3_overlap_duration'] = overlap_duration_3 # overlap between suspected infection + 1 hour and record time812 df_waveform_exists.loc[index,'SI+4_Number_of_overlaping_records'] = count_overlaping_records_4;813 df_waveform_exists.loc[index,'SI+4_overlap_duration'] = overlap_duration_4 # overlap between suspected infection + 1 hour and record time814##################################################################################################################################################815 df_waveform_exists.loc[index,'OUT-1_Number_of_overlaping_records'] = count_overlaping_records_outtime_1;816 
df_waveform_exists.loc[index,'OUT-1_overlap_duration'] = overlap_duration_outtime_1 # overlap between suspected infection + 1 hour and record time817 df_waveform_exists.loc[index,'OUT-2_Number_of_overlaping_records'] = count_overlaping_records_outtime_2;818 df_waveform_exists.loc[index,'OUT-2_overlap_duration'] = overlap_duration_outtime_2 # overlap between suspected infection + 1 hour and record time819 df_waveform_exists.loc[index,'OUT-3_Number_of_overlaping_records'] = count_overlaping_records_outtime_3;820 df_waveform_exists.loc[index,'OUT-3_overlap_duration'] = overlap_duration_outtime_3 # overlap between suspected infection + 1 hour and record time821 df_waveform_exists.loc[index,'OUT-4_Number_of_overlaping_records'] = count_overlaping_records_outtime_4;822 df_waveform_exists.loc[index,'OUT-4_overlap_duration'] = overlap_duration_outtime_4 # overlap between suspected infection + 1 hour and record time823##########################################. start 824 df_waveform_exists.loc[index,'SEPSIS+1_Number_of_overlaping_records'] = count_overlaping_records_sepsis_plus_1;825 df_waveform_exists.loc[index,'SEPSIS+1_overlap_duration'] = overlap_sepsis_plus_duration_1 # overlap between suspected infection + 1 hour and record time826 df_waveform_exists.loc[index,'SEPSIS+2_Number_of_overlaping_records'] = count_overlaping_records_sepsis_plus_2;827 df_waveform_exists.loc[index,'SEPSIS+2_overlap_duration'] = overlap_sepsis_plus_duration_2 # overlap between suspected infection + 1 hour and record time828 df_waveform_exists.loc[index,'SEPSIS+3_Number_of_overlaping_records'] = count_overlaping_records_sepsis_plus_3;829 df_waveform_exists.loc[index,'SEPSIS+3_overlap_duration'] = overlap_sepsis_plus_duration_3 # overlap between suspected infection + 1 hour and record time830 df_waveform_exists.loc[index,'SEPSIS+4_Number_of_overlaping_records'] = count_overlaping_records_sepsis_plus_4;831 df_waveform_exists.loc[index,'SEPSIS+4_overlap_duration'] = overlap_sepsis_plus_duration_4 
# overlap between suspected infection + 1 hour and record time832 ###833 df_waveform_exists.loc[index,'SEPSIS-1_Number_of_overlaping_records'] = count_overlaping_records_sepsis_minus_1; #TODO834 df_waveform_exists.loc[index,'SEPSIS-1_overlap_duration'] = overlap_sepsis_minus_duration_1 # overlap between suspected infection + 1 hour and record time835 df_waveform_exists.loc[index,'SEPSIS-2_Number_of_overlaping_records'] = count_overlaping_records_sepsis_minus_2;836 df_waveform_exists.loc[index,'SEPSIS-2_overlap_duration'] = overlap_sepsis_minus_duration_2 # overlap between suspected infection + 1 hour and record time837 df_waveform_exists.loc[index,'SEPSIS-3_Number_of_overlaping_records'] = count_overlaping_records_sepsis_minus_3;838 df_waveform_exists.loc[index,'SEPSIS-3_overlap_duration'] = overlap_sepsis_minus_duration_3 # overlap between suspected infection + 1 hour and record time839 df_waveform_exists.loc[index,'SEPSIS-4_Number_of_overlaping_records'] = count_overlaping_records_sepsis_minus_4;840 df_waveform_exists.loc[index,'SEPSIS-4_overlap_duration'] = overlap_sepsis_minus_duration_4 # overlap between suspected infection + 1 hour and record time841 ####842 df_waveform_exists.loc[index,'SHOCK+1_Number_of_overlaping_records'] = count_overlaping_records_shock_plus_1;843 df_waveform_exists.loc[index,'SHOCK+1_overlap_duration'] = overlap_shock_plus_duration_1 # overlap between suspected infection + 1 hour and record time844 df_waveform_exists.loc[index,'SHOCK+2_Number_of_overlaping_records'] = count_overlaping_records_shock_plus_2;845 df_waveform_exists.loc[index,'SHOCK+2_overlap_duration'] = overlap_shock_plus_duration_2 # overlap between suspected infection + 1 hour and record time846 df_waveform_exists.loc[index,'SHOCK+3_Number_of_overlaping_records'] = count_overlaping_records_shock_plus_3;847 df_waveform_exists.loc[index,'SHOCK+3_overlap_duration'] = overlap_shock_plus_duration_3 # overlap between suspected infection + 1 hour and record time848 
df_waveform_exists.loc[index,'SHOCK+4_Number_of_overlaping_records'] = count_overlaping_records_shock_plus_4;849 df_waveform_exists.loc[index,'SHOCK+4_overlap_duration'] = overlap_shock_plus_duration_4 # overlap between suspected infection + 1 hour and record time850 #####851 df_waveform_exists.loc[index,'SHOCK-1_Number_of_overlaping_records'] = count_overlaping_records_shock_minus_1;852 df_waveform_exists.loc[index,'SHOCK-1_overlap_duration'] = overlap_shock_minus_duration_1 # overlap between suspected infection + 1 hour and record time853 df_waveform_exists.loc[index,'SHOCK-2_Number_of_overlaping_records'] = count_overlaping_records_shock_minus_2;854 df_waveform_exists.loc[index,'SHOCK-2_overlap_duration'] = overlap_shock_minus_duration_2 # overlap between suspected infection + 1 hour and record time855 df_waveform_exists.loc[index,'SHOCK-3_Number_of_overlaping_records'] = count_overlaping_records_shock_minus_3;856 df_waveform_exists.loc[index,'SHOCK-3_overlap_duration'] = overlap_shock_minus_duration_3 # overlap between suspected infection + 1 hour and record time857 df_waveform_exists.loc[index,'SHOCK-4_Number_of_overlaping_records'] = count_overlaping_records_shock_minus_4;858 df_waveform_exists.loc[index,'SHOCK-4_overlap_duration'] = overlap_shock_minus_duration_4 # overlap between suspected infection + 1 hour and record time859print(df_waveform_exists.shape)860df_waveform_exists.to_csv ('Check_TSdata_exists_susInfect_icuout_sepsis_shock_onsettime.csv', sep=',', index = False, header=True);861#df_with_6_Signals.drop(df_with_6_Signals.index, inplace=True)862df_with_6_Signals = df_waveform_exists[(df_waveform_exists['6_sig_exists']==1) & (df_waveform_exists['sepsis_onsettime'].notnull())] # add and sepsis onset time is not null863print(df_with_6_Signals.shape)864print('---------------------for Suspected infection + 1,2,3,4------------------------')865print( 'Number of patients with recording for Suspected infection + 1 hour : ' , 
df_with_6_Signals[df_with_6_Signals['SI+1_timeoverlap_exists']==1].shape[0] )866print( 'Number of patients with recording for Suspected infection + 2 hours : ' , df_with_6_Signals[df_with_6_Signals['SI+2_timeoverlap_exists']==1].shape[0] )867print( 'Number of patients with recording for Suspected infection + 3 hours : ' , df_with_6_Signals[df_with_6_Signals['SI+3_timeoverlap_exists']==1].shape[0] )868print( 'Number of patients with recording for Suspected infection + 4 hours : ' , df_with_6_Signals[df_with_6_Signals['SI+4_timeoverlap_exists']==1].shape[0] )869print('---------------------for ICU outtime - 1,2,3,4-------------------------------')870print( 'Number of patients with recording for ICU outtime - 1 hour : ' , df_with_6_Signals[df_with_6_Signals['OUT-1_timeoverlap_exists']==1].shape[0] )871print( 'Number of patients with recording for ICU outtime - 2 hours : ' , df_with_6_Signals[df_with_6_Signals['OUT-2_timeoverlap_exists']==1].shape[0] )872print( 'Number of patients with recording for ICU outtime - 3 hours : ' , df_with_6_Signals[df_with_6_Signals['OUT-3_timeoverlap_exists']==1].shape[0] )873print( 'Number of patients with recording for ICU outtime - 4 hours : ' , df_with_6_Signals[df_with_6_Signals['OUT-4_timeoverlap_exists']==1].shape[0] )874print('---------------------------for Sepsis onset time + 1,2,3,4----------------------------')875print( 'Number of patients with recording for Sepsis onset time + 1 hour : ' , df_with_6_Signals[df_with_6_Signals['SEPSIS+1_timeoverlap_exists']==1].shape[0] )876print( 'Number of patients with recording for Sepsis onset time + 2 hours : ' , df_with_6_Signals[df_with_6_Signals['SEPSIS+2_timeoverlap_exists']==1].shape[0] )877print( 'Number of patients with recording for Sepsis onset time + 3 hours : ' , df_with_6_Signals[df_with_6_Signals['SEPSIS+3_timeoverlap_exists']==1].shape[0] )878print( 'Number of patients with recording for Sepsis onset time + 4 hours : ' , 
df_with_6_Signals[df_with_6_Signals['SEPSIS+4_timeoverlap_exists']==1].shape[0] )879print('---------------------for Sepsis onset time - 1,2,3,4------------------------')880print( 'Number of patients with recording for Sepsis onset time - 1 hour : ' , df_with_6_Signals[df_with_6_Signals['SEPSIS-1_timeoverlap_exists']==1].shape[0] )881print( 'Number of patients with recording for Sepsis onset time - 2 hours : ' , df_with_6_Signals[df_with_6_Signals['SEPSIS-2_timeoverlap_exists']==1].shape[0] )882print( 'Number of patients with recording for Sepsis onset time - 3 hours : ' , df_with_6_Signals[df_with_6_Signals['SEPSIS-3_timeoverlap_exists']==1].shape[0] )883print( 'Number of patients with recording for Sepsis onset time - 4 hours : ' , df_with_6_Signals[df_with_6_Signals['SEPSIS-4_timeoverlap_exists']==1].shape[0] )884print('--------------------for Septic shock onset time + 1,2,3,4-------------------------')885print( 'Number of patients with recording for Septic shock onset time + 1 hour : ' , df_with_6_Signals[df_with_6_Signals['SHOCK+1_timeoverlap_exists']==1].shape[0] )886print( 'Number of patients with recording for Septic shock onset time + 2 hours : ' , df_with_6_Signals[df_with_6_Signals['SHOCK+2_timeoverlap_exists']==1].shape[0] )887print( 'Number of patients with recording for Septic shock onset time + 3 hours : ' , df_with_6_Signals[df_with_6_Signals['SHOCK+3_timeoverlap_exists']==1].shape[0] )888print( 'Number of patients with recording for Septic shock onset time + 4 hours : ' , df_with_6_Signals[df_with_6_Signals['SHOCK+4_timeoverlap_exists']==1].shape[0] )889print('---------------------for Septic shock onset time - 1,2,3,4----------------------------')890print( 'Number of patients with recording for Septic shock onset time - 1 hour : ' , df_with_6_Signals[df_with_6_Signals['SHOCK-1_timeoverlap_exists']==1].shape[0] )891print( 'Number of patients with recording for Septic shock onset time - 2 hours : ' , 
df_with_6_Signals[df_with_6_Signals['SHOCK-2_timeoverlap_exists']==1].shape[0] )892print( 'Number of patients with recording for Septic shock onset time - 3 hours : ' , df_with_6_Signals[df_with_6_Signals['SHOCK-3_timeoverlap_exists']==1].shape[0] )893print( 'Number of patients with recording for Septic shock onset time - 4 hours : ' , df_with_6_Signals[df_with_6_Signals['SHOCK-4_timeoverlap_exists']==1].shape[0] )894#combinations895print('----------------------suspected infection + 1,2,3,4 AND ICU out time - 1,2,3,4--------------')896# suspected infection + 1,2,3,4 AND ICU out time - 1,2,3,4897print( 'Number of patients with recording for Suspected infection + 1, Out - 1 : ' , df_with_6_Signals[(df_with_6_Signals['SI+1_timeoverlap_exists']==1) & ( df_with_6_Signals['OUT-1_timeoverlap_exists']==1) ].shape[0] )898print( 'Number of patients with recording for Suspected infection + 2, Out - 2 : ' , df_with_6_Signals[(df_with_6_Signals['SI+2_timeoverlap_exists']==1) & ( df_with_6_Signals['OUT-2_timeoverlap_exists']==1) ].shape[0] )899print( 'Number of patients with recording for Suspected infection + 3, Out - 3 : ' , df_with_6_Signals[(df_with_6_Signals['SI+3_timeoverlap_exists']==1) & ( df_with_6_Signals['OUT-3_timeoverlap_exists']==1) ].shape[0] )900print( 'Number of patients with recording for Suspected infection + 4, Out - 4 : ' , df_with_6_Signals[(df_with_6_Signals['SI+4_timeoverlap_exists']==1) & ( df_with_6_Signals['OUT-4_timeoverlap_exists']==1) ].shape[0] )901print('-------suspected infection + 1,2,3,4 AND Sepsis onset time - 1,2,3,4 - to predict sepsis------------')902# suspected infection + 1,2,3,4 AND Sepsis onset time - 1,2,3,4 - to predict sepsis.903print( 'Number of patients with recording for Suspected infection + 1, Sepsis onset time - 1 : ' , df_with_6_Signals[(df_with_6_Signals['SI+1_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS-1_timeoverlap_exists']==1) ].shape[0] )904print( 'Number of patients with recording for Suspected infection + 2, 
Sepsis onset time - 2 : ' , df_with_6_Signals[(df_with_6_Signals['SI+2_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS-2_timeoverlap_exists']==1) ].shape[0] )905print( 'Number of patients with recording for Suspected infection + 3, Sepsis onset time - 3 : ' , df_with_6_Signals[(df_with_6_Signals['SI+3_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS-3_timeoverlap_exists']==1) ].shape[0] )906print( 'Number of patients with recording for Suspected infection + 4, Sepsis onset time - 4 : ' , df_with_6_Signals[(df_with_6_Signals['SI+4_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS-4_timeoverlap_exists']==1) ].shape[0] )907print('---------suspected infection + 1,2,3,4 AND Sepsis onset time - 1,2,3,4 AND sepsis onset time + 1,2,3,4 AND Septic shock -1,2,3,4 - to predict septic shock right from suspected infection------')908# suspected infection + 1,2,3,4 AND Sepsis onset time - 1,2,3,4 AND sepsis onset time + 1,2,3,4 AND Septic shock -1,2,3,4 - to predict septic shock right from suspected infection .909print( 'Number of patients with recording for Suspected infection + 1, Sepsis onset time - 1, sepsis onset time + 1, Septic shock - 1 : ' , df_with_6_Signals[(df_with_6_Signals['SI+1_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS-1_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS+1_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK-1_timeoverlap_exists']==1) ].shape[0] )910print( 'Number of patients with recording for Suspected infection + 2, Sepsis onset time - 2, sepsis onset time + 2, Septic shock - 2 : ' , df_with_6_Signals[(df_with_6_Signals['SI+2_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS-2_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS+2_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK-2_timeoverlap_exists']==1) ].shape[0] )911print( 'Number of patients with recording for Suspected infection + 3, Sepsis onset time - 3, sepsis onset time + 3, Septic shock - 3 : ' , 
df_with_6_Signals[(df_with_6_Signals['SI+3_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS-3_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS+3_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK-3_timeoverlap_exists']==1) ].shape[0] )912print( 'Number of patients with recording for Suspected infection + 4, Sepsis onset time - 4, sepsis onset time + 4, Septic shock - 4 : ' , df_with_6_Signals[(df_with_6_Signals['SI+4_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS-4_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS+4_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK-4_timeoverlap_exists']==1) ].shape[0] )913print('---------sepsis onset time + 1,2,3,4 AND Septic shock -1,2,3,4 AND Septic shock + 1,2,3,4 - to check around sepsis and septic shock-------------------')914# sepsis onset time + 1,2,3,4 AND Septic shock -1,2,3,4 AND Septic shock + 1,2,3,4 - to check around sepsis and septic shock915print( 'Number of patients with recording for sepsis onset time + 1, Septic shock - 1, Septic shock + 1 : ' , df_with_6_Signals[( df_with_6_Signals['SEPSIS+1_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK-1_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK+1_timeoverlap_exists']==1) ].shape[0] )916print( 'Number of patients with recording for sepsis onset time + 2, Septic shock - 2, Septic shock + 2 : ' , df_with_6_Signals[( df_with_6_Signals['SEPSIS+2_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK-2_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK+2_timeoverlap_exists']==1) ].shape[0] )917print( 'Number of patients with recording for sepsis onset time + 3, Septic shock - 3, Septic shock + 3 : ' , df_with_6_Signals[( df_with_6_Signals['SEPSIS+3_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK-3_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK+3_timeoverlap_exists']==1) ].shape[0] )918print( 'Number of patients with recording for sepsis onset time + 4, Septic shock - 4, Septic shock + 4 : ', df_with_6_Signals[( 
df_with_6_Signals['SEPSIS+4_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK-4_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK+4_timeoverlap_exists']==1) ].shape[0] )919# to get patients with sepsis and septic shock and 6 signals 920# and time overlap with sepsis and septick shock onset time921print( df_with_6_Signals[( df_with_6_Signals['SEPSIS+4_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK-4_timeoverlap_exists']==1) & ( df_with_6_Signals['SHOCK+4_timeoverlap_exists']==1) ] )922# to get patients with ONLY sepsis and NO septic shock and 6 signals and time overlap with sepsis onset time923df_only_sepsis = df_with_6_Signals[( df_with_6_Signals['SEPSIS+4_timeoverlap_exists']==1) & ( df_with_6_Signals['SEPSIS-4_timeoverlap_exists']==1) & (df_waveform_exists['sepstic_shock_onsettime'].isnull()) ] 924print(df_only_sepsis.subject_id)925# to get non sepsis patients with 6 signals and time overlap with outtime926df_test = df_waveform_exists[(df_waveform_exists['6_sig_exists']==1) & (df_waveform_exists['sepsis_onsettime'].isnull()) & ( df_with_6_Signals['OUT-4_timeoverlap_exists']==1)] # add and sepsis onset time is not null...

Full Screen

Full Screen

1_extractingrequiredsignalsandtimeoverlapwithicustay.py

Source:1_extractingrequiredsignalsandtimeoverlapwithicustay.py Github

copy

Full Screen

1# -*- coding: utf-8 -*-2"""1_ExtractingRequiredSignalsandTimeOverlapWithICUStay.ipynb3Automatically generated by Colaboratory.4Original file is located at5 https://colab.research.google.com/drive/1WlVZsHEAe1H4sLDaFeTUTI1lwymSx_X26"""7# This notebook does the following:8# 1. Extract the SUBJECT_IDs for whom a record edxists in the MIMIC matched waveform dataset (total number 4,653) from a 'df_waveform_exists.csv' CSV file into a dataframe9# 2. Extract all the unique signals that are recorded for above patients and generate a 'physio_signals.txt' file.10# 3. Check for which SUBJECT_IDs there exists all relevant signals i.e. HR, SPO2%, ABP SYS, ABP DIAS, ABP MEAN, and RESP. If there exists all signals11# insert '1' in the 'sig_exists' column for that particular patient in the dataframe.12# 4. Considering the SUBJECT_IDs for which there exists all relevant signals in the database, 13# check if the recordings in the database overlap with their ICU stays i.e. check if there is a overlap between the recording start date and end date 14# and ICU intime and outtime. If there exists a overlap, insert '1' in the 'timeoverlap' column for that particular patient in the dataframe.15# 5. Generate a final CSV 'df_TS_exists_withoutTEMP_overlapcount.csv'16# 6. There are a few sample records, 1 for once per second sampling frequency and 1 for once per minute frequency. Each of these are read from the17# matched waveform database and we extract only required HR, SPO2%, ABP SYS, ABP DIAS, ABP MEAN,& RESP signals and insert it into a dataframe. 
For sample18# with onec per second sampling frequency, the data is converted to once per minute by calculating average for each 60 secs.19# Commented out IPython magic to ensure Python compatibility.20!pip install wfdb21import io22import pandas as pd23from IPython.display import display24import matplotlib.pyplot as plt25# %matplotlib inline26import numpy as np27import os28import shutil29import posixpath30import wfdb31import urllib.request32import datetime33from google.colab import files34uploaded = files.upload()35df_csvdata = pd.read_csv(io.BytesIO(uploaded['df_waveform_exists.csv']))36# Dataset is now stored in a Pandas Dataframe37print (df_csvdata.shape)38df_csvdata['sig_exists']='';39df_csvdata['timeoverlap']='';40df_csvdata['Number_of_overlaping_records'] =''41print (df_csvdata.shape)42#To get a list of all the unique signals present in PHYSIOBANK43from collections import namedtuple44for index, row in df_csvdata.iterrows():45 #print(row['subject_id'], row['icustay_id'], row['sepsis_onsettime'],row['intime'],row['outtime'])46 signals_in_pyhisobank=[]47 wdb_dir_path = 'mimic3wdb/matched/p'+ str(row['subject_id']).zfill(6)[:2] + '/p' + str(row['subject_id']).zfill(6) + '/';48 wdb_path_toAllRecords = 'https://archive.physionet.org/physiobank/database/'+ wdb_dir_path + 'RECORDS';49 wdb_records = urllib.request.urlopen(wdb_path_toAllRecords); 50 for lines in wdb_records.readlines():51 record = lines.decode("utf-8"); 52 record = str(record).rstrip()53 #print (record[-1:])54 if record[-1:] == 'n':55 #print(record);56 #print (wdb_dir_path);57 record = str(record).rstrip()58 try:59 print(row['subject_id'])60 signals,fields = wfdb.rdsamp(record, pb_dir=wdb_dir_path) ; 61 for i in fields['sig_name']:62 if i not in signals_in_pyhisobank:63 signals_in_pyhisobank.append(i);64 65 except ValueError:66 print('Error occured while reading waveform: ', record);67"""68#testing date part69 70 71 72 Range = namedtuple('Range', ['start', 'end'])73 print ('intime :', 
datetime.datetime.strptime(row['intime'],'%Y-%m-%d %H:%M:%S') ); 74 75 print ('outtime :', datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') ); 76 print ('record starttime' , record_starttime);77 print ('record endtime' , record_endtime);78 r1 = Range(start= datetime.datetime.strptime(row['intime'],'%Y-%m-%d %H:%M:%S'), end= datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S'))79 r2 = Range(start= record_starttime, end = record_endtime)80 latest_start = max(r1.start, r2.start)81 earliest_end = min(r1.end, r2.end)82 delta = (earliest_end - latest_start).days + 183 if delta > 0 :84 print('NO OVERLAP BETWEEN RECORD DATETIME AND ICU STAY DATETIME')85 df_csvdata['TS_exists'] = 1;86 #overlap = max(0, delta) #to find exact overlap days87"""88print(len(signals_in_pyhisobank))89with open("physio_signals.txt", "w") as output:90 output.write(str(signals_in_pyhisobank))91from google.colab import files92files.download('physio_signals.txt')93#### IMPORTANT ! DO NOT DELETE94#To get patients for whom required signals exists and there are TS records for the ICU stay in consideration95from collections import namedtuple96for index, row in df_csvdata.iterrows():97 #print(row['subject_id'], row['icustay_id'], row['sepsis_onsettime'],row['intime'],row['outtime'])98 records_toRead=[]99 wdb_dir_path = 'mimic3wdb/matched/p'+ str(row['subject_id']).zfill(6)[:2] + '/p' + str(row['subject_id']).zfill(6) + '/';100 wdb_path_toAllRecords = 'https://archive.physionet.org/physiobank/database/'+ wdb_dir_path + 'RECORDS';101 wdb_records = urllib.request.urlopen(wdb_path_toAllRecords); 102 count_overlaping_records = 0 103 for lines in wdb_records.readlines():104 record = lines.decode("utf-8"); 105 record = str(record).rstrip()106 #print (record[-1:])107 if record[-1:] == 'n':108 #print(record);109 #print (wdb_dir_path);110 record = str(record).rstrip()111 try:112 signals,fields = wfdb.rdsamp(record, pn_dir=wdb_dir_path) ; 113 #wfdb.plot_items(signal=signals, fs=fields['fs'])114 
#display(signals)115 #display(fields)116 #print ('fs' , fields['fs']);117 #print ('signal length',fields['sig_len']);118 #print ('date' ,fields['base_date'] ); 119 #print ('time' ,fields['base_time'] );120 #print ('%.3f'%(fields['fs']))121 122 list_sig_name = [item.upper().replace(' ','') for item in fields['sig_name']]123 sig_exist_1 = all(x in list_sig_name for x in ['HR', 'SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP']); #%SpO2124 sig_exist_2 = all(x in list_sig_name for x in ['HR', '%SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP']); 125 if ((sig_exist_1 == True) or (sig_exist_2 == True)) :126 df_csvdata.loc[index,'sig_exists'] = 1;127 record_starttime = datetime.datetime.combine(fields['base_date'] ,fields['base_time'] ) ;128 129 if '%.3f'%(fields['fs']) == '1.000' :130 record_endtime = record_starttime + datetime.timedelta(seconds= (fields['sig_len']-1)) ;131 elif '%.3f'%(fields['fs'])== '0.017' :132 record_endtime = record_starttime + datetime.timedelta(minutes = (fields['sig_len']-1)) ;133 else : 134 print('ERROR IN SAMPLING');135 print(record);136 print (wdb_dir_path);137 #Caculate if we have a recording for the time of icu stay138 Range = namedtuple('Range', ['start', 'end'])139 r1 = Range(start= datetime.datetime.strptime(row['intime'],'%Y-%m-%d %H:%M:%S'), end= datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S'))140 r2 = Range(start= record_starttime, end = record_endtime)141 latest_start = max(r1.start, r2.start)142 earliest_end = min(r1.end, r2.end)143 delta = (earliest_end - latest_start).days + 1144 if delta >= 0 :145 print('RECORD EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED : ', row['subject_id'])146 df_csvdata.loc[index,'timeoverlap'] = 1;147 count_overlaping_records = count_overlaping_records +1 ;148 #todo : adding new dataframe, exatracting required signals, computing avergage for per sminute values in case of per second sampling frequency149 else: 150 print('RECORD DOES NOT EXISTS FOR THE ICU STAYS WITH THE SIGNALS NEEDED : ', 
row['subject_id'])151 #df_csvdata.loc[index,'timeoverlap'] = 0;152 else:153 #df_csvdata.loc[index,'sig_exists'] = 0 ;154 print('DO NOT SELECT THIS RECORD', row['subject_id'])155 except ValueError:156 print('Error occured while reading waveform: ', record);157 df_csvdata.loc[index,'Number_of_overlaping_records'] = count_overlaping_records;158"""159#testing date part160 161 162 163 Range = namedtuple('Range', ['start', 'end'])164 print ('intime :', datetime.datetime.strptime(row['intime'],'%Y-%m-%d %H:%M:%S') ); 165 166 print ('outtime :', datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S') ); 167 print ('record starttime' , record_starttime);168 print ('record endtime' , record_endtime);169 r1 = Range(start= datetime.datetime.strptime(row['intime'],'%Y-%m-%d %H:%M:%S'), end= datetime.datetime.strptime(row['outtime'],'%Y-%m-%d %H:%M:%S'))170 r2 = Range(start= record_starttime, end = record_endtime)171 latest_start = max(r1.start, r2.start)172 earliest_end = min(r1.end, r2.end)173 delta = (earliest_end - latest_start).days + 1174 if delta > 0 :175 print('NO OVERLAP BETWEEN RECORD DATETIME AND ICU STAY DATETIME')176 df_csvdata['TS_exists'] = 1;177 #overlap = max(0, delta) #to find exact overlap days178"""179"""180# check dataframe 181print(df_csvdata[(df_csvdata['sig_exists'] == 1)].shape)182print(df_csvdata[(df_csvdata['timeoverlap'] == 1)].shape)183df_csvdata.to_csv ('df_TS_exists_withoutTEMP_overlapcount.csv', sep=',', index = False, header=True);184from google.colab import files185files.download('df_TS_exists_withoutTEMP_overlapcount.csv')186"""187# check dataframe 188print(df_csvdata[(df_csvdata['sig_exists'] == 1)].shape)189print(df_csvdata[(df_csvdata['timeoverlap'] == 1)].shape)190df_csvdata.to_csv ('df_TS_exists_withoutTEMP_overlapcount.csv', sep=',', index = False, header=True);191from google.colab import files192files.download('df_TS_exists_withoutTEMP_overlapcount.csv')193# Commented out IPython magic to ensure Python compatibility.194#example to 
send to Marcela195!pip install wfdb196import io197import pandas as pd198from IPython.display import display199import matplotlib.pyplot as plt200# %matplotlib inline201import numpy as np202import os203import shutil204import posixpath205import wfdb206import urllib.request207import datetime208"""209'HR',210 'PULSE', 211 'ABP SYS',212 'ABP DIAS',213"""214channels = ['ABP DIAS','ABP SYS','PULSE','temp','HR']215signals,fields = wfdb.rdsamp('p042930-2190-07-28-20-30n', pn_dir='mimic3wdb/matched/p04/p042930/', channel_names=['HR','ABP MEAN', 'ABP SYS','ABP DIAS'], sampfrom=100, sampto=120)216print(' ')217print(' ')218print(' ')219wfdb.plot_items(signal=signals, fs=fields['fs'])220print('Printing signals')221display(signals)222print('Printing fields')223display(fields)224print('------------------------------')225print ('fs' , fields['fs']);226print ('signal length',fields['sig_len']);227print ('date' ,fields['base_date'] ); 228print ('time' ,fields['base_time'] );229record_starttime = datetime.datetime.combine(fields['base_date'] ,fields['base_time'] ) ;230print ('%.3f'%(fields['fs']))231if '%.3f'%(fields['fs']) == '1.000':232 print ('Sampled once per second')233 record_endtime = record_starttime + datetime.timedelta(seconds = (fields['sig_len']-1)) ;234elif '%.3f'%(fields['fs'])== '0.017' :235 print('Sampled once per minute')236 record_endtime = record_starttime + datetime.timedelta(minutes = (fields['sig_len']-1)) ;237else :238 print('ERROR IN SAMPLING')239# Commented out IPython magic to ensure Python compatibility.240#### To show a sample of person suffereing from sepsis with high values for ABP Systolic (90 and less than 120) and low values for ABP diastolic (60 and less than 80)241# SAMPLE FREQUENCY: ONCE PER SECOND242#for i in subject_ids:243 # print (i.zfill(6)); --2186-01-26 14:13:07244!pip install wfdb245import io246import pandas as pd247from IPython.display import display248import matplotlib.pyplot as plt249# %matplotlib inline250import numpy as np251import 
os252import shutil253import posixpath254import wfdb255import urllib.request256import datetime257signals,fields = wfdb.rdsamp('p030582-2129-04-07-17-23n', pn_dir='mimic3wdb/matched/p03/p030582/')#sampfrom = 150 , sampto = 200258wfdb.plot_items(signal=signals, fs=fields['fs'])259#display(record.__dict__)260display(signals)261display(fields)262df_ts_records_columns = ['RECORD','TIME','HR', 'SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP'] 263df_ts_records = pd.DataFrame(columns=df_ts_records_columns);264for i in fields['sig_name']:265 if i.upper().replace(' ','') == 'HR':266 idx_HR = fields['sig_name'].index(i);267 df_ts_records['HR']= signals[:,idx_HR ]268 elif (( i.upper().replace(' ','') == 'SPO2') or (i.upper().replace(' ','') =='%SPO2')):269 idx_SPO2 = fields['sig_name'].index(i);270 df_ts_records['SPO2']= signals[:,idx_SPO2]271 elif i.upper().replace(' ','') == 'ABPSYS' :272 idx_ABPSYS = fields['sig_name'].index(i);273 df_ts_records['ABPSYS']= signals[:,idx_ABPSYS]274 elif i.upper().replace(' ','') == 'ABPDIAS' :275 idx_ABPDIAS = fields['sig_name'].index(i);276 df_ts_records['ABPDIAS']= signals[:,idx_ABPDIAS]277 elif i.upper().replace(' ','') == 'ABPMEAN' :278 idx_ABPMEAN = fields['sig_name'].index(i);279 df_ts_records['ABPMEAN']= signals[:,idx_ABPMEAN]280 281 elif i.upper().replace(' ','') == 'RESP' :282 idx_RESP = fields['sig_name'].index(i);283 df_ts_records['RESP']= signals[:,idx_RESP]284 285record_starttime = datetime.datetime.combine(fields['base_date'] ,fields['base_time'] ) ;286if '%.3f'%(fields['fs']) == '1.000' :287 print ('Sampled once per second')288 record_endtime = record_starttime + datetime.timedelta(seconds = (fields['sig_len']-1)) ;289elif '%.3f'%(fields['fs'])== '0.017' :290 print('Sampled once per minute')291 record_endtime = record_starttime + datetime.timedelta(minutes = (fields['sig_len']-1)) ;292else :293 print('ERROR IN SAMPLING') 294print ('start time: ', record_starttime);295print ('end time: ', record_endtime);296""" START 
COMMENT297df_ts_records['TIME'] = pd.date_range( record_starttime , periods = fields['sig_len'], freq='S'); 298df_ts_records.TIME = pd.to_datetime(df_ts_records.TIME)299#dat['STA_STD_NEW'] = dat['STA_STD']300#dat.loc[dat['STA_STD'].dt.time == datetime.time(23,59), 'STA_STD_NEW'] += datetime.timedelta(minutes=1)301#In [5]: dat302df_ts_records['RECORD'] = 'p081193-2186-01-26-19-36n';303print(df_ts_records)304# to aggregate and convert per second values into per minute values305start_idx = 0;306df_ts_records_columns_new = ['RECORD','TIME','HR', 'SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP'] 307df_ts_records_new = pd.DataFrame(columns=df_ts_records_columns_new);308print('length of new df ' ,df_ts_records_new.shape[0] )309for index, rows in df_ts_records.iterrows():310 if start_idx >= df_ts_records.shape[0]:311 exit;312 else: 313 #print(df_ts_records.iloc[start_idx: (start_idx+2), 0:4])314 print(df_ts_records.iloc[start_idx: (start_idx+60), 2:8])315 array = np.array( df_ts_records.iloc[start_idx: (start_idx+60), 2:8].mean(axis=0))316 print('printing array of average')317 print (array)318 current_index = df_ts_records_new.shape[0]319 df_ts_records_new.loc[current_index ,'HR']= array[0]320 df_ts_records_new.loc[current_index,'SPO2']= array[1]321 df_ts_records_new.loc[current_index,'ABPSYS']= array[2]322 df_ts_records_new.loc[current_index,'ABPDIAS']= array[3]323 df_ts_records_new.loc[current_index,'ABPMEAN']= array[4]324 df_ts_records_new.loc[current_index,'RESP']= array[5]325 print(df_ts_records_new)326 print('next average')327 start_idx = start_idx+60;328 #print('start index :: ' , start_idx)329df_ts_records_new['TIME'] = pd.date_range(record_starttime, periods=(fields['sig_len']/60), freq='1min'); 330df_ts_records_new.TIME = pd.to_datetime(df_ts_records_new.TIME)331df_ts_records_new['RECORD'] = 'p081193-2186-01-26-19-36n';332print(df_ts_records_new)333 334""" #END COMMENT335# testing to convert per second data into per minute336"""337#df.iloc[1:3, 0:3]338mydict = [{'a': 
1, 'b': 2, 'c': 3, 'd': 4},339 {'a': 100, 'b': 200, 'c': 300, 'd': 400},340 {'a': 1000, 'b': 2000, 'c': 3000, 'd': 4000 },341 {'a': 10, 'b': 30, 'c': 50, 'd': 70 },342 {'a': 20, 'b': 40, 'c': 60, 'd': 80 },343 {'a': 20, 'b': 40, 'c': 60, 'd': 80 },344 {'a': 20, 'b': 40, 'c': 60, 'd': 80 },345 {'a': 20, 'b': 40, 'c': 60, 'd': 80 },346 {'a': 20, 'b': 40, 'c': 60, 'd': 80 }347 ]348import numpy as np349df = pd.DataFrame(mydict)350print (df)351"""352"""353print(df.shape)354print('-----')355print(df.iloc[0:2, 0:4])356dd = pd.DataFrame( df.iloc[0:2, 0:4].mean(axis =0))357print(dd)358"""359"""360print('---------------------')361start_idx = 0 362new_df=pd.DataFrame(columns=['a','b','c','d']);363print(new_df)364print('length of new df ' ,new_df.shape[0] )365for index, rows in df.iterrows():366 if start_idx > df.shape[0]:367 exit;368 else: 369 print(df.iloc[start_idx: (start_idx+2), 0:4])370 print(df.iloc[start_idx: (start_idx+2), 0:4].mean(axis=0))371 array = np.array( df.iloc[start_idx: (start_idx+2), 0:4].mean(axis=0))372 print (array)373 374 current_index = new_df.shape[0]375 new_df.loc[current_index ,'a']= array[0]376 new_df.loc[current_index,'b']= array[1]377 new_df.loc[current_index,'c']= array[2]378 new_df.loc[current_index,'d']= array[3]379 print(new_df)380 print('next average')381 start_idx = start_idx+2;382 print('start index :: ' , start_idx)383 384"""385#### To show a sample of person NOT suffereing from sepsis 386##############################387################################388#for i in subject_ids:389 # print (i.zfill(6)); --2186-01-26 14:13:07390 391 392 # SAMPLE FREQUENCY: ONCE PER MINUTE393df_ts_records_columns = ['RECORD','TIME','HR', 'SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP','TEMP'] 394sig_list_1 = ['HR', 'SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP','TEMP'] 395sig_list_2 = ['HR', '%SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP','TEMP']396df_ts_records = pd.DataFrame(columns=df_ts_records_columns);397signals, fields = wfdb.rdsamp('p059864-2173-05-16-11-56n', 
pn_dir ='mimic3wdb/matched/p05/p059864/')398# signals, fields = wfdb.rdsamp('p042930-2190-07-28-20-30n', pn_dir='mimic3wdb/matched/p04/p042930/',sampfrom=0, sampto=10)399wfdb.plot_items(signal=signals, fs=fields['fs'])400display(signals)401print( signals.shape) ;402display(fields)403for i in fields['sig_name']:404 if i.upper().replace(' ','') == 'HR':405 idx_HR = fields['sig_name'].index(i);406 df_ts_records['HR']= signals[:,idx_HR ]407 elif (( i.upper().replace(' ','') == 'SPO2') or (i.upper().replace(' ','') =='%SPO2')):408 idx_SPO2 = fields['sig_name'].index(i);409 df_ts_records['SPO2']= signals[:,idx_SPO2]410 elif i.upper().replace(' ','') == 'ABPSYS' :411 idx_ABPSYS = fields['sig_name'].index(i);412 df_ts_records['ABPSYS']= signals[:,idx_ABPSYS]413 elif i.upper().replace(' ','') == 'ABPDIAS' :414 idx_ABPDIAS = fields['sig_name'].index(i);415 df_ts_records['ABPDIAS']= signals[:,idx_ABPDIAS]416 elif i.upper().replace(' ','') == 'ABPMEAN' :417 idx_ABPMEAN = fields['sig_name'].index(i);418 df_ts_records['ABPMEAN']= signals[:,idx_ABPMEAN]419 420 elif i.upper().replace(' ','') == 'RESP' :421 idx_RESP = fields['sig_name'].index(i);422 df_ts_records['RESP']= signals[:,idx_RESP]423 elif i.upper().replace(' ','') == 'TEMP':424 idx_TEMP = fields['sig_name'].index(i);425 df_ts_records['TEMP']= signals[:,idx_TEMP ]426 427print(df_ts_records);428record_starttime = datetime.datetime.combine(fields['base_date'] ,fields['base_time'] ) ;429if '%.3f'%(fields['fs']) == '1.000' :430 print ('Sampled once per second')431 record_endtime = record_starttime + datetime.timedelta(seconds = ( fields['sig_len']-1 )) ;432elif '%.3f'%(fields['fs'])== '0.017' :433 print('Sampled once per minute')434 record_endtime = record_starttime + datetime.timedelta(minutes = ( fields['sig_len']-1 )) ;435else :436 print('ERROR IN SAMPLING') 437print ('start time: ', record_starttime);438print ('end time: ', record_endtime);439df_ts_records['TIME'] = pd.date_range(record_starttime, 
periods=fields['sig_len'], freq='1min'); 440df_ts_records.TIME = pd.to_datetime(df_ts_records.TIME)441df_ts_records['RECORD'] = 'p042930-2190-07-28-20-30n';442#print('--------#####-----')443#print(df_ts_records[ (df_ts_records['TIME'] >= '2151-10-06 05:25:35') & (df_ts_records['TIME'] <= '2151-10-06 05:29:35')])444#print('dropping all null')445#print(df_ts_records.dropna())446# Commented out IPython magic to ensure Python compatibility.447#### IMPORTANT ! DO NOT DELETE448#To get patient records if multiple ts existis 449!pip install wfdb450import io451import pandas as pd452from IPython.display import display453import matplotlib.pyplot as plt454# %matplotlib inline455import numpy as np456import os457import shutil458import posixpath459import wfdb460import urllib.request461import datetime462from collections import namedtuple463df_ts_records_columns = ['RECORD','TIME','HR', 'SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP'] 464df_ts_records = pd.DataFrame(columns=df_ts_records_columns); 465#subject_id= 48149; # per second multiple 466#icu_intime = datetime.datetime(2127, 5, 25, 8, 34,39) # for 48149467#icu_outtime = datetime.datetime(2127, 6, 16, 1, 15,22) # for 4814946866965 23/01/2198 12:41 28/01/2198 19:16 22/01/2198 22:46 23/01/2198 13:46469subject_id= 55638; # per minute 470icu_intime = datetime.datetime(2106, 11, 25, 12, 37,32) # for 55638471icu_outtime = datetime.datetime(2106, 11, 27, 10, 49,33) # for 55638472print ('icu intime =', icu_intime)473print ('icu outtime', icu_outtime)474"""475subject_id= 59864;476icu_intime = datetime.datetime(2173, 5, 16, 12, 14,45)477print ('icu intime =', icu_intime)478icu_outtime = datetime.datetime(2173, 6, 8, 15, 45,23)479print ('icu outtime', icu_outtime)480#2173-05-16 12:14:45,2173-06-08 15:45:23,481"""482wdb_dir_path = 'mimic3wdb/matched/p'+ str(subject_id).zfill(6)[:2] + '/p' + str(subject_id).zfill(6) + '/';483wdb_path_toAllRecords = 'https://archive.physionet.org/physiobank/database/'+ wdb_dir_path + 'RECORDS';484wdb_records = 
urllib.request.urlopen(wdb_path_toAllRecords); 485try:486 df_ts_records.drop(df_ts_records.index, inplace=True)487except:488 print('df_ts_records does not exist')489count_overlap = 0; 490for lines in wdb_records.readlines():491 record = lines.decode("utf-8"); 492 record = str(record).rstrip()493 #print (record[-1:])494 if record[-1:] == 'n':495 print(record);496 #print (wdb_dir_path);497 record = str(record).rstrip()498 499 500 #try:501 signals =''502 fields = ''503 signals,fields = wfdb.rdsamp(record, pn_dir=wdb_dir_path) ; 504 505 list_sig_name = [item.upper().replace(' ','') for item in fields['sig_name']]506 sig_exist_1 = all(x in list_sig_name for x in ['HR', 'SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP']); #%SpO2507 sig_exist_2 = all(x in list_sig_name for x in ['HR', '%SPO2','ABPSYS','ABPDIAS','ABPMEAN','RESP']); 508 record_starttime = datetime.datetime.combine(fields['base_date'] ,fields['base_time'] ) ;509 510 if '%.3f'%(fields['fs']) == '1.000' :511 record_endtime = record_starttime + datetime.timedelta(seconds= (fields['sig_len']-1)) ;512 elif '%.3f'%(fields['fs'])== '0.017' :513 record_endtime = record_starttime + datetime.timedelta(minutes = (fields['sig_len']-1)) ;514 else : 515 print('ERROR IN SAMPLING');516 print(record);517 print(wdb_dir_path);518 print('record START time: ', record_starttime)519 print('record END time: ', record_endtime)520 Range = namedtuple('Range', ['start', 'end'])521 r1 = Range(start= icu_intime, end= icu_outtime)522 r2 = Range(start= record_starttime, end = record_endtime)523 latest_start = max(r1.start, r2.start)524 earliest_end = min(r1.end, r2.end)525 delta = (earliest_end - latest_start).days + 1526 #delta >= 0 :527 print('sig_exist_1 : ', sig_exist_1)528 print('sig_exist_2 : ', sig_exist_2)529 print('delta : ', delta)530 if ( ((sig_exist_1 == True) or (sig_exist_2 == True)) and (delta >= 0)):531 ###532 try:533 df_ts_indv_record_temp.drop(df_ts_indv_record_temp.index, inplace=True)534 except:535 print('individual record for 
a single patient df does not exists')536 537 df_ts_indv_record_temp = pd.DataFrame(columns = df_ts_records_columns ) # individual record for a single patient #safiya538 ###539 df_row_idx = df_ts_records.shape[0] ;540 print('length of signal: ', len(signals))541 print('index of dataframe before inserting into it: ', df_row_idx)542 543 for i in fields['sig_name']:544 545 if i.upper().replace(' ','') == 'HR':546 idx_HR = fields['sig_name'].index(i);547 elif (( i.upper().replace(' ','') == 'SPO2') or (i.upper().replace(' ','') =='%SPO2')):548 idx_SPO2 = fields['sig_name'].index(i);549 elif i.upper().replace(' ','') == 'ABPSYS' :550 idx_ABPSYS = fields['sig_name'].index(i);551 elif i.upper().replace(' ','') == 'ABPDIAS' :552 idx_ABPDIAS = fields['sig_name'].index(i);553 elif i.upper().replace(' ','') == 'ABPMEAN' :554 idx_ABPMEAN = fields['sig_name'].index(i);555 elif i.upper().replace(' ','') == 'RESP' :556 idx_RESP = fields['sig_name'].index(i);557 558 559 560 if count_overlap == 0 : 561 if record_starttime > icu_intime:562 print('inserting nulls before the record start time')563 #print( (datetime.datetime.strptime((icu_intime.strftime('%Y-%m-%d %H:%M' )), '%Y-%m-%d %H:%M')) ) #+ datetime.timedelta(seconds= int(record_starttime.strftime('%S'))) )564 #print(icu_intime.strftime('%Y-%m-%d %H:%M'))565 if '%.3f'%(fields['fs'])== '0.017' :566 minutes_to_insert_start = (datetime.datetime.strptime((record_starttime.strftime('%Y-%m-%d %H:%M' )), '%Y-%m-%d %H:%M'))- (datetime.datetime.strptime((icu_intime.strftime('%Y-%m-%d %H:%M' )), '%Y-%m-%d %H:%M'))567 elif '%.3f'%(fields['fs'])== '1.000' :568 minutes_to_insert_start = record_starttime - icu_intime569 print('minutes_to_insert_start: ', minutes_to_insert_start)570 duration_in_s = minutes_to_insert_start.total_seconds()571 minutes_to_insert_start = divmod(duration_in_s, 60)[0] - 1 572 try:573 df_ts_records_time_temp_start.drop(df_ts_records_time_temp_start.index, inplace=True)574 except :575 print( 
'df_ts_records_time_temp_start does not exist')576 577 df_ts_records_time_temp_start = pd.DataFrame(columns=df_ts_records_columns)578 if '%.3f'%(fields['fs'])== '0.017' :579 df_ts_records_time_temp_start['TIME'] = pd.date_range(icu_intime + datetime.timedelta(minutes=1), 580 periods=minutes_to_insert_start, freq='1min'); 581 elif '%.3f'%(fields['fs'])== '1.000' :582 df_ts_records_time_temp_start['TIME'] = pd.date_range(icu_intime + datetime.timedelta(seconds=1), 583 periods= (duration_in_s-1), freq='S'); 584 print ('INSERTING ONLY NULL IN START:')585 print (df_ts_records_time_temp_start)586 df_ts_indv_record_temp = df_ts_indv_record_temp.append(df_ts_records_time_temp_start, ignore_index=True);587 print('inserting nulls in start IN INDV LEVEL')588 print(df_ts_indv_record_temp)589 try:590 df_ts_records_temp.drop(df_ts_records_temp.index, inplace=True)591 except:592 print( 'df_ts_records_temp does not exist')593 df_ts_records_temp = pd.DataFrame(columns=df_ts_records_columns)594 df_ts_records_temp['HR']= signals[:,idx_HR ] 595 df_ts_records_temp['SPO2']= signals[:,idx_SPO2 ] 596 df_ts_records_temp['ABPSYS']= signals[:,idx_ABPSYS ] 597 df_ts_records_temp['ABPDIAS']= signals[:,idx_ABPDIAS ] 598 df_ts_records_temp['ABPMEAN']= signals[:,idx_ABPMEAN ] 599 df_ts_records_temp['RESP']= signals[:,idx_RESP ] 600 if '%.3f'%(fields['fs'])== '0.017' :601 df_ts_records_temp['TIME'] = pd.date_range(record_starttime, periods=fields['sig_len'], freq='1min'); 602 elif '%.3f'%(fields['fs'])== '1.000' :603 df_ts_records_temp['TIME'] = pd.date_range(record_starttime, periods=fields['sig_len'], freq='S'); 604 df_ts_records_temp.TIME = pd.to_datetime(df_ts_records_temp.TIME)605 df_ts_indv_record_temp = df_ts_indv_record_temp.append(df_ts_records_temp, ignore_index=True); #safiya606 print('inserting nulls in start + first record data')607 print(df_ts_indv_record_temp)608 if '%.3f'%(fields['fs'])== '1.000' : #safiya609 print("AGGREGATING")610 start_idx = 0;611 df_ts_records_new = 
pd.DataFrame(columns=df_ts_records_columns);612 #print('length of new df ' , df_ts_records_new.shape[0] )613 for index, rows in df_ts_indv_record_temp.iterrows():614 print('start index for first: ', start_idx)615 if start_idx >= df_ts_indv_record_temp.shape[0]:616 exit;617 else: 618 619 #print(df_ts_records.iloc[start_idx: (start_idx+60), 2:8])620 array = np.array( df_ts_indv_record_temp.iloc[start_idx: (start_idx+60), 2:8].mean(axis=0))621 #print('printing array of average')622 #print (array)623 current_index = df_ts_records_new.shape[0]624 df_ts_records_new.loc[current_index ,'HR']= array[0]625 df_ts_records_new.loc[current_index,'SPO2']= array[1]626 df_ts_records_new.loc[current_index,'ABPSYS']= array[2]627 df_ts_records_new.loc[current_index,'ABPDIAS']= array[3]628 df_ts_records_new.loc[current_index,'ABPMEAN']= array[4]629 df_ts_records_new.loc[current_index,'RESP']= array[5]630 #print(df_ts_records_new)631 #print('next average')632 start_idx = start_idx+60;633 #print('start index :: ' , start_idx)634 print('# record time: ',df_ts_records_new.shape[0])635 df_ts_records_new['TIME'] = pd.date_range(df_ts_indv_record_temp.loc[0,'TIME'], periods= df_ts_records_new.shape[0], freq='1min'); 636 df_ts_records_new.TIME = pd.to_datetime(df_ts_records_new.TIME)637 #print(df_ts_records_new)638 df_ts_indv_record_temp.drop(df_ts_indv_record_temp.index, inplace=True);639 #df_ts_records = pd.DataFrame(columns=df_ts_records_columns)640 df_ts_records = df_ts_records.append(df_ts_records_new, ignore_index=True);641 print('only first record aggregated at individual record level: ')642 print(df_ts_records_new)643 print('inserting aggregated first record into FINAL SUBJEC DATAFRAME')644 print(df_ts_records)645 df_ts_records_new.drop(df_ts_records_new.index, inplace=True)646 df_ts_records['RECORD'] = record 647 else:648 df_ts_records = df_ts_records.append(df_ts_indv_record_temp, ignore_index=True);649 df_ts_records['RECORD'] = record 650 print('inserting nulls in start + first 
record data into FINAL SUBJEC DATAFRAME')651 print(df_ts_records)652 653 654 else:655 if record_starttime <= icu_outtime :656 last_Record_time = df_ts_records.loc[(df_row_idx-1),'TIME']657 print('main DF last time record: ',last_Record_time )658 if '%.3f'%(fields['fs'])== '0.017' :659 minutes_to_insert = (datetime.datetime.strptime((record_starttime.strftime('%Y-%m-%d %H:%M' )), '%Y-%m-%d %H:%M')) - (datetime.datetime.strptime((last_Record_time.strftime('%Y-%m-%d %H:%M' )), '%Y-%m-%d %H:%M'))660 elif '%.3f'%(fields['fs'])== '1.000' :661 minutes_to_insert = record_starttime - last_Record_time662 duration_in_s = minutes_to_insert.total_seconds()663 minutes_to_insert = divmod(duration_in_s, 60)[0] - 1664 print ('minutes_to_insert: ', minutes_to_insert);665 print('seconds to insert: ', duration_in_s)666 try:667 df_ts_records_time_temp.drop(df_ts_records_time_temp.index, inplace=True);668 df_ts_records_temp.drop(df_ts_records_temp.index, inplace=True);669 except:670 print ('df_ts_records_temp and df_ts_records_time_temp does not exits')671 df_ts_records_time_temp = pd.DataFrame(columns=df_ts_records_columns)672 if '%.3f'%(fields['fs'])== '0.017' :673 df_ts_records_time_temp['TIME'] = pd.date_range(last_Record_time + datetime.timedelta(minutes=1), 674 periods=minutes_to_insert, freq='1min'); 675 elif '%.3f'%(fields['fs'])== '1.000' :676 print('last record time' , last_Record_time)677 print('(duration_in_s-1)' , (duration_in_s-1))678 df_ts_records_time_temp['TIME'] = pd.date_range(last_Record_time + datetime.timedelta(seconds=1), 679 periods=(duration_in_s-1), freq='S'); 680 print ('INSERTING ONLY NULL UNTILL NEXT RECORD START TIME:')681 print (df_ts_records_time_temp)682 df_ts_indv_record_temp = df_ts_indv_record_temp.append(df_ts_records_time_temp, ignore_index=True);683 print('inserting nulls UNTILL NEXT RECORD START TIME INTO INDV LEVEL')684 print(df_ts_indv_record_temp)685 df_ts_records_temp = pd.DataFrame(columns=df_ts_records_columns)686 687 
df_ts_records_temp['HR']= signals[:,idx_HR ] 688 df_ts_records_temp['SPO2']= signals[:,idx_SPO2 ] 689 df_ts_records_temp['ABPSYS']= signals[:,idx_ABPSYS ] 690 df_ts_records_temp['ABPDIAS']= signals[:,idx_ABPDIAS ] 691 df_ts_records_temp['ABPMEAN']= signals[:,idx_ABPMEAN ] 692 df_ts_records_temp['RESP']= signals[:,idx_RESP ] 693 if '%.3f'%(fields['fs'])== '0.017' :694 df_ts_records_temp['TIME'] = pd.date_range(record_starttime, periods=fields['sig_len'], freq='1min'); 695 elif '%.3f'%(fields['fs'])== '1.000' :696 df_ts_records_temp['TIME'] = pd.date_range(record_starttime, periods=fields['sig_len'], freq='S'); 697 df_ts_records_temp.TIME = pd.to_datetime(df_ts_records_temp.TIME)698 699 print('before appending: ')700 701 print( df_ts_records_temp);702 df_ts_indv_record_temp = df_ts_indv_record_temp.append(df_ts_records_temp, ignore_index=True);703 print('inserting nulls in start + SECOND record data')704 print(df_ts_indv_record_temp)705 if '%.3f'%(fields['fs'])== '1.000' : #safiya706 start_idx = 0;707 df_ts_records_new = pd.DataFrame(columns=df_ts_records_columns);708 #print('length of new df ' , df_ts_records_new.shape[0] )709 for index, rows in df_ts_indv_record_temp.iterrows():710 if start_idx >= df_ts_indv_record_temp.shape[0]:711 exit;712 else: 713 714 #print(df_ts_records.iloc[start_idx: (start_idx+60), 2:8])715 array = np.array( df_ts_indv_record_temp.iloc[start_idx: (start_idx+60), 2:8].mean(axis=0))716 #print('printing array of average')717 #print (array)718 current_index = df_ts_records_new.shape[0]719 df_ts_records_new.loc[current_index ,'HR']= array[0]720 df_ts_records_new.loc[current_index,'SPO2']= array[1]721 df_ts_records_new.loc[current_index,'ABPSYS']= array[2]722 df_ts_records_new.loc[current_index,'ABPDIAS']= array[3]723 df_ts_records_new.loc[current_index,'ABPMEAN']= array[4]724 df_ts_records_new.loc[current_index,'RESP']= array[5]725 #print(df_ts_records_new)726 #print('next average')727 start_idx = start_idx+60;728 #print('start index :: ' , 
start_idx)729 print('# record time: ',df_ts_records_new.shape[0])730 df_ts_records_new['TIME'] = pd.date_range(df_ts_indv_record_temp.loc[0,'TIME'], periods= df_ts_records_new.shape[0], freq='1min'); 731 df_ts_records_new.TIME = pd.to_datetime(df_ts_records_new.TIME)732 #print(df_ts_records_new)733 df_ts_indv_record_temp.drop(df_ts_indv_record_temp.index, inplace=True);734 #df_ts_records = pd.DataFrame(columns=df_ts_records_columns)735 df_ts_records = df_ts_records.append(df_ts_records_new, ignore_index=True);736 737 print('only first record aggregated at individual record level: ')738 print(df_ts_records_new)739 print('inserting aggregated first record into FINAL SUBJEC DATAFRAME')740 print(df_ts_records)741 df_ts_records_new.drop(df_ts_records_new.index, inplace=True)742 df_ts_records['RECORD'] = record 743 else:744 df_ts_records = df_ts_records.append(df_ts_indv_record_temp, ignore_index=True);745 df_ts_records['RECORD'] = record 746 print('inserting nulls in start + first record data into FINAL SUBJEC DATAFRAME')747 print(df_ts_records)748 749 750 count_overlap = count_overlap +1751 print('overlap count after all insertions: ', count_overlap )752 else:753 print('Either all 6 signals not exists or there is no overlapt with recording time and ICU in time and out time')754last_record_idx = df_ts_records.shape[0] - 1755all_records_end_time = df_ts_records.loc[last_record_idx,'TIME']756 757if (all_records_end_time < icu_outtime ):758 #print('INSERTING NULLS AT THE END')759 try:760 df_ts_records_time_temp_end.drop(df_ts_records_time_temp_end.index, inplace=True)761 except:762 print('df_ts_records_time_temp_end does not exists')763 #print('main DF last time record: ',last_Record_time )764 if '%.3f'%(fields['fs'])== '0.017' :765 minutes_to_insert_end = (datetime.datetime.strptime((icu_outtime.strftime('%Y-%m-%d %H:%M' )), '%Y-%m-%d %H:%M')) - (datetime.datetime.strptime((all_records_end_time.strftime('%Y-%m-%d %H:%M' )), '%Y-%m-%d %H:%M')) 766 elif 
'%.3f'%(fields['fs'])== '1.000' :767 minutes_to_insert_end = icu_outtime - all_records_end_time;768 duration_in_s = minutes_to_insert_end.total_seconds()769 minutes_to_insert_end = divmod(duration_in_s, 60)[0] - 1770 df_ts_records_time_temp_end = pd.DataFrame(columns=df_ts_records_columns)771 772 df_ts_records_time_temp_end['TIME'] = pd.date_range(all_records_end_time + datetime.timedelta(minutes=1), 773 periods=minutes_to_insert_end, freq='1min'); 774 df_ts_records = df_ts_records.append(df_ts_records_time_temp_end, ignore_index=True);775 776 df_ts_records['RECORD'] = record777print('printing final data for this patient')778print(df_ts_records)779#print(df_ts_records)780df_ts_records.to_csv ('SampleRecordWith6SigalsExtracted.csv', sep=',', index = False, header=True);781from google.colab import files...

Full Screen

Full Screen

AspenDynamicsReader.py

Source: AspenDynamicsReader.py (GitHub)

copy

Full Screen

# %%
"""
# AspenDynamicsReader

A small helper module for reading time-dependent (history) data out of a
running AspenDynamics simulation and plotting the results.

Requirements:
* AspenDynamics (accessed through its COM automation interface via pywin32).
* Inkscape: used to convert the saved .svg plot files to .emf. If you do not
  need .emf output, remove the Inkscape call in `ADPlot._save_figure`.

Typical usage:
1. Open the AspenDynamics file and finish the simulation first.
2. Build an `ADConnector`, collect variable handles, call `read_data`.
3. Plot with `ADPlot.plot_dynamic_results` / `multiplot_dynamic_results`.

Data structure: a "data set" is a list of dicts, one per variable, each of
the form ``{'Name': str, 'Unit': str, 'Data': list}``; element 0 is always
the time axis.

Contributor: Shen, Shiau-Jeng (johnson840205@gmail.com)
Reference: Aspen Custom Modeler 2004.1 - Aspen Modeler Reference Guide
"""


# %%
class ADConnector:
    """Connection to a running AspenDynamics simulation via COM."""

    def __init__(self, path=None, version=None):
        """Attach to AspenDynamics.

        You can specify a path to open a file, or just open the file yourself
        before constructing this object.

        :param path: an existing AspenDynamics file path. If None, the file
            you currently have open is used.
        :param version: AspenDynamics COM version suffix (e.g. 3600 for V10,
            3400 for V8.8). If None, the unversioned ProgID is used.
        """
        import win32com.client as win32

        # Connect to the AspenDynamics COM automation server.
        if version is None:
            self.ad = win32.Dispatch('AD Application')
        else:
            self.ad = win32.Dispatch(f'AD Application {version}')

        if path is None:
            # Grab the currently opened simulation and its flowsheet.
            self.ad_sim = self.ad.Simulation
            print(self.ad_sim.Name)  # echo the file name as a sanity check
            self.ad_flowsheet = self.ad_sim.Flowsheet
        else:
            pass
            # TODO: add the command to open the file at `path` from code;
            # the string-argument dispatch issue still has to be solved.

    @staticmethod
    def _time_points(history):
        """Build the list of recorded sample times from a history object.

        BUGFIX: the original loop was
        ``range(int(StartTime), int(EndTime / Interval + 1))`` which mixes a
        *time value* (start) with an *interval index* (stop) and therefore
        produced wrong sample times whenever ``StartTime != 0``.

        :param history: an AD ``History`` object exposing ``Interval``,
            ``StartTime`` and ``EndTime``.
        :return: list of sample times, ``StartTime`` .. ``EndTime`` inclusive.
        """
        interval = history.Interval
        start_index = int(history.StartTime / interval)
        end_index = int(history.EndTime / interval)
        return [index * interval for index in range(start_index, end_index + 1)]

    @staticmethod
    def read_data(variables):
        """Read the recorded histories of the given AspenDynamics variables.

        The variables must have been recorded on a form beforehand.

        :param variables: list of ``(AD object, axis label)`` tuples, e.g.
            the return values of `stream_moleflowrate` / `controller_pv`.
        :return: list of ``{'Name', 'Unit', 'Data'}`` dicts; element 0 is
            the time axis (``Name='Time'``, ``Unit='hr'``).
        :raises IndexError: if a variable's history length differs from the
            time axis (indicates inconsistent recording settings).
        """
        import re

        data_set = []  # every produced series ends up in this list

        # Build the time axis once from the first variable's record settings
        # (interval / start / end); the same points are reused for sampling
        # every variable below instead of re-deriving them per variable.
        time_points = ADConnector._time_points(variables[0][0].history)
        time_dic = {'Name': 'Time',   # time series title
                    'Unit': 'hr',     # time unit
                    'Data': list(time_points)}
        data_set.append(time_dic)

        # Regex extracting the unit/stream name from a fully qualified COM
        # name such as STREAMS("B1").F, and one for controller names such
        # as TC1.PV (controllers are named differently, so they need their
        # own pattern).
        name_match = re.compile(r"[A-Z]+\([\'|\"](\w+)[\'|\"]\).")
        controller_name_match = re.compile(r"(\w+).(PV|OP)")

        for obj, label_name in variables:  # label_name is shown on the axis
            if obj.TypeName == 'control_signal':
                var_name = controller_name_match.search(obj.Name).group(1)
            else:
                var_name = name_match.search(obj.Name).group(1)

            record_data = obj.History  # all recorded samples live here
            var_dic = {'Name': var_name + ', ' + label_name,
                       'Unit': obj.Units,
                       'Data': [record_data.AtTime(t) for t in time_points]}

            # Defensive check: every variable must align with the time axis.
            if len(var_dic['Data']) != len(time_dic['Data']):
                raise IndexError("The Data length is not equal to the time set, Pleas Check the Code !!!")
            data_set.append(var_dic)
        return data_set

    @staticmethod
    def set_time0_at(data, attime):
        """Return a copy of `data` whose time axis is shifted so that
        `attime` becomes time zero.

        :param data: AspenDynamicsReader-type data set.
        :param attime: the time point to re-zero at.
        :return: the zeroed (deep-copied) data set; the input is untouched.
        """
        import copy
        new_data = copy.deepcopy(data)  # don't mutate the caller's data
        new_data[0]['Data'] = [t - attime for t in data[0]['Data']]
        return new_data

    def blocks_list(self):
        """Return a list of the names of all blocks in the AD file."""
        return [block.Name for block in self.ad_flowsheet.Blocks]

    def streams_list(self):
        """Return a list of the names of all streams in the AD file."""
        return [stream.Name for stream in self.ad_flowsheet.Streams]

    def stream_moleflowrate(self, sname):
        """(AD object, label) for a stream's total mole flowrate."""
        return self.ad_flowsheet.Streams(sname).F, "Moleflowrate"

    def stream_molefraction(self, sname, component):
        """(AD object, label) for a component's mole fraction in a stream."""
        return self.ad_flowsheet.Streams(sname).Zn(component), f"MoleFrac. of {component}"

    def column_qr(self, bname):
        """(AD object, label) for a RadFrac column's reboiler duty."""
        return self.ad_flowsheet.Blocks(bname).QReb, "QR"

    def column_stage_temperature(self, bname, stage):
        """(AD object, label) for a RadFrac stage temperature."""
        return self.ad_flowsheet.Blocks(bname).Stage(stage).T, f"T{stage}"

    def controller_pv(self, bname):
        """(AD object, label) for a controller's PV value."""
        return self.ad_flowsheet.Blocks(bname).PV, "PV"

    def controller_op(self, bname):
        """(AD object, label) for a controller's OP value."""
        return self.ad_flowsheet.Blocks(bname).OP, "OP"

    def controller_sp(self, bname):
        """(AD object, label) for a controller's SP value."""
        return self.ad_flowsheet.Blocks(bname).SP, "SP"


# %%
class ADPlot:
    """Plotting helpers for data sets produced by `ADConnector.read_data`.

    NOTE: matplotlib/math/itertools/subprocess were originally imported in
    the class body, which runs at class-creation time and binds the modules
    as class attributes; the imports are now done lazily inside the methods
    so that merely importing this module does not require matplotlib.
    """

    @staticmethod
    def _save_figure(plt, filename):
        """Save the current figure as .svg, convert to .emf via Inkscape,
        then show it. Extracted: this code was duplicated in both plot
        methods."""
        import subprocess
        plt.savefig(filename + '.svg')
        subprocess.call('C:\\Program Files\\Inkscape\\inkscape.exe ' + filename
                        + '.svg ' + '--export-emf=' + filename + '.emf')
        plt.show()

    def plot_dynamic_results(self,
                             data_set,
                             save_filename,
                             figure_size=(7, 5),
                             font_style="Times New Roman",
                             num_of_column=2):
        """Plot every variable of one data set against time in a
        `num_of_column`-wide grid, then save as .svg/.emf.

        :param data_set: AspenDynamicsReader-type data set.
        :param save_filename: file name (without extension) for the plots.
        """
        import math
        import matplotlib.pyplot as plt

        fig = plt.figure(figsize=figure_size)       # one figure for the grid
        plt.rcParams["font.family"] = font_style    # global font for the figure
        num_plot = len(data_set) - 1                # entry 0 is the time axis
        num_plot_row = math.ceil(num_plot / num_of_column)

        ax_obj_list = []  # keep the axes for the user customisation hook
        for i in range(1, num_plot + 1):
            ax = fig.add_subplot(num_plot_row, num_of_column, i)
            ax_obj_list.append(ax)
            line = ax.plot(data_set[0]['Data'], data_set[i]['Data'], 'b-')
            ax.set_xlim(0, data_set[0]['Data'][-1])
            ax.set_xlabel('Time (hr)')
            ax.set_ylabel(f'{data_set[i]["Name"]} ({data_set[i]["Unit"]})')
            # user hook for per-plot customisation (labels, scales, ...)
            self.user_plot_setting(i, ax_obj_list=ax_obj_list, line=line)
        plt.tight_layout()
        self._save_figure(plt, save_filename)

    def multiplot_dynamic_results(self,
                                  data_set_group,
                                  save_filename,
                                  figure_size=(7, 5),
                                  font_style="Times New Roman",
                                  num_of_column=2,
                                  set_legend_for_each_data_group=None):
        """Overlay several data sets in one grid of plots.

        Axis labels and x-limits default to those of the first data set.

        :param data_set_group: list of AspenDynamicsReader-type data sets.
        :param set_legend_for_each_data_group: optional list of legend
            labels, one per data set, applied to every sub-plot.
        :raises IndexError: if the data sets differ in variable count.
        """
        import itertools
        import math
        import matplotlib.pyplot as plt

        # If more than one data set is to be merged, all of them must
        # contain the same number of variables.
        if len(data_set_group) > 1:
            if len({len(i_data) for i_data in data_set_group}) != 1:
                raise IndexError('The length of each data set which want to merge must be equal')

        fig = plt.figure(figsize=figure_size)
        plt.rcParams["font.family"] = font_style
        num_plot = len(data_set_group[0]) - 1  # entry 0 is the time axis
        num_plot_row = math.ceil(num_plot / num_of_column)

        ax_list = []          # all axes, returned to the customisation hook
        line_group_list = []  # per-axes list of line objects, for styling
        for i in range(1, num_plot + 1):
            ax = fig.add_subplot(num_plot_row, num_of_column, i)
            ax_list.append(ax)
            # BUGFIX: the original inner loop reused the name `num_plot`
            # for the data set, clobbering the plot count.
            for single_data_set in data_set_group:
                ax.plot(single_data_set[0]['Data'], single_data_set[i]['Data'])
            line_group_list.append(ax.lines)
            # x-limit defaults to the first data set's last time point
            ax.set_xlim(0, data_set_group[0][0]['Data'][-1])
            ax.set_xlabel('Time (hr)')
            # y-label defaults to the first data set's series name
            ax.set_ylabel(f'{data_set_group[0][i]["Name"]} ({data_set_group[0][i]["Unit"]})')
        self.user_multiplot_setting(ax_list=ax_list, line_group_list=line_group_list)

        # Optional legend: one label per data set, shown on every sub-plot.
        if set_legend_for_each_data_group is not None:
            for ax_index, line_index in itertools.product(range(len(ax_list)),
                                                          range(len(data_set_group))):
                line_group_list[ax_index][line_index].set_label(set_legend_for_each_data_group[line_index])
            for ax_object in ax_list:
                ax_object.legend(loc='best')
        plt.tight_layout()
        self._save_figure(plt, save_filename)

    def user_plot_setting(self, index, ax_obj_list, line):
        """Hook: override in a subclass to customise individual sub-plots
        (axis labels, scales, ...). Called once per sub-plot."""
        pass

    def user_multiplot_setting(self, ax_list, line_group_list):
        """Hook: override in a subclass to customise overlaid plots."""
        pass

    def change_one_ylabel(self, index_of_plot, label, ax_obj_list):
        """Change the y-label of one sub-plot (1-based index)."""
        # NOTE(review): as in the original code this only takes effect for
        # index_of_plot == 1; the intended guard is unclear, so behaviour
        # is preserved — confirm against callers before generalising.
        if index_of_plot == 1:
            ax_obj_list[index_of_plot - 1].set_ylabel(label)

    def change_one_xlabel(self, index_of_plot, label, ax_obj_list):
        """Change the x-label of one sub-plot (1-based index)."""
        # NOTE(review): see change_one_ylabel — same index-1-only guard.
        if index_of_plot == 1:
            ax_obj_list[index_of_plot - 1].set_xlabel(label)

    def change_all_ylabel(self, label_list, ax_obj_list):
        """Set the y-label of every sub-plot from `label_list` (in order)."""
        import itertools
        for ax_index, label in itertools.zip_longest(range(len(ax_obj_list)), label_list):
            ax_obj_list[ax_index].set_ylabel(label)

    def change_all_xlabel(self, label_list, ax_obj_list):
        """Set the x-label of every sub-plot from `label_list` (in order)."""
        import itertools
        for ax_index, label in itertools.zip_longest(range(len(ax_obj_list)), label_list):
            ax_obj_list[ax_index].set_xlabel(label)

    def change_one_set_linestyle(self, index_of_line, style, ax_obj_list, line_group_list):
        """Set the line style of the `index_of_line`-th data set (1-based)
        on every sub-plot."""
        for i in range(len(ax_obj_list)):
            line_group_list[i][index_of_line - 1].set_linestyle(style)

    def change_one_set_linecolor(self, index_of_line, color, ax_obj_list, line_group_list):
        """Set the line colour of the `index_of_line`-th data set (1-based)
        on every sub-plot."""
        for i in range(len(ax_obj_list)):
            line_group_list[i][index_of_line - 1].set_color(color)


# %%
if __name__ == '__main__':
    import pickle

    class adplot(ADPlot):
        """Demo subclass showing how the customisation hooks are used."""

        def user_plot_setting(self, index, ax_obj_list, line):
            self.change_one_ylabel(1, 'I change the D1 y label', ax_obj_list)

        def user_multiplot_setting(self, ax_list, line_group_list):
            # plot setting change
            label_list = ['C1, T13 (C)', 'QR1 (kW)',
                          'C2, T4 (C)', 'QR2 (kW)',
                          'MoleFrac. of AAol', 'MoleFlowrate of AAol (kmole/hr)',
                          'MoleFrac. of Water', 'MoleFlowrate of Water (kmole/hr)',
                          'MoleFlowrate of Solvent (kmole/hr)']
            self.change_all_ylabel(label_list, ax_list)
            self.change_one_set_linestyle(2, '--', ax_list, line_group_list)
            self.change_one_set_linecolor(1, 'b', ax_list, line_group_list)
            self.change_one_set_linecolor(2, 'r', ax_list, line_group_list)

    ad = ADConnector()
    adp = adplot()
    data = ad.read_data([ad.controller_pv('C1_T13C'),
                         ad.column_qr('C1'),
                         ad.controller_pv('C2_T4C'),
                         ad.column_qr('C2'),
                         ad.stream_molefraction('B1', 'AAOL'),
                         ad.stream_moleflowrate('B1'),
                         ad.stream_molefraction('B2', 'WATER'),
                         ad.stream_moleflowrate('B2'),
                         ad.stream_moleflowrate('SOL')])

    # To keep the data from this run for a later comparison plot, pickle it:
    #     with open('data.pickle', 'ab') as f:
    #         pickle.dump(data, f)
    # ('wb' truncates the file and writes from scratch; 'ab' appends.)
    with open('data_m.pickle', 'rb') as f:
        data1 = pickle.load(f)
        data2 = pickle.load(f)

    adp.plot_dynamic_results(data, save_filename='Dynamic_result1', figure_size=(7, 12))
    adp.plot_dynamic_results(ad.set_time0_at(data, 1),
                             save_filename='Dynamic_result1', figure_size=(7, 12))
    # NOTE(review): the original source was truncated after figure_size in
    # this call; any further keyword arguments were lost.
    adp.multiplot_dynamic_results([data1, data2],
                                  save_filename='Dynamic_result2',
                                  figure_size=(7.5, 14))

Full Screen

Full Screen

zapplive.py

Source:zapplive.py Github

copy

Full Screen

1# -*- coding: utf-8 -*-2import xbmcgui,xbmc,xbmcaddon,os,sys,time,re,threading,shutil,hashlib3from datetime import date, datetime,timedelta4import requests as requests5xbmcPlayer = xbmc.Player()6xbmcPlayer.stop()7temp = xbmc.translatePath("special://temp/")8icon = xbmc.translatePath("special://home/addons/plugin.video.iptvxtra-de/resources/lib/iptv.png")9net = xbmc.translatePath("special://home/addons/plugin.video.iptvxtra-de/resources/lib/netx.png")10xbmc.executebuiltin("Skin.SetString(iptvxtra_replaytext, "+''+")")11xbmc.executebuiltin("Skin.SetString(iptvxtra_replaypuffer, 0)")12xbmc.executebuiltin("Skin.SetString(iptvxtra_replayplaytime, 0)")13xbmc.executebuiltin("Skin.Reset(iptvxtra_replay_segment_ok)")14xbmc.executebuiltin("Skin.Reset(iptvxtra_replay_ok)")15__settings__ = xbmcaddon.Addon(id="plugin.video.iptvxtra-de")16record_active = __settings__.getSetting("record_live_active")17record_time = __settings__.getSetting("record_live_time")18record_endtime = __settings__.getSetting("record_live_endtime")19record_quali = __settings__.getSetting("hd_aktiv")20record_folderx = __settings__.getSetting("record_folder")21max2g = __settings__.getSetting("record_max2g")22fullscreen = __settings__.getSetting("setFull")23user= __settings__.getSetting("login").strip()24pwd= __settings__.getSetting("password").strip()25mdx = hashlib.md5('#user='+user+'pass='+pwd).hexdigest()26modex = sys.argv[1].replace("url=", "")27try: mode = modex.decode("hex")28except: mode = modex29idx = mode.split('***')30xbmcPlayer = xbmc.Player()31xbmcPlayer.stop()32def replay_pfad(record_folderx,temp):33 if record_folderx == 'Kodi Cache Verzeichnis': record_folderx = temp34 if not os.path.isdir(record_folderx):35 xbmc.executebuiltin( "Dialog.Close(infodialog)" ) 36 xbmc.executebuiltin('XBMC.Notification(der Download-Pfad wurde nicht gefunden , der Standard Temp Pfad von Kodi wurde gesetzt ,5000,'+icon+')')37 __settings__.setSetting("record_folder","Kodi Cache Verzeichnis")38 record_folderx = temp39 
record_folder_neu = os.path.join(record_folderx,'IPTVxtraPL','lxa')40 record_folderx = os.path.join(record_folderx,'IPTVxtraPL','erfgbn.txt').replace('erfgbn.txt','')41 shutil.rmtree(record_folderx, ignore_errors=True)42 try: os.makedirs(record_folder_neu)43 except:44 try:45 shutil.rmtree(record_folderx, ignore_errors=True)46 os.makedirs(record_folder_neu)47 except: pass48 return record_folderx49 50def runstream(max2g,record_time,record_folderx,record_quali,record_active,idx,temp,record_endtime,fullsceen,mdx): 51 m3u8_files = getstream(mdx)52 if record_active.strip() != 'true' or 'http://c001.p' not in idx[0]:53 print idx54 if 'giniko.com' in idx[0]: idx[0] = ginico(idx[0])55 try: listitem = xbmcgui.ListItem( idx[4], iconImage=idx[5], thumbnailImage=idx[5])56 except: listitem = xbmcgui.ListItem( idx[4], iconImage=icon, thumbnailImage=icon)57 playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )58 playlist.clear()59 playlist.add( idx[0]+ '|X-Forwarded-For=' + idx[3], listitem )60 xbmc.executebuiltin("Skin.SetString(iptvxtra_addon_aktuell, plugin://plugin.video.iptvxtra-de)") 61 if fullsceen == 'false': xbmcPlayer.play(playlist,listitem,True)62 else: xbmcPlayer.play(playlist,listitem,False) 63 xbmc.executebuiltin( "Dialog.Close(infodialog)" )64 sys.exit(0)65 record_folder = replay_pfad(record_folderx,temp)66 if 'IPTVxtraPL' not in record_folder: record_folder = replay_pfad(record_folderx,temp)67 try: xbmc.executebuiltin('XBMC.Notification('+idx[4]+' , einen Moment der Stream wird analysiert ,30000,'+idx[5]+')')68 except: xbmc.executebuiltin('XBMC.Notification('+idx[4]+' , einen Moment der Stream wird analysiert ,30000,'+icon+')')69 m3u8_file = idx[0]70 pufferpause = 071 xbmc.executebuiltin("Skin.SetString(iptvxtra_titel_aktuell, "+idx[4]+")")72 xbmc.executebuiltin("Skin.SetString(iptvxtra_icon_aktuell, "+idx[5]+")")73 xbmc.executebuiltin("Skin.SetString(iptvxtra_addon_aktuell, plugin://plugin.video.iptvxtra-de)") 74 if record_endtime == '0': rend = 1440; rendx = '04:00'75 
elif record_endtime == '1': rend = 2160; rendx = '06:00'76 elif record_endtime == '2': rend = 2880; rendx = '08:00'77 xbmc.executebuiltin("Skin.SetString(iptvxtra_starttime_aktuell, 00:00)")78 xbmc.executebuiltin("Skin.SetString(iptvxtra_endtime_aktuell, "+rendx+")")79 xbmc.executebuiltin("Skin.SetString(iptvxtra_stream_aktuell, "+m3u8_file.replace('|','&&')+")")80 xbmc.executebuiltin("Skin.SetString(iptvxtra_time_aktuell, "+str(int(time.time()))+")")81 xbmc.executebuiltin("Skin.SetBool(iptvxtra_replay_segment_ok)")82 xbmc.executebuiltin("Skin.SetString(iptvxtra_live_ok, true)")83 if record_active == 'true':84 try:85 if len(filter(lambda x: x.endswith("_stream.ts"), os.listdir(record_folder))) > 0:86 for i in filter(lambda x: x.endswith("_stream.ts"), os.listdir(record_folder)):87 try: os.remove(record_folder + i)88 except: pass89 except: pass90 if 'hd_1' in m3u8_file: quali = 'index_2500_av-p.m3u8'91 else: quali = 'index_1300_av-p.m3u8'92 url = m3u8_file.replace('master.m3u8',quali).split('###')93 try: r=requests.get(url[0], headers={"X-Forwarded-For":idx[3], "User-Agent":"Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16"})94 except:95 try: r=requests.get(url[0].replace('_av-p','_av-b'), headers={"X-Forwarded-For":idx[3], "User-Agent":"Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16"})96 except: 97 xbmc.executebuiltin( "Dialog.Close(infodialog)" )98 xbmc.executebuiltin('XBMC.Notification(Playlist Download Fehler ! , dieser Stream kann gerade nicht geladen werden ,7000,'+icon+')')99 sys.exit(0)100 print r.text101 try: segment_nr = re.findall(r'EXT-X-MEDIA-SEQUENCE:(\d*)',r.text)[0]102 except:103 xbmc.executebuiltin( "Dialog.Close(infodialog)" )104 xbmc.executebuiltin('XBMC.Notification(Segment Download Fehler ! 
, dieser Stream kann noch nicht geladen werden ,7000,'+icon+')')105 sys.exit(0)106 segmentx = re.findall(r'(http://.*?ts)',r.text)107 segment_part = segmentx[0].split(segment_nr)108 a = 0109 segment_urls = []110 for i in range(0,rend):111 new_segnr = str(int(segment_nr) + i)112 new_segment_part = segment_part[0] + new_segnr + segment_part[1]113 segment_urls.append(new_segment_part.replace('_av-b','_av-p'))114 if not os.path.isdir(record_folder): record_folder = replay_pfad(record_folderx,temp)115 116 segment_urlsx=[]117 for id in segment_urls:118 a = a + 1119 # xx = re.findall(r'(akamaihd.net/i/.*?.ts)',id)[0].replace('/','_').replace('akamaihd.net_i_',record_folder).replace('@','_')120 xx = re.findall(r'(edgesuite.net/i/c001/.*?.ts)',id)[0].replace('/','_').replace('edgesuite.net_i_c001_',record_folder).replace('@','_')121 segment_urlsx.append([id,xx.replace('_av-p',''),str(a)]) 122 if a == 2: a = 0123 segment_urls = segment_urlsx124 try:125 if max2g == 'false': max2g = 0126 else: max2g = 1127 except: max2g = 1128 129 try: os.remove(temp + "stop.stp")130 except: pass131 thread_IPTVxtraGrabber = ChunkGrabber(segment_urls, record_folder, idx[3], segment_part, max2g)132 thread_IPTVxtraGrabber.start()133 try: os.remove(record_folder + 'IPTVxtra.m3u8')134 except: pass135 m3u8_file = record_folder + 'IPTVxtra.m3u8'136 fobj_out = open(m3u8_file,"w")137 fobj_out.write('#EXTM3U'+'\n')138 fobj_out.write('#EXT-X-TARGETDURATION:10'+'\n')139 fobj_out.write('#EXT-X-ALLOW-CACHE:YES'+'\n')140 fobj_out.write('#EXT-X-VERSION:3'+'\n')141 fobj_out.write('#EXT-X-MEDIA-SEQUENCE:'+'\n')142 for id in segment_urlsx:143 fobj_out.write('#EXTINF:10.000,'+'\n')144 fobj_out.write(id[1] + '\n')145 fobj_out.write('#EXT-X-DISCONTINUITY'+'\n')146 fobj_out.write('#EXT-X-ENDLIST'+'\n')147 fobj_out.close()148 if record_time == '0': n = 20; puffersoll = 1149 elif record_time == '1': n = 30; puffersoll = 3150 elif record_time == '2': n = 40; puffersoll = 6151 i = 0152 while i <= n:153 pufferak = 
xbmc.getInfoLabel("Skin.String(iptvxtra_replaypuffer)")154 if pufferak == '': pufferak = '0'155 try: 156 if int(pufferak) > puffersoll: break157 except:158 i = 500159 break160 pufferaktuell = xbmc.getInfoLabel("Skin.String(iptvxtra_replaytext)")161 if pufferaktuell == '':162 try: xbmc.executebuiltin('XBMC.Notification('+idx[4]+' ,der Download der Sendung ist gestartet ,100,'+idx[5]+')')163 except: xbmc.executebuiltin('XBMC.Notification('+idx[4]+' ,der Download der Sendung ist gestartet ,100,'+icon+')')164 else:165 try: xbmc.executebuiltin('XBMC.Notification('+pufferaktuell+' ,der Download der Sendung ist gestartet ,100,'+idx[5]+')')166 except: xbmc.executebuiltin('XBMC.Notification('+pufferaktuell+' ,der Download der Sendung ist gestartet ,100,'+icon+')')167 time.sleep(1)168 i = i + 1169 if i > n: pufferpause = 1170 xbmc.executebuiltin( "Dialog.Close(infodialog)" )171 172 try: fobj_out.close()173 except: pass174 try: listitem = xbmcgui.ListItem( idx[4], iconImage=idx[5], thumbnailImage=idx[5])175 except: listitem = xbmcgui.ListItem( idx[4], iconImage=icon, thumbnailImage=icon)176 playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )177 playlist.clear()178 playlist.add( m3u8_file, listitem )179 if fullsceen == 'false': xbmcPlayer.play(playlist,listitem,True)180 else: xbmcPlayer.play(playlist,listitem,False)181 if pufferpause == 1: xbmcPlayer.pause()182 xbmc.executebuiltin( "Dialog.Close(infodialog)" )183 sys.exit(0)184def getstream(mdx):185 import requests as requests186 try: 187 r = requests.get("http://api.iptvxtra.net/check.php", params = {'loc': mdx ,'la':'DE'} )188 rtx = int(r.text)189 except:190 xbmc.executebuiltin('XBMC.Notification(Netzwerkfehler , fehlerhafter Zugang zum Login-Server,25000,'+net+')')191 sys.exit(0)192 if rtx > 5:193 if rtx == 7:194 xbmc.executebuiltin('XBMC.Notification( verbotener Mehrfach-Login !!! 
, Dein Zugang wird gleichzeitig von mehreren Standorten aus benutzt - bei 5 Fehlern wird der Zugang bis 24.00 GMT-0 gesperrt - Kodi bitte neu starten ,60000,'+net+')')195 if rtx == 8:196 xbmc.executebuiltin('XBMC.Notification( verbotener Mehrfach-Login !!! , Dein Zugang wurde automatisch bis 24.00 GMT-0 gesperrt ,60000,'+net+')')197 if rtx == 9:198 xbmc.executebuiltin('XBMC.Notification( Fehler in den Zugangsdaten !!! , Ein Fehler wurde in den Zugangsdaten erkannt - Passwort oder Username muss verkehrt sein ,60000,'+net+')')199 if rtx == 10:200 xbmc.executebuiltin('XBMC.Notification( gesperrter Zugang !!! , Dein Zugang ist auf unbestimmte Zeit gesperrt - wende dich per EMail an uns ,60000,'+net+')')201 sys.exit(0)202def ginico(url):203 204 if 'xxx&User' in url:205 x = url.partition('xxx&User')206 url = x[0] + 'xxx'207 x = url.partition('---')208 url = x[0]209 id = x[2].replace('xxx','')210 r = requests.get("http://giniko.com/watch.php?id=" + id)211 if r.text.find('m3u8?'):212 s = r.text.partition('m3u8?')213 s = s[2].partition('"')214 if len(s[0]) > 120 and len(s[0]) < 134:215 s = url + '?' + s[0]216 return s217 r = requests.get("http://giniko.com/watch.php?id=37")218 if r.text.find('m3u8?'):219 s = r.text.partition('m3u8?')220 s = s[2].partition('"')221 if len(s[0]) > 120 and len(s[0]) < 134:222 s = url + '?' + s[0]223 return s224 r = requests.get("http://giniko.com/watch.php?id=220")225 if r.text.find('m3u8?'):226 s = r.text.partition('m3u8?')227 s = s[2].partition('"')228 if len(s[0]) > 120 and len(s[0]) < 134:229 s = url + '?' 
+ s[0]230 return s231 else: return url232class ChunkGrabber(threading.Thread):233 234 def __init__(self, segment_urls, record_folder, ip, segment_part, max2g):235 xbmc.executebuiltin("Skin.SetString(iptvxtra_replaytext, "+''+")")236 xbmc.executebuiltin("Skin.SetString(iptvxtra_replaypuffer, 0)")237 xbmc.executebuiltin("Skin.SetString(iptvxtra_replayplaytime, 0)")238 threading.Thread.__init__(self)239 self._temp = xbmc.translatePath("special://temp/")240 self._temp_stopfi = xbmc.translatePath("special://temp/stop.stp")241 self._segment_part = segment_part242 self._segment_urls = segment_urls243 self._record_folder = record_folder244 self._m3u8 = record_folder + 'IPTVxtra.m3u8'245 self._ip = ip246 self._max2g = max2g247 self.segmente = 0248 self.puffer_end = 0249 self._neg = 0250 def run(self): 251 try: os.remove(temp + "stop.stp")252 except: pass253 in_stream = False254 pipe_ok = True255 self.puffer_end = 0256 self.segmente = 0257 puffera = 0258 pufferb = 0259 count = 0260 segmentcounter = 0261 ok = ''262 for i in self._segment_urls:263 segmentcount = 600264 if segmentcounter > segmentcount-1: 265 os.remove(self._segment_urls[segmentcounter-segmentcount][1])266 self._neg += 10267 abc = self.playlist(segmentcounter,segmentcount)268 segmentcounter += 1269 270 fifo = open(i[1], "ab")271 if i[0] == self._segment_urls[-1][0] or i[0] == self._segment_urls[-2][0] or i[0] == self._segment_urls[-3][0]: 272 xbmc.executebuiltin("Skin.SetString(iptvxtra_replaytext, Video-Puffer: komplett)")273 self.puffer_end = 1274 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8): break275 while pipe_ok:276 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8): break277 try: daten = requests.get(i[0], headers={"X-Forwarded-For":self._ip}, timeout=5)278 except: 279 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8): break280 if self.puffer_end == 0: ok = self.set_puffer(0,0)281 try: daten = requests.get(i[0].replace('_av-p','_av-b'), 
headers={"X-Forwarded-For":self._ip}, timeout=5)282 except:283 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8): break284 if self.puffer_end == 0: ok = self.set_puffer(0,0)285 try: daten = requests.get(i[0], headers={"X-Forwarded-For":self._ip}, timeout=10)286 except:287 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8): break288 if self.puffer_end == 0: ok = self.set_puffer(0,0)289 try: daten = requests.get(i[0].replace('_av-p','_av-b'), headers={"X-Forwarded-For":self._ip}, timeout=10)290 except:291 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8): break292 if self.puffer_end == 0: ok = self.set_puffer(0,0)293 try: daten = requests.get(i[0], headers={"X-Forwarded-For":self._ip}, timeout=15)294 except: 295 fifo.close()296 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8): break297 if self.puffer_end == 0: ok = self.set_puffer(0,0)298 break299 print '-------------------------------------------------------------------- Daten Fehler - ein Live-Segment wurde uebersprungen'300 if (daten):301 count = 0302 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8): break303 try:304 fifo.write(daten.content)305 fifo.close()306 if self.puffer_end == 0: 307 ok = self.set_puffer(1,0)308 except IOError:309 pipe_ok = False310 break311 in_stream = True312 break313 elif (in_stream):314 time.sleep(1)315 ok = self.set_puffer(0,0)316 continue317 break 318 if not pipe_ok: break319 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8): break320 fifo.close()321 if os.path.isfile(self._temp_stopfi) or not os.path.isfile(self._m3u8):322 xbmc.executebuiltin("Skin.SetString(iptvxtra_replaytext, "+''+")")323 xbmc.executebuiltin("Skin.SetString(iptvxtra_replaypuffer, 0)")324 xbmc.executebuiltin("Skin.SetString(iptvxtra_replayplaytime, 0)")325 def playlist(self, segmentcounter, segmentcount):326 start = 0327 fobj_out = open(self._m3u8,"w")328 fobj_out.write('#EXTM3U'+'\n')329 
fobj_out.write('#EXT-X-TARGETDURATION:10'+'\n')330 fobj_out.write('#EXT-X-ALLOW-CACHE:YES'+'\n')331 fobj_out.write('#EXT-X-VERSION:3'+'\n')332 fobj_out.write('#EXT-X-MEDIA-SEQUENCE:'+'\n')333 for id in self._segment_urls:334 if start >= segmentcounter-segmentcount+2:335 fobj_out.write('#EXTINF:10.000,'+'\n')336 fobj_out.write(id[1] + '\n')337 start += 1338 fobj_out.write('#EXT-X-DISCONTINUITY'+'\n')339 fobj_out.write('#EXT-X-ENDLIST'+'\n')340 fobj_out.close()341 return 'ok'342 def set_puffer(self, zaehl, neg): 343 if zaehl <> 1: zaehl = 0344 puffer = int(xbmc.getInfoLabel("Skin.String(iptvxtra_replaypuffer)"))345 xbmc.executebuiltin("Skin.SetString(iptvxtra_replaypuffer, "+str(puffer + zaehl)+")")346 try: puffera = int(xbmc.Player().getTime())347 except: puffera = 0348 seconds = ((puffer + zaehl) * 10) - puffera349 zaehl = 0350 minutes = seconds // 60351 seconds = seconds % 60352 secx = ''353 if seconds < 10: secx = '0'354 minx = ''355 if minutes < 10: minx = '0'356 pufferx = minx+str(minutes)+':'+secx+str(seconds)357 xbmc.executebuiltin("Skin.SetString(iptvxtra_replaytext, Video-Puffer: "+pufferx+" min)")358 if xbmc.Player.isPlaying and not '0-'in pufferx and puffera > 120 and puffer > -1:359 if neg == 0: xbmc.executebuiltin("Skin.SetString(iptvxtra_replayplaytime, "+str(puffera)+")")360 elif self._neg > 0: xbmc.executebuiltin("Skin.SetString(iptvxtra_replayplaytime, "+str(puffera-self._neg)+")")361 return 'ok'362 def stop(self):363 open(self._temp_stopfi, "a").close()...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run Radish automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful