How to use the convert_df_to_csv method in dbt-osmosis

Best Python code snippets using dbt-osmosis

findash.py

Source:findash.py Github

copy

Full Screen

...112 113 114 115 @st.cache116 def convert_df_to_csv(df):117 return df.to_csv().encode('utf-8')118 st.download_button(label="Download Summary",data=convert_df_to_csv(Summary),file_name='StockSummary.csv',mime='text/csv',)119 120#==============================================================================121# Tab 2122#==============================================================================123124def tab2():125 126 #Dashboard Title127 annotated_text(("Stock Analysis","Chart","#3498DB"))128129 130 # Add table to show stock data131 @st.cache132 def GetStockData(tickers, start_date, end_date):133 return pd.concat([si.get_data(tick, start_date, end_date) for tick in tickers])134 135 col1,col2 = st.columns([2,2])136 137 #To select Period138 select_data = ['1mo', '3mo','6mo','ytd','1y','2y','5y','max']139 default = select_data.index('1y')140 select_Period = col1.selectbox("Select Period", select_data,index = default)141 142 #To Select interval143 select_interval = ['1d','1mo']144 interval= col2.selectbox("Select Interval", select_interval)145 146 #To select Graph147 select_graph = st.radio("Select Graph", ["Line","Candle"])148 149 st.write('<style>div.row-widget.stRadio > div{flex-direction:row;}</style>', unsafe_allow_html=True)150 151 #getting the stock data 152 data = yf.download(ticker, period = select_Period,interval = interval)153 154 data['diff'] = data['Close'] - data['Open']155 data.loc[data['diff']>=0, 'color'] = 'green'156 data.loc[data['diff']<0, 'color'] = 'red'157 158 159 160 if ticker != '-':161 stock_price = GetStockData([ticker], start_date, end_date)162 163 #check box to display the data164 show_data = st.checkbox("Show data") 165 166 if show_data:167 st.write('Stock price data')168 st.dataframe(stock_price)169 170 if select_graph == "Line": 171 if ticker != '-':172 data = yf.download(ticker, period = select_Period,interval = interval)173 174 fig = make_subplots(rows=2, cols=1, shared_xaxes=True, 175 vertical_spacing=0.07, subplot_titles=('Stock 
Trend', 'Volume'), 176 row_width=[0.2, 0.7])177 fig.add_trace(go.Scatter(x=data.index, y=data['Close'],name="Stock Trend",showlegend=True),row= 1,col = 1)178 fig.add_trace(go.Bar(x=data.index, y=data['Volume'],name="Volume",showlegend=True),row=2,col = 1)179 fig.update(layout_xaxis_rangeslider_visible=False)180 fig.update_layout(title="Stock Summary Line Plot", yaxis_title="Close Price")181 fig.update_layout(width = 1000 , height = 600)182 st.plotly_chart(fig)183184 elif select_graph == "Candle":185 fig = make_subplots(rows=2, cols=1, shared_xaxes=True, 186 vertical_spacing=0.03, subplot_titles=('Stock Trend', 'Volume'), 187 row_width=[0.2, 0.7])188189 #candlestick190 fig.add_trace(go.Candlestick(x=data.index, open=data["Open"], high=data["High"],191 low=data["Low"], close=data["Close"], name="Stock Trend"), 192 row=1, col=1)193 fig.update_layout(title="Stock Summary Candlestick Plot", yaxis_title="Close Price")194 195 #Volume196 fig.add_trace(go.Bar(x=data.index, y=data['Volume'],name="Volume",showlegend=True), row=2, col=1)197 fig.add_trace(go.Scatter(x=data.index,y=data['Close'].rolling(window=50).mean(),marker_color='orange',name='50 Day MA'))198 #This removes rangeslider 199 fig.update(layout_xaxis_rangeslider_visible=False)200 201 fig.update_layout(202 width=1000,203 height=600,204 autosize=False,205 template="plotly_white")206 st.plotly_chart(fig)207208209210#==============================================================================211# Tab 3 - Statistics212#============================================================================== 213def tab3():214 215#Dashboard Header 216 annotated_text(("Stock","Statistics","#3498DB"))217 218 219# Getting stock data220 def GetStatsEval(ticker):221 return si.get_stats_valuation(ticker)222 def GetStats(ticker):223 return si.get_stats(ticker)224 225 def convert_df_to_csv(df):226 return df.to_csv().encode('utf-8')227 228 229 if ticker != '-':230 statsval = GetStatsEval(ticker)231 statsval = 
statsval.rename(columns={0:'Valuation Measures',1:'USD'})232 233 #Valuation Measures234 annotated_text(("VALUATION","MEASURES","#3498DB"))235 st.dataframe(statsval,height = 1000)236 #Get Remaining stats237 if ticker != '-':238 stat = GetStats(ticker)239 stat = stat.set_index('Attribute')240 241 #stock Price History242 annotated_text(("STOCK PRICE","HISTORY","#3498DB"))243 Sph = stat.iloc[0:7,]244 st.dataframe(Sph,height = 1000)245 246 #share statistics247 annotated_text(("SHARE","STATISTICS","#3498DB"))248 Shs = stat.iloc[7:19,]249 st.dataframe(Shs,height = 1000)250 251 #Dividend & Splits252 annotated_text(("DIVIDEND","SPLITS","#3498DB"))253 Div = stat.iloc[19:29,]254 st.table(Div)255 256 #Financial Highlights257 annotated_text(("FINANCIAL","HIGHLIGHTS","#3498DB"))258 Finh = stat.iloc[29:31,]259 st.table(Finh)260 261 #Profitability262 annotated_text(("STOCK","PROFITABILITY","#3498DB"))263 Prof = stat.iloc[31:33,]264 st.dataframe(Prof,height = 1000)265 266 #Management Effectiveness267 annotated_text(("Management","Effectiveness","#3498DB"))268 Meff = stat.iloc[33:35,]269 st.dataframe(Meff,height = 1000)270 271 #Income Statement272 IncS = stat.iloc[35:43,]273 annotated_text(("INCOME","STATEMENT","#3498DB"))274 st.dataframe(IncS,height = 1000)275 276 #Balance Sheet277 annotated_text(("BALANCE","SHEET","#3498DB"))278 BalS = stat.iloc[43:49,]279 st.dataframe(BalS,height = 1000)280 281 #Cash Flow282 annotated_text(("CASH","FLOW","#3498DB"))283 Caf = stat.iloc[49:51,]284 st.dataframe(Caf,height = 1000)285 286 287 288# =============================================================================289# df = stat.style.set_properties(**{'background-color': 'black',290# 'color': 'lawngreen',291# 'border-color': 'white'})292# =============================================================================293294 #Download Required Data 295 data_to_download = ["Valuation Measures","stock Price History","share statistics","Dividend & Splits","Financial Highlights",296 
"Profitability","Management Effectiveness","Income Statement","Balance Sheet","Cash Flow"]297 to_download = st.selectbox("Choose Data to Download", data_to_download) 298 299 #Conditions to selecr the300 if to_download == 'Valuation Measures':301 st.download_button(label="Download Stats",data=convert_df_to_csv(statsval),file_name='ValuationMeasures.csv',mime='text/csv',)302 elif to_download == 'stock Price History':303 st.download_button(label="Download Stats",data=convert_df_to_csv(Sph),file_name='stockPriceHistory.csv',mime='text/csv',)304 elif to_download == 'share statistics':305 st.download_button(label="Download Stats",data=convert_df_to_csv(Shs),file_name='shareStatistics.csv',mime='text/csv',)306 elif to_download == 'Dividend & Splits':307 st.download_button(label="Download Stats",data=convert_df_to_csv(Div),file_name='DividendAndSplits.csv',mime='text/csv',)308 elif to_download == 'Financial Highlights':309 st.download_button(label="Download Stats",data=convert_df_to_csv(Finh),file_name='FinancialHighlights.csv',mime='text/csv',) 310 elif to_download == 'Profitability':311 st.download_button(label="Download Stats",data=convert_df_to_csv(Prof),file_name='Profitability.csv',mime='text/csv',)312 elif to_download == 'Management Effectiveness':313 st.download_button(label="Download Stats",data=convert_df_to_csv(Meff),file_name='ManagementEffectiveness.csv',mime='text/csv',)314 elif to_download == 'Income Statement':315 st.download_button(label="Download Stats",data=convert_df_to_csv(IncS),file_name='IncomeStatement.csv',mime='text/csv',)316 elif to_download == 'Balance Sheet':317 st.download_button(label="Download Stats",data=convert_df_to_csv(BalS),file_name='BalanceSheet.csv',mime='text/csv',)318 elif to_download == 'Cash Flow':319 st.download_button(label="Download Stats",data=convert_df_to_csv(Caf),file_name='CashFlow.csv',mime='text/csv',)320 321 322 323 324 325 326327#==============================================================================328# Tab 4 
- Financials 329#============================================================================== 330def tab4():331 332 annotated_text(("Stock", "Financials","#33ADFF"))333 col1,col2 = st.columns([2,2]) ...

Full Screen

Full Screen

CTCscanner.py

Source:CTCscanner.py Github

copy

Full Screen

...278 #AgGrid(tx_log_df, key = 'txs', editable = True, fit_columns_on_grid_load = True)279 280# Download as a csv281 @st.cache282 def convert_df_to_csv(df):283 return df.to_csv().encode('utf-8')284 st.download_button(285 label = 'Download as CSV',286 data=convert_df_to_csv(tx_log_df),287 file_name='transactions.csv',288 mime='text/csv',289 ) 290 291 292 293# CUSTOMER DATABASE294if selected == 'Customer Database':295# Tile and file names296 st.title('Customer Database')297298# Filter299 customer_database_df['DOB'] = pd.to_datetime(customer_database_df['DOB']).dt.date300 customer_database_df = customer_database_df.sort_values('Date_Approved', ascending=True)301 customer_database_df.set_index('ID', inplace=True)302 303 st.write(customer_database_df.shape)304 st.dataframe(customer_database_df)305 #AgGrid(customer_database_df, key = 'customers', editable = True, fit_columns_on_grid_load = True)306 307# Download as a csv308 @st.cache309 def convert_df_to_csv(df):310 return df.to_csv().encode('utf-8')311 st.download_button(312 label = 'Download as CSV',313 data=convert_df_to_csv(customer_database_df),314 file_name='all_customers',315 mime='text/csv',316 )317 318319 320# AGGREGATE TOTALS321if selected == 'Aggregate Volumes':322# Tile and file names323 st.title('Aggregate Volumes')324 325# Frame326 agg_from_concat = concat_df[['Agg_Volume', 'Percentile', 'ID_x', 'Name_y', 'Company_Name', 'Last_TX', 'Status', 'Risk_Rating',327 'Statements_Needed', 'Statements_Collected', 'Review_Needed', 'Last_Review', 'Notes_y']]328 329 agg_from_concat.drop_duplicates(subset=['ID_x'], inplace=True)330 agg_from_concat.set_index('ID_x', inplace=True)331 332# Print333 st.write(agg_from_concat.shape) 334 st.dataframe(agg_from_concat)335336# Download as a csv337 @st.cache338 def convert_df_to_csv(df):339 return df.to_csv().encode('utf-8')340 st.download_button(341 label = 'Download as CSV',342 data=convert_df_to_csv(agg_from_concat),343 file_name='agg_volumes.csv',344 mime='text/csv',345 )346 
347 348# SHARED WALLET349if selected == 'Shared Wallet Scanner':350# Tile and file names351 st.title('Shared Wallet Scanner')352 353# Print354 st.write(shared_wallets_df.shape)355 st.dataframe(shared_wallets_df)356357# Download as a csv358 @st.cache359 def convert_df_to_csv(df):360 return df.to_csv().encode('utf-8')361 st.download_button(362 label = 'Download as CSV',363 data=convert_df_to_csv(shared_wallets_df),364 file_name='shared_wallets.csv',365 mime='text/csv',366 )367368369370# OFAC MATCHES371if selected == 'OFAC Matches':372# Tile and file names373 st.title('OFAC Matches')374 375# Frame376 ofac_from_concat = concat_df[['Address', 'Address_Match', 'ID_x', 'Name_y', 'Company_Name', 'Last_TX', 'Status', 'Risk_Rating',377 'Notes_y']]378 ofac_from_concat.sort_values('Last_TX', ascending=True, inplace=True)379 ofac_from_concat.set_index('Address', inplace=True)380 381# Filter382 ofac_from_concat = ofac_from_concat.loc[(ofac_from_concat['Address_Match'] == True)]383 384 BL_addresses.drop_duplicates()385 address_list = BL_addresses.set_index('Address').to_dict()['Note']386 387# Print388 st.write(ofac_from_concat.shape)389 st.dataframe(ofac_from_concat)390 391# Download as a csv392 @st.cache393 def convert_df_to_csv(df):394 return df.to_csv().encode('utf-8')395 st.download_button(396 label = 'Download as CSV',397 data=convert_df_to_csv(ofac_from_concat),398 file_name='ofac_wallets.csv',399 mime='text/csv',400 )401 402# Show BL403 show_bl = st.checkbox("Show Blacklist")404 if show_bl:405 st.write(address_list)406 407408409# All410if selected == 'All Data':411 412# Tile and file names413 st.title('All')414 concat_df.drop(['indicator_column', 'indicator_column2'], axis=1, inplace=True)415 concat_df = concat_df.sort_values(by='Control', ascending=True)416 concat_df.set_index('Control', inplace=True)417 418# Define filter419 def filter_dataframe(df: pd.DataFrame) -> pd.DataFrame:420 """421 Adds a UI on top of a dataframe to let viewers filter columns422423 Args:424 df 
(pd.DataFrame): Original dataframe425426 Returns:427 pd.DataFrame: Filtered dataframe428 """429 modify = st.checkbox("Add filters", key = "modify")430431 if not modify:432 return df433 df = df.copy()434435 # Try to convert datetimes into a standard format (datetime, no timezone)436 for col in df.columns:437 if is_object_dtype(df[col]):438 try:439 df[col] = pd.to_datetime(df[col])440 except Exception:441 pass442443 if is_datetime64_any_dtype(df[col]):444 df[col] = df[col].dt.tz_localize(None)445446 modification_container = st.container()447448 with modification_container:449 to_filter_columns = st.multiselect("Filter On", df.columns, key = "to_filter_columns")450 for column in to_filter_columns:451 left, right = st.columns((1, 20))452 # Treat columns with < 10 unique values as categorical453 if is_categorical_dtype(df[column]) or df[column].nunique() < 10:454 user_cat_input = right.multiselect(f"Values for {column}",455 df[column].unique(),456 default=list(df[column].unique()),457 )458 df = df[df[column].isin(user_cat_input)]459 elif is_numeric_dtype(df[column]):460 _min = float(df[column].min())461 _max = float(df[column].max())462 step = (_max - _min) / 100463 user_num_input = right.slider(f"Values for {column}",464 min_value=_min,465 max_value=_max,466 value=(_min, _max),467 step=step,468 )469 470 df = df[df[column].between(*user_num_input)]471 elif is_datetime64_any_dtype(df[column]):472 user_date_input = right.date_input(f"Values for {column}",473 value=(474 df[column].min(),475 df[column].max(),476 ),477 )478 if len(user_date_input) == 2:479 user_date_input = tuple(map(pd.to_datetime, user_date_input))480 start_date, end_date = user_date_input481 df = df.loc[df[column].between(start_date, end_date)]482 else:483 user_text_input = right.text_input(f"Substring or regex in {column}",484 )485 if user_text_input:486 df = df[df[column].astype(str).str.contains(user_text_input)]487488489 check2 = st.checkbox("Remove Columns", key = "check2")490491 if not check2:492 
return df493 df = df.copy()494495 modification_container2 = st.container()496 columns = list(df.columns.unique())497498 with modification_container2:499 to_remove_columns = st.multiselect("Remove Columns", columns, default = columns, key = "to_remove_columns")500 df = df[to_remove_columns]501502 return df503 504# Print505 df = concat_df506 df1 = filter_dataframe(df)507 st.write(df1.shape)508 st.dataframe(df1)509510# Download as a csv511 @st.cache512 def convert_df_to_csv(df):513 return df.to_csv().encode('utf-8')514 st.download_button(515 label = 'Download as CSV',516 data=convert_df_to_csv(df1),517 file_name='custom_data_filters.csv',518 mime='text/csv',519 ) 520 521 522 523# CUSTOMER SEARCH524if selected == 'Customer Search':525 526# Tile and file names ...

Full Screen

Full Screen

app.py

Source:app.py Github

copy

Full Screen

...148 prediction_MLGL = pd.DataFrame(data = [prediction_MLGL], columns = colonnes )149 # concat choosen features and prediction to create dataframe150 result_mlgl = pd.concat([input_df,prediction_MLGL], axis = 1)151 # convert result to csv and save it152 def convert_df_to_csv(result_mlgl):153 return result_mlgl.to_csv().encode('utf-8') 154 st.download_button(label="Download data as CSV",data = convert_df_to_csv(result_mlgl),155 file_name='prediction_TZ_MLGL.csv',mime='text/csv')156 # prediction on loaded csv file157 158 #st.title("make prediction on your own file")159 #uploaded_file = st.file_uploader("Choose your file")160 #if uploaded_file is not None:161 #df_loaded = pd.read_csv(uploaded_file)162 #uploaded_file= pd.DataFrame(uploaded_file)163 #if st.button('prediction'):164 #df_loaded = pd.DataFrame(scaler_TZ_AC_MLGL_correl_tx.transform(df_loaded), columns =df_loaded.columns)165 #prediction = load_model_TZ_AC_MLGL_correl_tx.predict(df_loaded)166 #st.success('value of TZ_AC_MLGL_air_max is {}'.format(prediction))167 #convert result to csv file168 # prediction_MLGL_loaded_file = pd.to_numeric(prediction)169 # colonnes = ["TZ_AC_MLGL_air_max"]170 171 #prediction_MLGL_loaded_file = pd.DataFrame(data = [[prediction_MLGL_loaded_file]], columns = colonnes )172 173 #def convert_df_to_csv(prediction_MLGL_loaded_file):174 # IMPORTANT: Cache the conversion to prevent computation on every rerun175 #return prediction_MLGL_loaded_file.to_csv().encode('utf-8') 176 #st.download_button(label="Download data as CSV",data = convert_df_to_csv(prediction_MLGL_loaded_file),177 #file_name='prediction_MLGL_loaded_file.csv',mime='text/csv') 178 179 180 181 #prediction for second model182 if task2: 183 st.title("Right vertical force on the ground air")184 scaler_TZ_AC_MLGR_air_max= joblib.load(open("./scalers/scaler_TZ_AC_MLGR_air_max.save",'rb'))185 df = pd.DataFrame(scaler_TZ_AC_MLGR_air_max.transform(input_df), columns = input_df.columns)186 ## load the model file187 
load_model_TZ_AC_MLGR_air_max = pickle.load(open('./models/model_TZ_AC_MLGR_air_max.pkl', 'rb'))188 189 # use the model to predict target190 if st.button('predict Right vertical force on the ground "air" '):191 prediction = load_model_TZ_AC_MLGR_air_max.predict(df)192 st.success('Vertical ground force "air" of main lainding gear right{}'.format(prediction))193 194 # Download result to csv file195 prediction_MLGR = float(prediction)196 colonnes = ["TZ_AC_MLGR_air_max"]197 prediction_MLGR = pd.DataFrame(data = [[prediction_MLGR]], columns = colonnes )198 199 # concat choosen features and prediction to create dataframe200 result_mlgr = pd.concat([input_df,prediction_MLGR], axis = 1)201 # convert result to csv and save it202 def convert_df_to_csv(result_mlgr):203 # IMPORTANT: Cache the conversion to prevent computation on every rerun204 return result_mlgr.to_csv().encode('utf-8') 205 st.download_button(label="Download data as CSV",data = convert_df_to_csv(result_mlgr),206 file_name='prediction_TZ_MLGR.csv',mime='text/csv')207 208 209 # show user profile 210 elif task3:# == "Users Profile":211 #st.subheader("Users Profile")212 user_result = view_all_users()213 clean_db = pd.DataFrame(user_result,columns=['User','Password'])214 st.dataframe(clean_db) 215 else:216 st.warning("Incorrect Username and Password")217 st.info('Retry or SignUp')218 #sign up session219 elif choice == "SignUp":...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run dbt-osmosis automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful