How to use the execute_raw method in pytest-play

Best Python code snippets using pytest-play. The listings below show how different open-source projects define and call an execute_raw helper for running raw SQL against a live database.
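As a quick illustration, helpers like the ones in the first listing can be exercised from an ordinary pytest test. This is a minimal sketch, assuming the db_stats.py module shown below is on the import path and an AiiDA profile with a PostgreSQL backend is configured; the 90% threshold simply follows the guidance in the cache_hit_ratio docstring.

# Minimal pytest sketch (illustrative, not from the original listings).
# Assumes db_stats.py (first listing below) is importable and an
# AiiDA profile with a PostgreSQL backend is configured.
import db_stats


def test_cache_hit_ratio_is_healthy():
    # cache_hit_ratio() docstring: "Ideally the hit ratio should be > 90%"
    assert db_stats.cache_hit_ratio() > 90.0


def test_no_deadlocks():
    df = db_stats.anomalies_df()
    assert (df["deadlocks"] == 0).all()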

db_stats.py

Source: db_stats.py (GitHub)


...
https://gist.github.com/anvk/475c22cbca1edc5ce94546c871460fdd
"""
from functools import wraps
from pathlib import Path


def execute_raw(raw):
    from aiida.manage.manager import get_manager
    backend = get_manager()._load_backend(schema_check=False)
    return backend.execute_raw(raw)


# ------------------
# -- Memory Size --
# ------------------


def memory_db_df():
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT
            datname,
            pg_database_size(datname)
        from pg_database
        order by pg_database_size(datname);
        """
    )
    df = pd.DataFrame(result, columns=["database", "size_mb"])
    df["size_mb"] = df["size_mb"] * 1e-6  # bytes -> MB
    return df


def memory_pg_classes_df():
    """Return sizes grouped by relation kind, from `pg_class`.

    `pg_class` catalogs tables and most everything else that has columns,
    or is otherwise similar to a table.
    See https://www.postgresql.org/docs/9.3/catalog-pg-class.html
    """
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT
            sum(pg_relation_size(pg_class.oid))::bigint,
            nspname,
            CASE pg_class.relkind
                WHEN 'r' THEN 'table'
                WHEN 'i' THEN 'index'
                WHEN 'S' THEN 'sequence'
                WHEN 'v' THEN 'view'
                WHEN 't' THEN 'toast'
                ELSE pg_class.relkind::text
            END
        FROM pg_class
        LEFT OUTER JOIN pg_namespace ON (pg_namespace.oid = pg_class.relnamespace)
        GROUP BY pg_class.relkind, nspname
        ORDER BY sum(pg_relation_size(pg_class.oid)) DESC;
        """
    )
    df = pd.DataFrame(result, columns=["size_mb", "namespace", "relkind"])
    df.sort_index(axis=1, inplace=True)
    df["size_mb"] = df.size_mb * 1e-6
    return df


def memory_tables_df():
    """Return statistics on table and index sizes.

    See https://www.postgresql.org/docs/current/monitoring-stats.html
    """
    import pandas as pd
    result = execute_raw(
        r"""
        select
            relname,
            pg_relation_size(relname::regclass) as table_size,
            pg_total_relation_size(relname::regclass) - pg_relation_size(relname::regclass) as index_size,
            pg_total_relation_size(relname::regclass) as total_size
        from pg_stat_user_tables
        """
    )
    df = pd.DataFrame(result, columns=["name", "table_mb", "indices_mb", "total_mb"])
    df.set_index("name", inplace=True)
    df = df * 1e-6
    df.sort_values("total_mb", ascending=False, inplace=True)
    return df


# -------------
# -- Indices --
# -------------


def indices_list_df():
    """Return a list of indices by table and columns."""
    import pandas as pd
    result = execute_raw(
        r"""
        select
            t.relname as table_name,
            i.relname as index_name,
            string_agg(a.attname, ',') as column_name
        from
            pg_class t,
            pg_class i,
            pg_index ix,
            pg_attribute a
        where
            t.oid = ix.indrelid
            and i.oid = ix.indexrelid
            and a.attrelid = t.oid
            and a.attnum = ANY(ix.indkey)
            and t.relkind = 'r'
            and t.relname not like 'pg_%'
        group by
            t.relname,
            i.relname
        order by
            t.relname,
            i.relname;
        """
    )
    df = pd.DataFrame(result, columns=["table", "index", "columns"])
    df.set_index(["table", "columns"], inplace=True)
    return df


def indices_stats_df(sort_size=False, with_sql=False):
    """Return statistics on indices.

    See https://www.postgresql.org/docs/current/monitoring-stats.html
    """
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT
            pt.tablename AS TableName,
            t.indexname AS IndexName,
            pc.reltuples AS TotalRows,
            pg_relation_size(quote_ident(pt.tablename)::text) AS TableSize,
            pg_relation_size(quote_ident(t.indexrelname)::text) AS IndexSize,
            t.idx_scan AS TotalNumberOfScan,
            t.idx_tup_read AS TotalTupleRead,
            t.idx_tup_fetch AS TotalTupleFetched,
            pgi.indexdef AS IndexDef
        FROM pg_tables AS pt
        LEFT OUTER JOIN pg_class AS pc
            ON pt.tablename = pc.relname
        LEFT OUTER JOIN
        (
            SELECT
                pc.relname AS TableName,
                pc2.relname AS IndexName,
                psai.idx_scan,
                psai.idx_tup_read,
                psai.idx_tup_fetch,
                psai.indexrelname
            FROM
                pg_index AS pi
            JOIN pg_class AS pc
                ON pc.oid = pi.indrelid
            JOIN pg_class AS pc2
                ON pc2.oid = pi.indexrelid
            JOIN pg_stat_all_indexes AS psai
                ON pi.indexrelid = psai.indexrelid
        ) AS T
            ON pt.tablename = T.TableName
        LEFT OUTER JOIN pg_indexes as pgi
            ON T.indexname = pgi.indexname
        WHERE pt.schemaname = 'public'
        ORDER BY 1;
        """
    )
    columns = [
        "table",
        "index",
        "rows",
        "table_size_mb",
        "index_size_mb",
        # Number of index scans initiated on this index
        "scans",
        # Number of index entries returned by scans on this index
        "read",
        # Number of live rows fetched by index scans
        "fetched",
        "sql",
    ]
    df = pd.DataFrame(result, columns=columns)
    df.set_index(["table", "index"], inplace=True)
    df["table_size_mb"] = df.table_size_mb * 1e-6  # bytes -> MB
    df["index_size_mb"] = df.index_size_mb * 1e-6
    if not with_sql:
        df.drop("sql", axis=1, inplace=True)
    if sort_size:
        df.sort_values("index_size_mb", ascending=False, inplace=True)
    else:
        df.sort_index(axis=0, inplace=True)
    return df


def indices_check_df(min_size_mb=0.1):
    """Check for tables that may require an index."""
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT
            relname,
            seq_scan,
            idx_scan,
            pg_relation_size(relname::regclass) AS rel_size,
            n_live_tup
        FROM pg_stat_all_tables
        WHERE schemaname='public' AND pg_relation_size(relname::regclass) > {min_size};
        """.format(min_size=int(min_size_mb * 1e6))
    )
    df = pd.DataFrame(
        result,
        columns=[
            "table",
            # Number of sequential scans initiated on this table
            "seq_scans",
            # Number of index scans initiated on this table
            "idx_scans",
            "size_mb",
            "live_rows",
        ],
    )
    df["idx_usage"] = 100 * df.idx_scans / (df.seq_scans + df.idx_scans)
    df["idx_required"] = (df.seq_scans - df.idx_scans) > 0
    df["size_mb"] = df["size_mb"] * 1e-6
    df.set_index("table", inplace=True)
    return df


# --------------------
# -- Data Integrity --
# --------------------


def cache_hit_ratio():
    """Ideally the hit ratio should be > 90%."""
    result = execute_raw(
        r"""
        SELECT
            sum(blks_hit)*100/sum(blks_hit+blks_read) as hit_ratio
        from pg_stat_database;
        """
    )
    return float(result[0][0])


def anomalies_df():
    """
    - c_commit_ratio should be > 95%
    - c_rollback_ratio should be < 5%
    - deadlocks should be close to 0
    - conflicts should be close to 0
    - temp_files and temp_bytes: watch out for them
    """
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT
            datname,
            (xact_commit*100)/nullif(xact_commit+xact_rollback,0) as c_commit_ratio,
            (xact_rollback*100)/nullif(xact_commit+xact_rollback, 0) as c_rollback_ratio,
            deadlocks,
            conflicts,
            temp_files,
            temp_bytes
        FROM pg_stat_database;
        """
    )
    df = pd.DataFrame(
        result,
        columns=[
            "database",
            "commit_ratio",
            "rollback_ratio",
            "deadlocks",
            "conflicts",
            "temp_files",
            "temp_size_mb",
        ],
    )
    df["temp_size_mb"] = df["temp_size_mb"] * 1e-6
    return df


def write_activity_df(limit=50):
    """
    hot_rate = rows HOT updated / total rows updated.

    Heap Only Tuple (HOT) means the new version of an updated tuple is
    created, where possible, on the same page as the old tuple, with no
    separate index update required.
    Ideally hot_rate should be close to 100.
    You might be blocking HOT updates with indexes on updated columns;
    if those are expendable, you might get better overall performance
    without them.
    """
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT
            s.relname,
            pg_relation_size(relid),
            coalesce(n_tup_ins,0) + 2 * coalesce(n_tup_upd,0) -
                coalesce(n_tup_hot_upd,0) + coalesce(n_tup_del,0) AS total_writes,
            (coalesce(n_tup_hot_upd,0)::float * 100 / (case when n_tup_upd > 0 then n_tup_upd else 1 end)::float) AS hot_rate
            /* This returns None
            (SELECT v[1] FROM regexp_matches(reloptions::text,E'fillfactor=(\d+)') as r(v) limit 1) AS fillfactor
            */
        from pg_stat_all_tables s
        join pg_class c ON c.oid = relid
        order by total_writes desc
        limit {limit};
        """.format(limit=limit)
    )
    columns = [
        "table",
        "size_mb",
        "writes",
        "hot_rate",
        # "fill_factor"
    ]
    df = pd.DataFrame(result, columns=columns)
    df["size_mb"] = df["size_mb"] * 1e-6
    df.set_index("table", inplace=True)
    return df


# How many indexes are in cache
def cached_indices():
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT
            sum(idx_blks_read) as idx_read,
            sum(idx_blks_hit) as idx_hit,
            (sum(idx_blks_hit) - sum(idx_blks_read)) / sum(idx_blks_hit) as ratio
        FROM pg_statio_user_indexes;
        """
    )
    # Return the single aggregate row as a Series
    return pd.Series(dict(zip(("idx_read", "idx_hit", "ratio"), result[0])))


def dirty_pages():
    """maxwritten_clean and buffers_backend_fsync should be 0."""
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT buffers_clean, maxwritten_clean, buffers_backend_fsync from pg_stat_bgwriter;
        """
    )
    return pd.Series(
        dict(
            zip(
                ("buffers_clean", "maxwritten_clean", "buffers_backend_fsync"),
                result[0],
            )
        )
    )


# -------------
# -- Queries --
# -------------


def requires_pg_stat(func):
    @wraps(func)
    def wrapper(*args, **kwds):
        try:
            return func(*args, **kwds)
        except Exception as err:
            if 'relation "pg_stat_statements" does not exist' in str(err):
                raise RuntimeError(
                    "This function requires that the pg_stat_statements "
                    "extension is initialised on your database"
                )
            raise
    return wrapper


@requires_pg_stat
def query_reset_stats():
    return execute_raw("select pg_stat_statements_reset();")


@requires_pg_stat
def query_stats_df(limit=100):
    """Return the most CPU-intensive queries.

    See: https://www.postgresql.org/docs/9.4/pgstatstatements.html
    """
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT
            query,
            round(total_time::numeric, 2) AS total_time,
            calls,
            rows,
            round((100 * total_time / sum(total_time::numeric) OVER ())::numeric, 2) AS percentage_cpu
        FROM pg_stat_statements
        ORDER BY total_time DESC
        LIMIT {limit};
        """.format(limit=limit)
    )
    # avg_time = total_time / calls
    df = pd.DataFrame(
        result, columns=["sql", "time_seconds", "calls", "rows", "cpu_percent"]
    )
    df["time_seconds"] = df["time_seconds"].astype(float) * 1e-3  # total_time is in ms
    df["type"] = df.sql.apply(lambda s: s.split()[0].upper())
    return df


@requires_pg_stat
def query_write_df():
    """Return the queries that write most (to shared_buffers).

    See: https://www.postgresql.org/docs/9.4/pgstatstatements.html
    """
    import pandas as pd
    result = execute_raw(
        r"""
        SELECT
            query,
            shared_blks_dirtied
        from pg_stat_statements
        where shared_blks_dirtied > 0
        order by 2 desc;
        """
    )
    return pd.DataFrame(result, columns=["sql", "blocks_written"])


if __name__ == "__main__":
    import argparse, os
    parser = argparse.ArgumentParser()
    parser.add_argument("commands", choices=["queries", "indices", "reset"], nargs='+')
...


pgcommands.py

Source: pgcommands.py (GitHub)


...
    cursor.execute(qry, [index['name']])
    row = cursor.fetchone()
    cursor.close()
    return row[0] == 1


def execute_raw(sql, database='default', parms=None):
    """
    Execute a raw SQL command.

    sql (string): SQL command
    database (string): the database name configured in settings
    """
    try:
        cursor = connections[database].cursor()
        if parms is not None:
            cursor.execute(sql, parms)
        else:
            cursor.execute(sql)
        cursor.close()
        return 0
    except Exception as e:
        logging.error("Can't execute %s -- Exception raised %s" % (sql, e))
        return 1


def drop_index(index, database='default'):
    """
    Check if an index exists and drop it.

    index (dict): index description
    """
    if 'database' in index:
        database = index['database']
    if index_exists(index, database):
        logging.info("Will drop %s" % index['name'])
        res = execute_raw(index['cmd'], database)
        logging.info("%s dropped" % index['name'])
    else:
        res = 1
        logging.info("%s doesn't exist" % index['name'])
    return res


def create_index(index, database='default'):
    """
    Create an index.

    index (dict): index description, e.g.
        {"name": "foo",
         "database": "default",
         "cmd": "CREATE INDEX foo_idx ON table (column)"}
    """
    if 'database' in index:
        database = index['database']
    if index_exists(index, database):
        logging.info("%s already exists" % index['name'])
        res = 1
    else:
        logging.info("Will create %s" % index['name'])
        res = execute_raw(index['cmd'], database)
        logging.info("%s created" % index['name'])
    return res


def create_extensions(extensions, database='default'):
    """
    Create all extensions.
    """
    for extension in extensions:
        cmd = "CREATE EXTENSION IF NOT EXISTS %s" % (extension)
        logging.info("Will create extension %s on database %s" % (extension, database))
        res = execute_raw(cmd, database=database)
        logging.info("%s created" % extension)
...
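For context, here is a sketch of how these helpers might be driven. The connections and logging names suggest a Django project; the index description and extension name below are hypothetical, purely for illustration.

# Hypothetical usage sketch; assumes a Django project where "default"
# names a configured database in settings.py.
from pgcommands import create_index, create_extensions

create_extensions(["pg_trgm"], database="default")  # illustrative extension

index = {
    "name": "accounts_login_idx",  # illustrative index name
    "database": "default",
    "cmd": "CREATE INDEX accounts_login_idx ON accounts (login)",
}
if create_index(index) == 0:
    print("index created")
else:
    print("index already exists or creation failed")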


mappers.py

Source: mappers.py (GitHub)


...
            self.embedded_list("settings_raw", AccountSettings)
        ])

    @classmethod
    def up(cls):
        cls.pool.db.execute_raw(
            """
            CREATE TABLE `Accounts` (
                `Login` varchar(255) NOT NULL,
                `Password` varchar(32) NOT NULL,
                `Token` varchar(32) DEFAULT NULL,
                PRIMARY KEY (`Login`)
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
            """
        )

    @classmethod
    def down(cls):
        cls.pool.db.execute_raw(
            """DROP TABLE IF EXISTS `Accounts`;"""
        )


class AccountSettingsMapper(SqlMapper):
    dependencies = [AccountsMapper]

    def bind(self):
        """Set up the mapping."""
        from z9.core.auth.models import AccountSetting, AccountSettings, Accounts
        self.set_new_item(AccountSetting)
        self.set_new_collection(AccountSettings)
        self.set_collection_name("AccountSettings")
        self.set_map([
            self.int("id", "ID"),
            self.link("account", "Login", collection=Accounts),
            self.str("name", "Name"),
            self.str("value", "Value"),
        ])

    @classmethod
    def up(cls):
        cls.pool.db.execute_raw(
            """
            CREATE TABLE `AccountSettings` (
                `ID` INT(11) NOT NULL AUTO_INCREMENT,
                `Login` VARCHAR(255) NOT NULL,
                `Name` VARCHAR(255) NOT NULL,
                `Value` VARCHAR(255) NULL DEFAULT NULL,
                PRIMARY KEY (`ID`),
                INDEX `Login` (`Login`)
            ) ENGINE=InnoDB DEFAULT CHARSET=utf8;
            """
        )

    @classmethod
    def down(cls):
        cls.pool.db.execute_raw(
            """DROP TABLE IF EXISTS `AccountSettings`;"""
...
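Finally, a sketch of how these mappers' schema migrations might be applied and rolled back, assuming the z9 framework has wired cls.pool.db to a live MySQL connection; the call order below follows the declared dependencies.

# Illustrative only; assumes the z9 framework has configured
# SqlMapper.pool.db with a live MySQL connection.
from mappers import AccountsMapper, AccountSettingsMapper

# Apply in dependency order: Accounts first, then its settings table.
AccountsMapper.up()
AccountSettingsMapper.up()

# Roll back in reverse order to respect the Login link.
AccountSettingsMapper.down()
AccountsMapper.down()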


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.


YouTube

You can also refer to video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run pytest-play automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

