How to use the db_type method in Hypothesis

Best Python code snippets using Hypothesis

CompMigrStat.py

Source: CompMigrStat.py (GitHub)


import errno
import os
import re
import sqlite3
import sys

sys.path.insert(0, "%s/work/mutant/ec2-tools/lib/util" % os.path.expanduser("~"))
import Cons
import Util
import Conf


def GetFnStat(fn0, fn1):
  conn = _GenDB(fn0, fn1)
  # https://docs.python.org/2/library/sqlite3.html#row-objects
  conn.row_factory = sqlite3.Row
  cur = conn.cursor()
  _OverallStat(cur, fn0, fn1)
  _HourlyStat(cur)


def _GenDB(fn0, fn1):
  with Cons.MT("Building a stat DB ..."):
    # Put the SSTable creation info in a DB and generate statistics
    fn_db = "%s/sst-creation-info.db" % Conf.GetOutDir()
    try:
      os.remove(fn_db)
    except OSError as e:
      if e.errno != errno.ENOENT:
        raise e
    table_schema = """ CREATE TABLE IF NOT EXISTS sst_creation_info (
        fn text NOT NULL
        , db_type text NOT NULL
        , hour integer NOT NULL
        , sst_id integer NOT NULL
        , sst_size integer NOT NULL
        , job_id integer NOT NULL
        , creation_reason text NOT NULL
        , temp_triggered_single_sst_migr BOOLEAN
        , migr_dirc text NOT NULL
        ); """
    conn = sqlite3.connect(fn_db)
    if conn is None:
      raise RuntimeError("Error! cannot create the database connection.")
    cur = conn.cursor()
    cur.execute(table_schema)
    q = """INSERT INTO sst_creation_info (fn, db_type, hour, sst_id, sst_size, job_id, creation_reason, temp_triggered_single_sst_migr, migr_dirc)
        VALUES (?,?,?,?,?,?,?,?,?)"""
    for db_type in ["RocksDB", "Mutant"]:
      fn = fn0 if db_type == "RocksDB" else fn1
      with open(fn) as fo:
        for line in fo:
          if line.startswith("#"):
            continue
          line = line.strip()
          t = re.split(r" +", line)
          hour = int(t[1].split(":")[0])
          sst_id = t[6]
          # Ignore when sst_id is "-", which means an sstable was deleted.
          if sst_id == "-":
            continue
          sst_id = int(sst_id)
          sst_size = int(t[5])
          job_id = int(t[7])
          # Creation reason: R, F, C, -
          cr = t[8]
          temp_triggered_single_sst_migr = (t[9] == "T")
          migr_dirc = t[10]
          cur.execute(q, (fn, db_type, hour, sst_id, sst_size, job_id, cr, temp_triggered_single_sst_migr, migr_dirc))
    conn.commit()
    cur.close()
    return conn


def _OverallStat(cur, fn0, fn1):
  with Cons.MT("Overall stat ..."):
    for db_type in ["RocksDB", "Mutant"]:
      fn = fn0 if db_type == "RocksDB" else fn1
      Cons.P("# %s" % db_type)
      Cons.P("# fn=%s" % fn)
      cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info WHERE db_type='%s'" % db_type)
      r = cur.fetchone()
      Cons.P("# num_jobs=%d" % r["cnt"])
      cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info WHERE db_type='%s' and creation_reason='R'" % db_type)
      Cons.P("# num_jobs_recovery=%d" % cur.fetchone()["cnt"])
      cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info WHERE db_type='%s' and creation_reason='F'" % db_type)
      Cons.P("# num_jobs_flush=%d" % cur.fetchone()["cnt"])
      cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info WHERE db_type='%s' and creation_reason='C'" % db_type)
      num_comp_jobs_all = cur.fetchone()["cnt"]
      Cons.P("# num_jobs_comp_all=%d" % num_comp_jobs_all)
      cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info" \
          " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0"
          % db_type)
      num_jobs_comp_level_triggered = cur.fetchone()["cnt"]
      Cons.P("# num_jobs_comp_level_triggered=%d" % num_jobs_comp_level_triggered)
      cur.execute("SELECT count(distinct(sst_id)) as cnt FROM sst_creation_info" \
          " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0"
          % db_type)
      num_outssts_comp_level_triggered = cur.fetchone()["cnt"]
      Cons.P("# num_outssts_comp_level_triggered=%d" % num_outssts_comp_level_triggered)
      Cons.P("# num_outssts_comp_level_triggered_per_job=%f" % (float(num_outssts_comp_level_triggered) / num_jobs_comp_level_triggered))
      # Distribution of the number of output SSTables per job
      cur.execute("SELECT job_id, count(distinct(sst_id)) as num_ssts FROM sst_creation_info" \
          " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0" \
          " GROUP BY job_id ORDER BY job_id"
          % db_type)
      numssts_cnt = {}
      for r in cur.fetchall():
        c = r["num_ssts"]
        if c not in numssts_cnt:
          numssts_cnt[c] = 1
        else:
          numssts_cnt[c] += 1
      Cons.P("# %s" % numssts_cnt)
      cur.execute("SELECT count(distinct(sst_id)) as cnt FROM sst_creation_info" \
          " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0 and migr_dirc IN ('-', 'N')"
          % db_type)
      Cons.P("# num_outssts_comp_level_triggered_regular_compaction=%d" % cur.fetchone()["cnt"])
      cur.execute("SELECT count(distinct(sst_id)) as cnt FROM sst_creation_info" \
          " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0 and migr_dirc IN ('S', 'F')"
          % db_type)
      Cons.P("# num_outssts_comp_level_triggered_compaction_migration=%d" % cur.fetchone()["cnt"])
      if True:
        # From the SSTables created by compaction-migrations:
        # there are more SSTables that get migrated to the slow storage than to the fast storage.
        # Makes sense, since they get old in general.
        cur.execute("SELECT count(distinct(sst_id)) as cnt FROM sst_creation_info" \
            " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0 and migr_dirc IN ('S')"
            % db_type)
        Cons.P("# num_outssts_comp_level_triggered_compaction_migration_to_slow_storage=%d" % cur.fetchone()["cnt"])
        if False:
          cur.execute("SELECT job_id, count(distinct(sst_id)) as num_ssts FROM sst_creation_info" \
              " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0 and migr_dirc IN ('S')" \
              " GROUP BY job_id ORDER BY job_id"
              % db_type)
          for r in cur.fetchall():
            Cons.P("# job_id=%d num_ssts=%d" % (r["job_id"], r["num_ssts"]))
        cur.execute("SELECT count(distinct(sst_id)) as cnt FROM sst_creation_info" \
            " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0 and migr_dirc IN ('F')"
            % db_type)
        Cons.P("# num_outssts_comp_level_triggered_compaction_migration_to_fast_storage=%d" % cur.fetchone()["cnt"])
        if False:
          cur.execute("SELECT job_id, count(distinct(sst_id)) as num_ssts FROM sst_creation_info" \
              " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0 and migr_dirc IN ('F')" \
              " GROUP BY job_id ORDER BY job_id"
              % db_type)
          for r in cur.fetchall():
            Cons.P("# job_id=%d num_ssts=%d" % (r["job_id"], r["num_ssts"]))
      cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info" \
          " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=1"
          % db_type)
      Cons.P("# num_jobs_comp_temp_triggered_migr=%d" % cur.fetchone()["cnt"])
      if False:
        # With a temperature-triggered single-sstable migration, there is always a single input sstable,
        # but there can be multiple output sstables. Interesting.
        # All of those happened with a 256MB L0 SSTable in the fast storage becoming cold and being migrated to the slow storage,
        # making 4 64MB L0 SSTables in the slow storage.
        # I don't think there is any harm there. It's just that the output file is split into 4 small ones.
        # Count each of them as a single migration.
        cur.execute("SELECT count(distinct(sst_id)) as cnt FROM sst_creation_info" \
            " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=1"
            % db_type)
        Cons.P("# num_outssts_comp_temp_triggered_migr=%d" % cur.fetchone()["cnt"])
        cur.execute("SELECT job_id, count(distinct(sst_id)) as num_ssts FROM sst_creation_info" \
            " WHERE db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=1" \
            " GROUP BY job_id ORDER BY job_id"
            % db_type)
        for r in cur.fetchall():
          if 1 < r["num_ssts"]:
            Cons.P("# job_id=%d num_ssts=%d" % (r["job_id"], r["num_ssts"]))


def _HourlyStat(cur):
  with Cons.MT("Hourly stat ..."):
    Cons.P("# Left set of columns are for RocksDB, right one is for Mutant")
    Cons.P("# J: all jobs")
    Cons.P("# JR: recovery jobs")
    Cons.P("# JF: flush jobs")
    Cons.P("# JC: compaction jobs")
    Cons.P("# JCL: compaction jobs. leveled organization triggered")
    Cons.P("# SCL: sstables created. leveled organization triggered")
    Cons.P("# SPJCL: sstables created / job. leveled organization triggered")
    Cons.P("# SSCL: sum of the sstable sizes created. leveled organization triggered. In GB.")
    Cons.P("# SCLR: sstables created. leveled organization triggered. regular compactions")
    Cons.P("# SCLCM: sstables created. leveled organization triggered. compaction-migrations")
    Cons.P("# JCT: compaction jobs. temperature-triggered single-sstable migration")
    Cons.P("# SSCT: sum of the sstable sizes created. temperature-triggered single-sstable migration. In GB.")
    Cons.P("#")
    fmt = "%1d" \
        " %2d %1d %1d %2d %2d" \
        " %3d %5.3f %4.0f %3d %3d %2d %4.0f" \
        " %6d %1d %1d %3d %3d" \
        " %3d %5.3f %4.0f %3d %3d %3d %4.0f"
    Cons.P(Util.BuildHeader(fmt, "hour" \
        " J JR JF JC JCL" \
        " SCL SPJCL SSCL SCLR SCLCM JCT SSCT" \
        " J JR JF JC JCL" \
        " SCL SPJCL SSCL SCLR SCLCM JCT SSCT"
        ))
    for hour in range(10):
      j = {}
      j_r = {}
      j_f = {}
      j_c = {}
      j_c_l = {}
      s_c_l = {}
      spj_c_l = {}
      ss_c_l = {}
      s_c_l_r = {}
      s_c_l_cm = {}
      j_c_t = {}
      ss_c_t = {}
      for db_type in ["R", "M"]:
        db_type_str = "RocksDB" if db_type == "R" else "Mutant"
        cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info WHERE hour=%d and db_type='%s'" % (hour, db_type_str))
        j[db_type] = cur.fetchone()["cnt"]
        cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info WHERE hour=%d and db_type='%s' and creation_reason='R'" % (hour, db_type_str))
        j_r[db_type] = cur.fetchone()["cnt"]
        cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info WHERE hour=%d and db_type='%s' and creation_reason='F'" % (hour, db_type_str))
        j_f[db_type] = cur.fetchone()["cnt"]
        cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info WHERE hour=%d and db_type='%s' and creation_reason='C'" % (hour, db_type_str))
        j_c[db_type] = cur.fetchone()["cnt"]
        cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info" \
            " WHERE hour=%d and db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0"
            % (hour, db_type_str))
        j_c_l[db_type] = cur.fetchone()["cnt"]
        cur.execute("SELECT count(sst_id) as cnt FROM sst_creation_info" \
            " WHERE hour=%d and db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0"
            % (hour, db_type_str))
        s_c_l[db_type] = cur.fetchone()["cnt"]
        spj_c_l[db_type] = 0 if j_c_l[db_type] == 0 else (float(s_c_l[db_type]) / j_c_l[db_type])
        cur.execute("SELECT sum(sst_size) as v FROM sst_creation_info" \
            " WHERE hour=%d and db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0"
            % (hour, db_type_str))
        ss_c_l[db_type] = cur.fetchone()["v"]
        if ss_c_l[db_type] is None:
          ss_c_l[db_type] = 0
        ss_c_l[db_type] = float(ss_c_l[db_type]) / 1024 / 1024 / 1024
        cur.execute("SELECT count(sst_id) as cnt FROM sst_creation_info" \
            " WHERE hour=%d and db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0 and migr_dirc IN ('-', 'N')"
            % (hour, db_type_str))
        s_c_l_r[db_type] = cur.fetchone()["cnt"]
        cur.execute("SELECT count(sst_id) as cnt FROM sst_creation_info" \
            " WHERE hour=%d and db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=0 and migr_dirc IN ('S', 'F')"
            % (hour, db_type_str))
        s_c_l_cm[db_type] = cur.fetchone()["cnt"]
        cur.execute("SELECT count(distinct(job_id)) as cnt FROM sst_creation_info" \
            " WHERE hour=%d and db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=1"
            % (hour, db_type_str))
        j_c_t[db_type] = cur.fetchone()["cnt"]
        cur.execute("SELECT sum(sst_size) as v FROM sst_creation_info" \
            " WHERE hour=%d and db_type='%s' and creation_reason='C' and temp_triggered_single_sst_migr=1"
            % (hour, db_type_str))
        ss_c_t[db_type] = cur.fetchone()["v"]
        if ss_c_t[db_type] is None:
          ss_c_t[db_type] = 0
        ss_c_t[db_type] = float(ss_c_t[db_type]) / 1024 / 1024 / 1024
      Cons.P(fmt % (hour
          , j["R"], j_r["R"], j_f["R"], j_c["R"], j_c_l["R"]
          , s_c_l["R"], spj_c_l["R"], ss_c_l["R"], s_c_l_r["R"], s_c_l_cm["R"], j_c_t["R"], ss_c_t["R"]
          , j["M"], j_r["M"], j_f["M"], j_c["M"], j_c_l["M"]
          , s_c_l["M"], spj_c_l["M"], ss_c_l["M"], s_c_l_r["M"], s_c_l_cm["M"], j_c_t["M"], ss_c_t["M"]
          ))
    # TODO
# fn_out = "%s/rocksdb-sst-creation-cnt-by-reasons-by-time" % Conf.GetOutDir()
# with open(fn_out, "w") as fo:
#   fo.write("\n")
# Cons.P("Created %s %d" % (fn_out, os.path.getsize(fn_out)))
...
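
Here, db_type is simply a column in the stat DB that tags each parsed log line as "RocksDB" or "Mutant"; every later query filters on it. _GenDB reads whitespace-separated log lines by field position: t[1] holds a timestamp whose hour is extracted, t[5] the SSTable size, t[6] the SSTable ID, t[7] the job ID, t[8] the creation reason, t[9] the temperature-trigger flag, and t[10] the migration direction. The exact log format is not shown on this page, so the line below is a hypothetical sample shaped only to match those indices, as a sketch of what the parser would accept:

import re

# Hypothetical sample line; fields t[0], t[2], t[3], and t[4] are never read
# by _GenDB, so placeholders are used for them.
line = "f0 07:32:15 f2 f3 f4 67108864 1234 56 C T S"
t = re.split(r" +", line.strip())
hour = int(t[1].split(":")[0])    # 7
sst_size = int(t[5])              # 67108864 (a 64 MB SSTable)
sst_id = t[6]                     # "1234"; a "-" here means a deleted SSTable and is skipped
job_id = int(t[7])                # 56
creation_reason = t[8]            # "C" for compaction (R: recovery, F: flush)
temp_triggered = (t[9] == "T")    # True: temperature-triggered single-SSTable migration
migr_dirc = t[10]                 # "S": migrated to slow storage ("F": fast; "-"/"N": none)
print(hour, sst_size, sst_id, job_id, creation_reason, temp_triggered, migr_dirc)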


skeletons.py

Source: skeletons.py (GitHub)


import peewee
import copy

__all__ = ["AdapterSkeleton", "DatabaseSkeleton"]


class AdapterSkeleton(object):
    def dispatch(self, db_type):
        if db_type == "sqlite" or db_type == "sqlite3":
            return peewee.SqliteAdapter
        elif db_type == "mysql":
            return peewee.MySQLAdapter
        elif db_type == "postgres" or db_type == "postgresql":
            return peewee.PostgresqlAdapter
        else:
            raise NotImplementedError("%s is not supported. supported: sqlite, mysql, postgresql" % db_type)

    def concrete(self, db_type):
        db_type = db_type.lower()
        return self.dispatch(db_type)()  # return an instance

    reserved_tables = []


class DatabaseSkeleton(peewee.Database):  # TODO: add help message (when calling any method before concrete())
    def dispatch(self, db_type):
        if db_type == "sqlite" or db_type == "sqlite3":
            return peewee.SqliteDatabase
        elif db_type == "mysql":
            return peewee.MySQLDatabase
        elif db_type == "postgres" or db_type == "postgresql":
            return peewee.PostgresqlDatabase
        else:
            raise NotImplementedError("%s is not supported. supported: sqlite, mysql, postgresql" % db_type)

    def concrete(self, db_type, fields=None, _connect_kwargs=None):
        kwargs = copy.copy(self.connect_kwargs)
        if _connect_kwargs:
            kwargs.update(_connect_kwargs)
        dbclass = self.dispatch(db_type.lower())
        dbname = self.database
        if fields:
            return dbclass(dbname, fields=fields, **kwargs)  # return an instance
        return dbclass(dbname, **kwargs)  # return an instance

    def rename(self, new_name):
        self.database = new_name

    def __init__(self, database, **connect_kwargs):
        ...
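
Despite the page title, neither snippet above calls into the Hypothesis library itself; db_type is just a plain string argument that selects a backend. If you did want to exercise this dispatch logic with Hypothesis, a property-based test is a natural fit. The sketch below is a minimal, hypothetical example: it assumes skeletons.py is importable as a module named skeletons, and that the installed peewee version still ships the adapter classes it references.

from hypothesis import given
from hypothesis import strategies as st

from skeletons import AdapterSkeleton  # hypothetical module path

SUPPORTED = ["sqlite", "sqlite3", "mysql", "postgres", "postgresql"]

@given(st.sampled_from(SUPPORTED))
def test_dispatch_accepts_every_supported_alias(db_type):
    # dispatch() should return a class (not raise) for every supported alias.
    assert AdapterSkeleton().dispatch(db_type) is not None

@given(st.text().filter(lambda s: s.lower() not in SUPPORTED))
def test_dispatch_rejects_unknown_db_types(db_type):
    # concrete() lowercases its argument before dispatching, so mimic that here;
    # anything outside the supported list should raise NotImplementedError.
    try:
        AdapterSkeleton().dispatch(db_type.lower())
        assert False, "expected NotImplementedError for %r" % db_type
    except NotImplementedError:
        pass

Running these under pytest generates many db_type values per test, which is exactly the kind of exhaustive alias/rejection checking that a hand-written example-based test tends to miss.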


