How to use _str_length in pandera

Best Python code snippets using pandera

The snippets below come from open-source projects indexed under pandera. In each one, _str_length is a plain instance attribute rather than a method: it holds a configured string length that is set once and then reused wherever a bounded-length string is needed.
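Before the full listings, here is a minimal, self-contained sketch of that shared pattern. All names in it are illustrative only, not taken from pandera or from the snippets below; the default of 255 mirrors the sql_backend.py snippet.

# Minimal sketch (illustrative names, not a real library API): stash a
# configurable string length in a "private" attribute, then reuse it
# wherever a bounded string is needed.
class BoundedText:
    def __init__(self, string_length: int = 255):
        # default of 255 mirrors the sql_backend.py snippet below
        self._str_length = string_length

    def clip(self, value: str) -> str:
        """Truncate value to the configured length."""
        return value[:self._str_length]

print(BoundedText(string_length=8).clip("Hello World"))  # -> 'Hello Wo'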

sql_backend.py

Source: sql_backend.py (GitHub)


'''
Copyright (c) 2012-2015, Agora Games, LLC All rights reserved.
https://github.com/agoragames/kairos/blob/master/LICENSE.txt
'''
from .exceptions import *
from .timeseries import *

from sqlalchemy.types import TypeEngine
from sqlalchemy import Table, Column, BigInteger, Integer, String, Unicode, Text, LargeBinary, Float, Boolean, Time, Date, DateTime, Numeric, MetaData, UniqueConstraint, create_engine
from sqlalchemy.sql import select, update, insert, distinct, asc, desc, and_, or_, not_

import time
from datetime import date, datetime
from datetime import time as time_type
from decimal import Decimal
from urlparse import *

# Test python3 compatibility
try:
  x = long(1)
except NameError:
  long = int
try:
  x = unicode('foo')
except NameError:
  unicode = str

TYPE_MAP = {
  str : String,
  'str' : String,
  'string' : String,
  unicode : Unicode, # works for py3 too
  'unicode' : Unicode,
  float : Float,
  'float' : Float,
  int : Integer,
  'int' : Integer,
  'integer' : Integer,
  long : BigInteger, # works for py3 too
  'long' : BigInteger,
  'int64' : BigInteger,
  bool : Boolean,
  'bool' : Boolean,
  'boolean' : Boolean,
  date : Date,
  'date' : Date,
  datetime : DateTime,
  'datetime' : DateTime,
  time_type : Time,
  'time' : Time,
  Decimal : Numeric,
  'decimal' : Numeric,
  'text' : Text,
  'clob' : Text,
  'blob' : LargeBinary,
}

class SqlBackend(Timeseries):

  def __new__(cls, *args, **kwargs):
    if cls==SqlBackend:
      ttype = kwargs.pop('type', None)
      if ttype=='series':
        return SqlSeries.__new__(SqlSeries, *args, **kwargs)
      elif ttype=='histogram':
        return SqlHistogram.__new__(SqlHistogram, *args, **kwargs)
      elif ttype=='count':
        return SqlCount.__new__(SqlCount, *args, **kwargs)
      elif ttype=='gauge':
        return SqlGauge.__new__(SqlGauge, *args, **kwargs)
      raise NotImplementedError("No implementation for %s types"%(ttype))
    return Timeseries.__new__(cls, *args, **kwargs)

  @classmethod
  def url_parse(self, url, **kwargs):
    location = urlparse(url)
    if 'sql' in location.scheme:
      return create_engine( url, **kwargs )

  def __init__(self, client, **kwargs):
    '''
    Initialize the sql backend after timeseries has processed the configuration.
    '''
    self._metadata = MetaData()
    self._str_length = kwargs.get('string_length',255)
    self._txt_length = kwargs.get('text_length', 32*1024)

    vtype = kwargs.get('value_type', float)
    if vtype in TYPE_MAP:
      self._value_type = TYPE_MAP[vtype]
      if self._value_type == String:
        self._value_type = String(self._str_length)
      elif self._value_type == Text:
        self._value_type = Text(self._txt_length)
      elif self._value_type == LargeBinary:
        self._value_type = LargeBinary(self._txt_length)
    elif issubclass(vtype, TypeEngine):
      if vtype == String:
        self._value_type = String(self._str_length)
      elif vtype == Text:
        self._value_type = Text(self._txt_length)
      elif vtype == LargeBinary:
        self._value_type = LargeBinary(self._txt_length)
    elif isinstance(vtype, TypeEngine):
      self._value_type = vtype
    else:
      raise ValueError("Unsupported type '%s'"%(vtype))

    self._table_name = kwargs.get('table_name', self._table_name)
    super(SqlBackend,self).__init__(client, **kwargs)

  def list(self):
    connection = self._client.connect()
    rval = set()
    stmt = select([distinct(self._table.c.name)])
    for row in connection.execute(stmt):
      rval.add(row['name'])
    return list(rval)

  def properties(self, name):
    connection = self._client.connect()
    rval = {}
    for interval,config in self._intervals.items():
      rval.setdefault(interval, {})
      stmt = select([self._table.c.i_time]).where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval
        )
      ).order_by( asc(self._table.c.i_time) ).limit(1)
      rval[interval]['first'] = config['i_calc'].from_bucket(
        connection.execute(stmt).first()['i_time'] )

      stmt = select([self._table.c.i_time]).where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval
        )
      ).order_by( desc(self._table.c.i_time) ).limit(1)
      rval[interval]['last'] = config['i_calc'].from_bucket(
        connection.execute(stmt).first()['i_time'] )
    return rval

  def expire(self, name):
    '''
    Expire all the data.
    '''
    for interval,config in self._intervals.items():
      if config['expire']:
        # Because we're storing the bucket time, expiry has the same
        # "skew" as whatever the buckets are.
        expire_from = config['i_calc'].to_bucket(time.time() - config['expire'])
        conn = self._client.connect()
        conn.execute( self._table.delete().where(
          and_(
            self._table.c.name==name,
            self._table.c.interval==interval,
            self._table.c.i_time<=expire_from
          )
        ))

  def _insert(self, name, value, timestamp, intervals, **kwargs):
    '''
    Insert the new value.
    '''
    for interval,config in self._intervals.items():
      timestamps = self._normalize_timestamps(timestamp, intervals, config)
      for tstamp in timestamps:
        self._insert_data(name, value, tstamp, interval, config, **kwargs)

  def _get(self, name, interval, config, timestamp, **kws):
    '''
    Get the interval.
    '''
    i_bucket = config['i_calc'].to_bucket(timestamp)
    fetch = kws.get('fetch')
    process_row = kws.get('process_row') or self._process_row

    rval = OrderedDict()
    if fetch:
      data = fetch( self._client.connect(), self._table, name, interval, i_bucket )
    else:
      data = self._type_get(name, interval, i_bucket)

    if config['coarse']:
      if data:
        rval[ config['i_calc'].from_bucket(i_bucket) ] = process_row(data.values()[0][None])
      else:
        rval[ config['i_calc'].from_bucket(i_bucket) ] = self._type_no_value()
    else:
      for r_bucket,row_data in data.values()[0].items():
        rval[ config['r_calc'].from_bucket(r_bucket) ] = process_row(row_data)
    return rval

  def _series(self, name, interval, config, buckets, **kws):
    '''
    Fetch a series of buckets.
    '''
    fetch = kws.get('fetch')
    process_row = kws.get('process_row') or self._process_row

    rval = OrderedDict()
    if fetch:
      data = fetch( self._client.connect(), self._table, name, interval, buckets[0], buckets[-1] )
    else:
      data = self._type_get(name, interval, buckets[0], buckets[-1])

    if config['coarse']:
      for i_bucket in buckets:
        i_key = config['i_calc'].from_bucket(i_bucket)
        i_data = data.get( i_bucket )
        if i_data:
          rval[ i_key ] = process_row( i_data[None] )
        else:
          rval[ i_key ] = self._type_no_value()
    else:
      if data:
        for i_bucket, i_data in data.items():
          i_key = config['i_calc'].from_bucket(i_bucket)
          rval[i_key] = OrderedDict()
          for r_bucket, r_data in i_data.items():
            r_key = config['r_calc'].from_bucket(r_bucket)
            if r_data:
              rval[i_key][r_key] = process_row(r_data)
            else:
              rval[i_key][r_key] = self._type_no_value()
    return rval

  def delete(self, name):
    '''
    Delete time series by name across all intervals. Returns the number of
    records deleted.
    '''
    conn = self._client.connect()
    conn.execute( self._table.delete().where(self._table.c.name==name) )

class SqlSeries(SqlBackend, Series):

  def __init__(self, *a, **kwargs):
    # TODO: define indices
    # TODO: optionally create separate tables for each interval, like mongo?
    self._table_name = 'series'
    super(SqlSeries,self).__init__(*a, **kwargs)

    self._table = Table(self._table_name, self._metadata,
      Column('name', String(self._str_length), nullable=False),     # stat name
      Column('interval', String(self._str_length), nullable=False), # interval name
      Column('insert_time', Float, nullable=False),                 # to preserve order
      Column('i_time', Integer, nullable=False),                    # interval timestamp
      Column('r_time', Integer, nullable=True),                     # resolution timestamp
      Column('value', self._value_type, nullable=False)             # datas
    )
    self._metadata.create_all(self._client)

  def _insert_data(self, name, value, timestamp, interval, config, **kwargs):
    '''Helper to insert data into sql.'''
    kwargs = {
      'name' : name,
      'interval' : interval,
      'insert_time' : time.time(),
      'i_time' : config['i_calc'].to_bucket(timestamp),
      'value' : value
    }
    if not config['coarse']:
      kwargs['r_time'] = config['r_calc'].to_bucket(timestamp)
    stmt = self._table.insert().values(**kwargs)
    conn = self._client.connect()
    result = conn.execute(stmt)

  def _type_get(self, name, interval, i_bucket, i_end=None):
    connection = self._client.connect()
    rval = OrderedDict()

    stmt = self._table.select()
    if i_end:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time>=i_bucket,
          self._table.c.i_time<=i_end,
        )
      )
    else:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time==i_bucket,
        )
      )
    stmt = stmt.order_by( self._table.c.r_time, self._table.c.insert_time )

    for row in connection.execute(stmt):
      rval.setdefault(row['i_time'],OrderedDict()).setdefault(row['r_time'],[]).append( row['value'] )
    return rval

class SqlHistogram(SqlBackend, Histogram):

  def __init__(self, *a, **kwargs):
    # TODO: define indices
    # TODO: optionally create separate tables for each interval, like mongo?
    self._table_name = 'histogram'
    super(SqlHistogram,self).__init__(*a, **kwargs)

    self._table = Table(self._table_name, self._metadata,
      Column('name', String(self._str_length), nullable=False),     # stat name
      Column('interval', String(self._str_length), nullable=False), # interval name
      Column('i_time', Integer, nullable=False),                    # interval timestamp
      Column('r_time', Integer, nullable=True),                     # resolution timestamp
      Column('value', self._value_type, nullable=False),            # histogram keys
      Column('count', Integer, nullable=False),                     # key counts
      # Use a constraint for transaction-less insert vs update
      UniqueConstraint('name', 'interval', 'i_time', 'r_time', 'value', name='unique_value')
    )
    self._metadata.create_all(self._client)

  def _insert_data(self, name, value, timestamp, interval, config, **kwargs):
    '''Helper to insert data into sql.'''
    conn = self._client.connect()
    if not self._update_data(name, value, timestamp, interval, config, conn):
      try:
        kwargs = {
          'name' : name,
          'interval' : interval,
          'i_time' : config['i_calc'].to_bucket(timestamp),
          'value' : value,
          'count' : 1
        }
        if not config['coarse']:
          kwargs['r_time'] = config['r_calc'].to_bucket(timestamp)
        stmt = self._table.insert().values(**kwargs)
        result = conn.execute(stmt)
      except:
        # TODO: only catch IntegrityError
        if not self._update_data(name, value, timestamp, interval, config, conn):
          raise

  def _update_data(self, name, value, timestamp, interval, config, conn):
    '''Support function for insert. Should be called within a transaction'''
    i_time = config['i_calc'].to_bucket(timestamp)
    if not config['coarse']:
      r_time = config['r_calc'].to_bucket(timestamp)
    else:
      r_time = None
    stmt = self._table.update().where(
      and_(
        self._table.c.name==name,
        self._table.c.interval==interval,
        self._table.c.i_time==i_time,
        self._table.c.r_time==r_time,
        self._table.c.value==value)
    ).values({self._table.c.count: self._table.c.count + 1})
    rval = conn.execute( stmt )
    return rval.rowcount

  def _type_get(self, name, interval, i_bucket, i_end=None):
    connection = self._client.connect()
    rval = OrderedDict()

    stmt = self._table.select()
    if i_end:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time>=i_bucket,
          self._table.c.i_time<=i_end,
        )
      )
    else:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time==i_bucket,
        )
      )
    stmt = stmt.order_by( self._table.c.r_time )

    for row in connection.execute(stmt):
      rval.setdefault(row['i_time'],OrderedDict()).setdefault(row['r_time'],{})[row['value']] = row['count']
    return rval

class SqlCount(SqlBackend, Count):

  def __init__(self, *a, **kwargs):
    # TODO: define indices
    # TODO: optionally create separate tables for each interval, like mongo?
    self._table_name = 'count'
    super(SqlCount,self).__init__(*a, **kwargs)

    self._table = Table(self._table_name, self._metadata,
      Column('name', String(self._str_length), nullable=False),     # stat name
      Column('interval', String(self._str_length), nullable=False), # interval name
      Column('i_time', Integer, nullable=False),                    # interval timestamp
      Column('r_time', Integer, nullable=True),                     # resolution timestamp
      Column('count', Integer, nullable=False),                     # key counts
      # Use a constraint for transaction-less insert vs update
      UniqueConstraint('name', 'interval', 'i_time', 'r_time', name='unique_count')
    )
    self._metadata.create_all(self._client)

  def _insert_data(self, name, value, timestamp, interval, config, **kwargs):
    '''Helper to insert data into sql.'''
    conn = self._client.connect()
    if not self._update_data(name, value, timestamp, interval, config, conn):
      try:
        kwargs = {
          'name' : name,
          'interval' : interval,
          'i_time' : config['i_calc'].to_bucket(timestamp),
          'count' : value
        }
        if not config['coarse']:
          kwargs['r_time'] = config['r_calc'].to_bucket(timestamp)
        stmt = self._table.insert().values(**kwargs)
        result = conn.execute(stmt)
      except:
        # TODO: only catch IntegrityError
        if not self._update_data(name, value, timestamp, interval, config, conn):
          raise

  def _update_data(self, name, value, timestamp, interval, config, conn):
    '''Support function for insert. Should be called within a transaction'''
    i_time = config['i_calc'].to_bucket(timestamp)
    if not config['coarse']:
      r_time = config['r_calc'].to_bucket(timestamp)
    else:
      r_time = None
    stmt = self._table.update().where(
      and_(
        self._table.c.name==name,
        self._table.c.interval==interval,
        self._table.c.i_time==i_time,
        self._table.c.r_time==r_time)
    ).values({self._table.c.count: self._table.c.count + value})
    rval = conn.execute( stmt )
    return rval.rowcount

  def _type_get(self, name, interval, i_bucket, i_end=None):
    connection = self._client.connect()
    rval = OrderedDict()

    stmt = self._table.select()
    if i_end:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time>=i_bucket,
          self._table.c.i_time<=i_end,
        )
      )
    else:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time==i_bucket,
        )
      )
    stmt = stmt.order_by( self._table.c.r_time )

    for row in connection.execute(stmt):
      rval.setdefault(row['i_time'],OrderedDict())[row['r_time']] = row['count']
    return rval

class SqlGauge(SqlBackend, Gauge):

  def __init__(self, *a, **kwargs):
    # TODO: define indices
    # TODO: optionally create separate tables for each interval, like mongo?
    self._table_name = 'gauge'
    super(SqlGauge,self).__init__(*a, **kwargs)

    self._table = Table(self._table_name, self._metadata,
      Column('name', String(self._str_length), nullable=False),     # stat name
      Column('interval', String(self._str_length), nullable=False), # interval name
      Column('i_time', Integer, nullable=False),                    # interval timestamp
      Column('r_time', Integer, nullable=True),                     # resolution timestamp
      Column('value', self._value_type, nullable=False),            # key counts
      # Use a constraint for transaction-less insert vs update
      UniqueConstraint('name', 'interval', 'i_time', 'r_time', name='unique_count')
    )
    self._metadata.create_all(self._client)

  def _insert_data(self, name, value, timestamp, interval, config, **kwargs):
    '''Helper to insert data into sql.'''
    conn = self._client.connect()
    if not self._update_data(name, value, timestamp, interval, config, conn):
      try:
        kwargs = {
          'name' : name,
          'interval' : interval,
          'i_time' : config['i_calc'].to_bucket(timestamp),
          'value' : value
        }
        if not config['coarse']:
          kwargs['r_time'] = config['r_calc'].to_bucket(timestamp)
        stmt = self._table.insert().values(**kwargs)
        result = conn.execute(stmt)
      except:
        # TODO: only catch IntegrityError
        if not self._update_data(name, value, timestamp, interval, config, conn):
          raise

  def _update_data(self, name, value, timestamp, interval, config, conn):
    '''Support function for insert. Should be called within a transaction'''
    i_time = config['i_calc'].to_bucket(timestamp)
    if not config['coarse']:
      r_time = config['r_calc'].to_bucket(timestamp)
    else:
      r_time = None
    stmt = self._table.update().where(
      and_(
        self._table.c.name==name,
        self._table.c.interval==interval,
        self._table.c.i_time==i_time,
        self._table.c.r_time==r_time)
    ).values({self._table.c.value: value})
    rval = conn.execute( stmt )
    return rval.rowcount

  def _type_get(self, name, interval, i_bucket, i_end=None):
    connection = self._client.connect()
    rval = OrderedDict()

    stmt = self._table.select()
    if i_end:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time>=i_bucket,
          self._table.c.i_time<=i_end,
        )
      )
    else:
      stmt = stmt.where(
        and_(
          self._table.c.name==name,
          self._table.c.interval==interval,
          self._table.c.i_time==i_bucket,
        )
      )
    stmt = stmt.order_by( self._table.c.r_time )

    for row in connection.execute(stmt):
      rval.setdefault(row['i_time'],OrderedDict())[row['r_time']] = row['value']
...
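The listing above is Python-2-era code from the kairos library (note the urlparse import). Its __init__ reads the string_length keyword into self._str_length, which then sizes every String(self._str_length) column it creates. A hedged usage sketch follows: it is untested, assumes a SQLAlchemy engine is a valid client, and borrows the interval-configuration shape from kairos's documentation; adjust names and values to your setup.

# Hedged sketch (assumed kairos API, untested): construct the series backend
# with a custom string length for the name/interval columns.
from sqlalchemy import create_engine

engine = create_engine('sqlite:///timeseries.db')
series = SqlSeries(
    engine,
    string_length=64,       # -> self._str_length, so name/interval become String(64)
    text_length=16 * 1024,  # -> self._txt_length, used for Text/LargeBinary values
    value_type=float,
    intervals={
        # interval config shape as documented by kairos (assumption)
        'minute': {'step': 60, 'steps': 120},
    },
)
series.insert('page.views', 1.0)  # assumed public insert API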


strings.py

Source: strings.py (GitHub)


_sample_string = "Hello World"  # type: str
_sample_string2 = 'I don\'t believe it!'  # type: str
_escape_sequence1 = "Hello \nWorld"  # type: str
_escape_sequence2 = "Hello \tWorld"  # type: str

# Escape Sequence
print (_escape_sequence1)
print (_escape_sequence2)

# Get Length of the String
_str_length = len(_sample_string)
print (_str_length)

# indexing
print (_sample_string[0])   # indexing example
print (_sample_string[-1])  # indexing example - reverse

# slicing [start:stop:step]
print (_sample_string[2:])   # Get everything after index 2
print (_sample_string[:5])   # Get everything before index 5
print (_sample_string[1:5])  # Get a subsection of the string b/w index 1 to 5
print (_sample_string[::3])  # Get all strings with jump to step size of 3
print (_sample_string[::-1]) # Trick to reverse a string

"""
Strings are immutable
You can't do this
name = "Sam"   # Want to change this to Pam
name[0] = "P"  # This will generate an error
"""
name = 'Sam'
last_letters = name[1:]
new_name = 'P' + last_letters
print (new_name)

# String Concatenation
a = "Hello"   # type: str
b = "World!"  # type: str
print (a + " " + b)  # concat strings
print (a * 10)       # You need 10 Hello. Multiply Strings
print (a + str(2))   # Convert the int to str before you can add
print ('2' + '3')    # Dynamic Type Casting

# Methods in Python
_str = "Hello World!"
print (_str.upper())
print (_str.lower())  # print is a function and lower is a method
print (_str.split())  # Split on whitespaces
...
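In this snippet _str_length is simply the result of len(), so it can drive the same slicing shown above. A small follow-up sketch (illustrative only):

# Follow-up sketch: reuse the computed length to split the sample string in half.
_sample_string = "Hello World"
_str_length = len(_sample_string)  # 11
first_half = _sample_string[:_str_length // 2]
second_half = _sample_string[_str_length // 2:]
print(first_half)   # -> 'Hello'
print(second_half)  # -> ' World' (note the leading space)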


string_search.py

Source: string_search.py (GitHub)


from gp_framework.phenotype.phenotype import PhenotypeConverter
from gp_framework.bytegenotype import ByteGenotype
from gp_framework.exception import InvalidParameterException


class StringPhenotypeConverter(PhenotypeConverter):
    def __init__(self, str_length: int):
        self._str_length = str_length

    def convert(self, genotype: ByteGenotype):
        """
        Convert a genotype to a string of ASCII characters
        :param genotype:
        :return: A string of the indicated length consisting of each byte converted
        to an ASCII character
        """
        if len(genotype) < self._str_length:
            raise InvalidParameterException("Given genotype must be at least as "
                                            "long as the string being generated")
        result = ""
        for i in range(self._str_length):
            ascii_value = genotype[i]
            result += chr(ascii_value)
...
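The convert body is truncated above (the assembled result is presumably returned at the end). Since the gp_framework types are not shown here, this hedged sketch substitutes a plain bytes object for ByteGenotype and assumes that indexing a genotype yields one byte as an int, just as bytes indexing does:

# Hedged stand-in (gp_framework not shown): a bytes object plays the role of
# ByteGenotype; indexing it returns a single byte as an int.
genotype = bytes([72, 101, 108, 108, 111, 33])  # 'Hello!' as ASCII byte values
_str_length = 5                                 # generate only the first 5 chars
result = "".join(chr(genotype[i]) for i in range(_str_length))
print(result)  # -> 'Hello'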

