Best Python code snippet using stryker-parent
catalogtests.py
Source:catalogtests.py
1import os2import re3import unittest4import gisdata5from geoserver.catalog import Catalog, ConflictingDataError, UploadError, \6 FailedRequestError7from geoserver.support import ResourceInfo, url8from geoserver.support import DimensionInfo9from geoserver.layergroup import LayerGroup10from geoserver.util import shapefile_and_friends11DBPARAMS = dict(host="localhost", port="5432", dbtype="postgis",12 database=os.getenv("DATABASE", "db"),13 user=os.getenv("DBUSER", "postgres"),14 passwd=os.getenv("DBPASS", "password")15)16try:17 import psycopg218 # only used for connection sanity if present19 conn = psycopg2.connect(('dbname=%(database)s user=%(user)s password=%(passwd)s'20 ' port=%(port)s host=%(host)s'21 ) % DBPARAMS)22except ImportError:23 pass24# support resetting geoserver datadir25GEOSERVER_HOME = os.getenv('GEOSERVER_HOME')26if GEOSERVER_HOME:27 dest = os.getenv('DATA_DIR')28 data = os.path.join(GEOSERVER_HOME, 'data/release', '')29 if dest:30 os.system('rsync -v -a --delete %s %s' % (data, os.path.join(dest, '')))31 else:32 os.system('git clean -dxf -- %s' % data)33 os.system('curl -XPOST --user admin:geoserver http://localhost:8080/geoserver/rest/reload')34def drop_table(table):35 def outer(func):36 def inner(*args):37 try: func(*args)38 finally:39 try:40 if conn:41 conn.cursor().execute('DROP TABLE %s' % table)42 except Exception,e:43 print 'ERROR dropping table'44 print e45 return inner46 return outer47class NonCatalogTests(unittest.TestCase):48 def testDimensionInfo(self):49 inf = DimensionInfo( * (None,) * 6 )50 # make sure these work with no resolution set51 self.assertTrue(inf.resolution_millis() is None)52 self.assertTrue(inf.resolution_str() is None)53 inf = lambda r: DimensionInfo(None, None, None, r, None, None)54 def assertEqualResolution(spec, millis):55 self.assertEqual(millis, inf(spec).resolution_millis())56 self.assertEqual(spec, inf(millis).resolution_str())57 assertEqualResolution('0.5 seconds', 500)58 assertEqualResolution('7 days', 
604800000)59 assertEqualResolution('10 years', 315360000000000)60class CatalogTests(unittest.TestCase):61 def setUp(self):62 self.cat = Catalog("http://localhost:8080/geoserver/rest")63 def testAbout(self):64 about_html = self.cat.about()65 self.assertTrue('<html xmlns="http://www.w3.org/1999/xhtml"' in about_html)66 def testGSVersion(self):67 version = self.cat.gsversion()68 pat = re.compile('\d\.\d(\.[\dx]|-SNAPSHOT)')69 self.assertTrue(pat.match('2.2.x'))70 self.assertTrue(pat.match('2.3.2'))71 self.assertTrue(pat.match('2.3-SNAPSHOT'))72 self.assertFalse(pat.match('2.3.y'))73 self.assertFalse(pat.match('233'))74 self.assertTrue(pat.match(version))75 def testWorkspaces(self):76 self.assertEqual(7, len(self.cat.get_workspaces()))77 # marking out test since geoserver default workspace is not consistent 78 # self.assertEqual("cite", self.cat.get_default_workspace().name)79 self.assertEqual("topp", self.cat.get_workspace("topp").name)80 def testStores(self):81 topp = self.cat.get_workspace("topp")82 sf = self.cat.get_workspace("sf")83 self.assertEqual(9, len(self.cat.get_stores()))84 self.assertEqual(2, len(self.cat.get_stores(topp)))85 self.assertEqual(2, len(self.cat.get_stores(sf)))86 self.assertEqual("states_shapefile", self.cat.get_store("states_shapefile", topp).name)87 self.assertEqual("states_shapefile", self.cat.get_store("states_shapefile").name)88 self.assertEqual("states_shapefile", self.cat.get_store("states_shapefile").name)89 self.assertEqual("sfdem", self.cat.get_store("sfdem", sf).name)90 self.assertEqual("sfdem", self.cat.get_store("sfdem").name)91 92 def testResources(self):93 topp = self.cat.get_workspace("topp")94 sf = self.cat.get_workspace("sf")95 states = self.cat.get_store("states_shapefile", topp)96 sfdem = self.cat.get_store("sfdem", sf)97 self.assertEqual(19, len(self.cat.get_resources()))98 self.assertEqual(1, len(self.cat.get_resources(states)))99 self.assertEqual(5, len(self.cat.get_resources(workspace=topp)))100 self.assertEqual(1, 
len(self.cat.get_resources(sfdem)))101 self.assertEqual(6, len(self.cat.get_resources(workspace=sf)))102 self.assertEqual("states", self.cat.get_resource("states", states).name)103 self.assertEqual("states", self.cat.get_resource("states", workspace=topp).name)104 self.assertEqual("states", self.cat.get_resource("states").name)105 states = self.cat.get_resource("states")106 fields = [107 states.title,108 states.abstract,109 states.native_bbox,110 states.latlon_bbox,111 states.projection,112 states.projection_policy113 ]114 self.assertFalse(None in fields, str(fields))115 self.assertFalse(len(states.keywords) == 0)116 self.assertFalse(len(states.attributes) == 0)117 self.assertTrue(states.enabled)118 self.assertEqual("sfdem", self.cat.get_resource("sfdem", sfdem).name)119 self.assertEqual("sfdem", self.cat.get_resource("sfdem", workspace=sf).name)120 self.assertEqual("sfdem", self.cat.get_resource("sfdem").name)121 def testLayers(self):122 expected = set(["Arc_Sample", "Pk50095", "Img_Sample", "mosaic", "sfdem",123 "bugsites", "restricted", "streams", "archsites", "roads",124 "tasmania_roads", "tasmania_water_bodies", "tasmania_state_boundaries",125 "tasmania_cities", "states", "poly_landmarks", "tiger_roads", "poi",126 "giant_polygon"127 ])128 actual = set(l.name for l in self.cat.get_layers())129 missing = expected - actual130 extras = actual - expected131 message = "Actual layer list did not match expected! 
(Extras: %s) (Missing: %s)" % (extras, missing)132 self.assert_(len(expected ^ actual) == 0, message)133 states = self.cat.get_layer("states")134 self.assert_("states", states.name)135 self.assert_(isinstance(states.resource, ResourceInfo))136 self.assertEqual(set(s.name for s in states.styles), set(['pophatch', 'polygon']))137 self.assertEqual(states.default_style.name, "population")138 def testLayerGroups(self):139 expected = set(["tasmania", "tiger-ny", "spearfish"])140 actual = set(l.name for l in self.cat.get_layergroups())141 missing = expected - actual142 extras = actual - expected143 message = "Actual layergroup list did not match expected! (Extras: %s) (Missing: %s)" % (extras, missing)144 self.assert_(len(expected ^ actual) == 0, message)145 tas = self.cat.get_layergroup("tasmania")146 self.assert_("tasmania", tas.name)147 self.assert_(isinstance(tas, LayerGroup))148 self.assertEqual(tas.layers, ['tasmania_state_boundaries', 'tasmania_water_bodies', 'tasmania_roads', 'tasmania_cities'], tas.layers)149 self.assertEqual(tas.styles, [None, None, None, None], tas.styles)150 def testStyles(self):151 self.assertEqual(20, len(self.cat.get_styles()))152 self.assertEqual("population", self.cat.get_style("population").name)153 self.assertEqual("popshade.sld", self.cat.get_style("population").filename)154 self.assertEqual("population", self.cat.get_style("population").sld_name)155 self.assert_(self.cat.get_style('non-existing-style') is None)156 def testEscaping(self):157 # GSConfig is inconsistent about using exceptions vs. 
returning None158 # when a resource isn't found.159 # But the basic idea is that none of them should throw HTTP errors from160 # misconstructed URLS161 self.cat.get_style("best style ever")162 self.cat.get_workspace("best workspace ever")163 try:164 self.cat.get_store(workspace="best workspace ever",165 name="best store ever")166 self.fail('expected exception')167 except FailedRequestError, fre:168 self.assertEqual('No store found named: best store ever', fre.message)169 try:170 self.cat.get_resource(workspace="best workspace ever",171 store="best store ever",172 name="best resource ever")173 except FailedRequestError, fre:174 self.assertEqual('No store found named: best store ever', fre.message)175 self.cat.get_layer("best layer ever")176 self.cat.get_layergroup("best layergroup ever")177 def testUnicodeUrl(self):178 """179 Tests that the geoserver.support.url function support unicode strings.180 """181 # Test the url function with unicode182 seg = ['workspaces', 'test', 'datastores', u'operaci\xf3n_repo', 'featuretypes.xml']183 u = url(base=self.cat.service_url, seg=seg)184 self.assertEqual(u, self.cat.service_url + "/workspaces/test/datastores/operaci%C3%B3n_repo/featuretypes.xml")185 # Test the url function with normal string186 seg = ['workspaces', 'test', 'datastores', 'test-repo', 'featuretypes.xml']187 u = url(base=self.cat.service_url, seg=seg)188 self.assertEqual(u, self.cat.service_url + "/workspaces/test/datastores/test-repo/featuretypes.xml")189class ModifyingTests(unittest.TestCase):190 def setUp(self):191 self.cat = Catalog("http://localhost:8080/geoserver/rest")192 def testFeatureTypeSave(self):193 # test saving round trip194 rs = self.cat.get_resource("bugsites")195 old_abstract = rs.abstract196 new_abstract = "Not the original abstract"197 enabled = rs.enabled198 # Change abstract on server199 rs.abstract = new_abstract200 self.cat.save(rs)201 rs = self.cat.get_resource("bugsites")202 self.assertEqual(new_abstract, rs.abstract)203 
self.assertEqual(enabled, rs.enabled)204 # Change keywords on server205 rs.keywords = ["bugsites", "gsconfig"]206 enabled = rs.enabled207 self.cat.save(rs)208 rs = self.cat.get_resource("bugsites")209 self.assertEqual(["bugsites", "gsconfig"], rs.keywords)210 self.assertEqual(enabled, rs.enabled)211 212 # Change metadata links on server213 rs.metadata_links = [("text/xml", "TC211", "http://example.com/gsconfig.test.metadata")]214 enabled = rs.enabled215 self.cat.save(rs)216 rs = self.cat.get_resource("bugsites")217 self.assertEqual(218 [("text/xml", "TC211", "http://example.com/gsconfig.test.metadata")],219 rs.metadata_links)220 self.assertEqual(enabled, rs.enabled)221 # Restore abstract222 rs.abstract = old_abstract223 self.cat.save(rs)224 rs = self.cat.get_resource("bugsites")225 self.assertEqual(old_abstract, rs.abstract)226 def testDataStoreCreate(self):227 ds = self.cat.create_datastore("vector_gsconfig")228 ds.connection_parameters.update(**DBPARAMS)229 self.cat.save(ds)230 def testPublishFeatureType(self):231 # Use the other test and store creation to load vector data into a database232 # @todo maybe load directly to database?233 try:234 self.testDataStoreCreateAndThenAlsoImportData()235 except FailedRequestError:236 pass237 try:238 lyr = self.cat.get_layer('import')239 # Delete the existing layer and resource to allow republishing.240 self.cat.delete(lyr)241 self.cat.delete(lyr.resource)242 ds = self.cat.get_store("gsconfig_import_test")243 # make sure it's gone244 self.assert_(self.cat.get_layer('import') is None)245 self.cat.publish_featuretype("import", ds, native_crs="EPSG:4326")246 # and now it's not247 self.assert_(self.cat.get_layer('import') is not None)248 finally:249 # tear stuff down to allow the other test to pass if we run first250 ds = self.cat.get_store("gsconfig_import_test")251 lyr = self.cat.get_layer('import')252 # Delete the existing layer and resource to allow republishing.253 self.cat.delete(lyr)254 self.cat.delete(lyr.resource)255 
self.cat.delete(ds)256 def testDataStoreModify(self):257 ds = self.cat.get_store("sf")258 self.assertFalse("foo" in ds.connection_parameters)259 ds.connection_parameters = ds.connection_parameters260 ds.connection_parameters["foo"] = "bar"261 orig_ws = ds.workspace.name262 self.cat.save(ds)263 ds = self.cat.get_store("sf")264 self.assertTrue("foo" in ds.connection_parameters)265 self.assertEqual("bar", ds.connection_parameters["foo"])266 self.assertEqual(orig_ws, ds.workspace.name)267 @drop_table('import')268 def testDataStoreCreateAndThenAlsoImportData(self):269 ds = self.cat.create_datastore("gsconfig_import_test")270 ds.connection_parameters.update(**DBPARAMS)271 self.cat.save(ds)272 ds = self.cat.get_store("gsconfig_import_test")273 self.cat.add_data_to_store(ds, "import", {274 'shp': 'test/data/states.shp',275 'shx': 'test/data/states.shx',276 'dbf': 'test/data/states.dbf',277 'prj': 'test/data/states.prj'278 })279 def testCoverageStoreCreate(self):280 ds = self.cat.create_coveragestore2("coverage_gsconfig")281 ds.data_url = "file:data/mytiff.tiff"282 self.cat.save(ds)283 def testCoverageStoreModify(self):284 cs = self.cat.get_store("sfdem")285 self.assertEqual("GeoTIFF", cs.type)286 cs.type = "WorldImage"287 self.cat.save(cs)288 cs = self.cat.get_store("sfdem")289 self.assertEqual("WorldImage", cs.type)290 # not sure about order of test runs here, but it might cause problems291 # for other tests if this layer is misconfigured292 cs.type = "GeoTIFF"293 self.cat.save(cs) 294 def testCoverageSave(self):295 # test saving round trip296 rs = self.cat.get_resource("Arc_Sample")297 old_abstract = rs.abstract298 new_abstract = "Not the original abstract"299 # # Change abstract on server300 rs.abstract = new_abstract301 self.cat.save(rs)302 rs = self.cat.get_resource("Arc_Sample")303 self.assertEqual(new_abstract, rs.abstract)304 # Restore abstract305 rs.abstract = old_abstract306 self.cat.save(rs)307 rs = self.cat.get_resource("Arc_Sample")308 
self.assertEqual(old_abstract, rs.abstract)309 # Change metadata links on server310 rs.metadata_links = [("text/xml", "TC211", "http://example.com/gsconfig.test.metadata")]311 enabled = rs.enabled312 self.cat.save(rs)313 rs = self.cat.get_resource("Arc_Sample")314 self.assertEqual(315 [("text/xml", "TC211", "http://example.com/gsconfig.test.metadata")],316 rs.metadata_links)317 self.assertEqual(enabled, rs.enabled)318 srs_before = set(['EPSG:4326'])319 srs_after = set(['EPSG:4326', 'EPSG:3785'])320 formats = set(['ARCGRID', 'ARCGRID-GZIP', 'GEOTIFF', 'PNG', 'GIF', 'TIFF'])321 formats_after = set(["PNG", "GIF", "TIFF"])322 # set and save request_srs_list323 self.assertEquals(set(rs.request_srs_list), srs_before, str(rs.request_srs_list))324 rs.request_srs_list = rs.request_srs_list + ['EPSG:3785']325 self.cat.save(rs)326 rs = self.cat.get_resource("Arc_Sample")327 self.assertEquals(set(rs.request_srs_list), srs_after, str(rs.request_srs_list))328 # set and save response_srs_list329 self.assertEquals(set(rs.response_srs_list), srs_before, str(rs.response_srs_list))330 rs.response_srs_list = rs.response_srs_list + ['EPSG:3785']331 self.cat.save(rs)332 rs = self.cat.get_resource("Arc_Sample")333 self.assertEquals(set(rs.response_srs_list), srs_after, str(rs.response_srs_list))334 # set and save supported_formats335 self.assertEquals(set(rs.supported_formats), formats, str(rs.supported_formats))336 rs.supported_formats = ["PNG", "GIF", "TIFF"]337 self.cat.save(rs)338 rs = self.cat.get_resource("Arc_Sample")339 self.assertEquals(set(rs.supported_formats), formats_after, str(rs.supported_formats))340 def testWmsStoreCreate(self):341 ws = self.cat.create_wmsstore("wmsstore_gsconfig")342 ws.capabilitiesURL = "http://suite.opengeo.org/geoserver/ows?service=wms&version=1.1.1&request=GetCapabilities"343 ws.type = "WMS"344 self.cat.save(ws)345 346 def testWmsLayer(self):347 self.cat.create_workspace("wmstest", "http://example.com/wmstest")348 wmstest = 
self.cat.get_workspace("wmstest")349 wmsstore = self.cat.create_wmsstore("wmsstore", wmstest)350 wmsstore.capabilitiesURL = "http://suite.opengeo.org/geoserver/ows?service=wms&version=1.1.1&request=GetCapabilities"351 wmsstore.type = "WMS"352 self.cat.save(wmsstore)353 wmsstore = self.cat.get_store("wmsstore")354 self.assertEqual(1, len(self.cat.get_stores(wmstest)))355 available_layers = wmsstore.get_resources(available=True)356 for layer in available_layers:357 # sanitize the layer name - validation will fail on newer geoservers358 name = layer.replace(':', '_')359 new_layer = self.cat.create_wmslayer(wmstest, wmsstore, name, nativeName=layer)360 added_layers = wmsstore.get_resources()361 self.assertEqual(len(available_layers), len(added_layers))362 changed_layer = added_layers[0]363 self.assertEqual(True, changed_layer.advertised)364 self.assertEqual(True, changed_layer.enabled)365 changed_layer.advertised = False366 changed_layer.enabled = False367 self.cat.save(changed_layer)368 self.cat._cache.clear()369 changed_layer = wmsstore.get_resources()[0]370 changed_layer.fetch()371 self.assertEqual(False, changed_layer.advertised)372 self.assertEqual(False, changed_layer.enabled)373 def testFeatureTypeCreate(self):374 shapefile_plus_sidecars = shapefile_and_friends("test/data/states")375 expected = {376 'shp': 'test/data/states.shp',377 'shx': 'test/data/states.shx',378 'dbf': 'test/data/states.dbf',379 'prj': 'test/data/states.prj'380 }381 self.assertEqual(len(expected), len(shapefile_plus_sidecars))382 for k, v in expected.iteritems():383 self.assertEqual(v, shapefile_plus_sidecars[k])384 385 sf = self.cat.get_workspace("sf")386 self.cat.create_featurestore("states_test", shapefile_plus_sidecars, sf)387 self.assert_(self.cat.get_resource("states_test", workspace=sf) is not None)388 self.assertRaises(389 ConflictingDataError, 390 lambda: self.cat.create_featurestore("states_test", shapefile_plus_sidecars, sf)391 )392 self.assertRaises(393 UploadError,394 lambda: 
self.cat.create_coveragestore("states_raster_test", shapefile_plus_sidecars, sf)395 )396 bogus_shp = {397 'shp': 'test/data/Pk50095.tif',398 'shx': 'test/data/Pk50095.tif',399 'dbf': 'test/data/Pk50095.tfw',400 'prj': 'test/data/Pk50095.prj'401 }402 self.assertRaises(403 UploadError,404 lambda: self.cat.create_featurestore("bogus_shp", bogus_shp, sf)405 )406 lyr = self.cat.get_layer("states_test")407 self.cat.delete(lyr)408 self.assert_(self.cat.get_layer("states_test") is None)409 def testCoverageCreate(self):410 tiffdata = {411 'tiff': 'test/data/Pk50095.tif',412 'tfw': 'test/data/Pk50095.tfw',413 'prj': 'test/data/Pk50095.prj'414 }415 sf = self.cat.get_workspace("sf")416 # TODO: Uploading WorldImage file no longer works???417 # ft = self.cat.create_coveragestore("Pk50095", tiffdata, sf)418 # self.assert_(self.cat.get_resource("Pk50095", workspace=sf) is not None)419 # self.assertRaises(420 # ConflictingDataError, 421 # lambda: self.cat.create_coveragestore("Pk50095", tiffdata, sf)422 # )423 self.assertRaises(424 UploadError, 425 lambda: self.cat.create_featurestore("Pk50095_vector", tiffdata, sf)426 )427 bogus_tiff = {428 'tiff': 'test/data/states.shp',429 'tfw': 'test/data/states.shx',430 'prj': 'test/data/states.prj'431 }432 self.assertRaises(433 UploadError,434 lambda: self.cat.create_coveragestore("states_raster", bogus_tiff)435 )436 def testLayerSave(self):437 # test saving round trip438 lyr = self.cat.get_layer("states")439 old_attribution = lyr.attribution440 new_attribution = "Not the original attribution"441 # change attribution on server442 lyr.attribution = new_attribution443 self.cat.save(lyr)444 lyr = self.cat.get_layer("states")445 self.assertEqual(new_attribution, lyr.attribution)446 # Restore attribution447 lyr.attribution = old_attribution448 self.cat.save(lyr)449 lyr = self.cat.get_layer("states")450 self.assertEqual(old_attribution, lyr.attribution)451 self.assertEqual(lyr.default_style.name, "population")452 453 old_default_style = 
lyr.default_style454 lyr.default_style = (s for s in lyr.styles if s.name == "pophatch").next()455 lyr.styles = [old_default_style]456 self.cat.save(lyr)457 lyr = self.cat.get_layer("states")458 self.assertEqual(lyr.default_style.name, "pophatch")459 self.assertEqual([s.name for s in lyr.styles], ["population"])460 def testStyles(self):461 # upload new style, verify existence462 self.cat.create_style("fred", open("test/fred.sld").read())463 fred = self.cat.get_style("fred")464 self.assert_(fred is not None)465 self.assertEqual("Fred", fred.sld_title)466 # replace style, verify changes467 self.cat.create_style("fred", open("test/ted.sld").read(), overwrite=True)468 fred = self.cat.get_style("fred")469 self.assert_(fred is not None)470 self.assertEqual("Ted", fred.sld_title)471 # delete style, verify non-existence472 self.cat.delete(fred, purge=True)473 self.assert_(self.cat.get_style("fred") is None)474 # attempt creating new style475 self.cat.create_style("fred", open("test/fred.sld").read())476 fred = self.cat.get_style("fred")477 self.assertEqual("Fred", fred.sld_title)478 # verify it can be found via URL and check the name479 f = self.cat.get_style_by_url(fred.href)480 self.assert_(f is not None)481 self.assertEqual(f.name, fred.name)482 def testWorkspaceStyles(self):483 # upload new style, verify existence484 self.cat.create_style("jed", open("test/fred.sld").read(), workspace="topp")485 jed = self.cat.get_style("jed", workspace="blarny")486 self.assert_(jed is None)487 jed = self.cat.get_style("jed", workspace="topp")488 self.assert_(jed is not None)489 self.assertEqual("Fred", jed.sld_title)490 jed = self.cat.get_style("topp:jed")491 self.assert_(jed is not None)492 self.assertEqual("Fred", jed.sld_title)493 # replace style, verify changes494 self.cat.create_style("jed", open("test/ted.sld").read(), overwrite=True, workspace="topp")495 jed = self.cat.get_style("jed", workspace="topp")496 self.assert_(jed is not None)497 self.assertEqual("Ted", 
jed.sld_title)498 # delete style, verify non-existence499 self.cat.delete(jed, purge=True)500 self.assert_(self.cat.get_style("jed", workspace="topp") is None)501 # attempt creating new style502 self.cat.create_style("jed", open("test/fred.sld").read(), workspace="topp")503 jed = self.cat.get_style("jed", workspace="topp")504 self.assertEqual("Fred", jed.sld_title)505 # verify it can be found via URL and check the full name506 f = self.cat.get_style_by_url(jed.href)507 self.assert_(f is not None)508 self.assertEqual(f.fqn, jed.fqn)509 def testLayerWorkspaceStyles(self):510 # upload new style, verify existence511 self.cat.create_style("ned", open("test/fred.sld").read(), overwrite=True, workspace="topp")512 self.cat.create_style("zed", open("test/ted.sld").read(), overwrite=True, workspace="topp")513 ned = self.cat.get_style("ned", workspace="topp")514 zed = self.cat.get_style("zed", workspace="topp")515 self.assert_(ned is not None)516 self.assert_(zed is not None)517 lyr = self.cat.get_layer("states")518 lyr.default_style = ned519 lyr.styles = [zed]520 self.cat.save(lyr)521 self.assertEqual("topp:ned", lyr.default_style)522 self.assertEqual([zed], lyr.styles)523 lyr.refresh()524 self.assertEqual("topp:ned", lyr.default_style.fqn)525 self.assertEqual([zed.fqn], [s.fqn for s in lyr.styles])526 def testWorkspaceCreate(self):527 ws = self.cat.get_workspace("acme")528 self.assertEqual(None, ws)529 self.cat.create_workspace("acme", "http://example.com/acme")530 ws = self.cat.get_workspace("acme")531 self.assertEqual("acme", ws.name)532 def testWorkspaceDelete(self): 533 self.cat.create_workspace("foo", "http://example.com/foo")534 ws = self.cat.get_workspace("foo")535 self.cat.delete(ws)536 ws = self.cat.get_workspace("foo")537 self.assert_(ws is None)538 def testWorkspaceDefault(self):539 # save orig540 orig = self.cat.get_default_workspace()541 neu = self.cat.create_workspace("neu", "http://example.com/neu")542 try:543 # make sure setting it works544 
self.cat.set_default_workspace("neu")545 ws = self.cat.get_default_workspace()546 self.assertEqual('neu', ws.name)547 finally:548 # cleanup and reset to the way things were549 self.cat.delete(neu)550 self.cat.set_default_workspace(orig.name)551 ws = self.cat.get_default_workspace()552 self.assertEqual(orig.name, ws.name)553 def testFeatureTypeDelete(self):554 pass555 def testCoverageDelete(self):556 pass557 def testDataStoreDelete(self):558 states = self.cat.get_store('states_shapefile')559 self.assert_(states.enabled == True)560 states.enabled = False561 self.assert_(states.enabled == False)562 self.cat.save(states)563 states = self.cat.get_store('states_shapefile')564 self.assert_(states.enabled == False)565 states.enabled = True566 self.cat.save(states)567 states = self.cat.get_store('states_shapefile')568 self.assert_(states.enabled == True)569 def testLayerGroupSave(self):570 tas = self.cat.get_layergroup("tasmania")571 self.assertEqual(tas.layers, ['tasmania_state_boundaries', 'tasmania_water_bodies', 'tasmania_roads', 'tasmania_cities'], tas.layers)572 self.assertEqual(tas.styles, [None, None, None, None], tas.styles)573 tas.layers = tas.layers[:-1]574 tas.styles = tas.styles[:-1]575 self.cat.save(tas)576 # this verifies the local state577 self.assertEqual(tas.layers, ['tasmania_state_boundaries', 'tasmania_water_bodies', 'tasmania_roads'], tas.layers)578 self.assertEqual(tas.styles, [None, None, None], tas.styles)579 # force a refresh to check the remote state580 tas.refresh()581 self.assertEqual(tas.layers, ['tasmania_state_boundaries', 'tasmania_water_bodies', 'tasmania_roads'], tas.layers)582 self.assertEqual(tas.styles, [None, None, None], tas.styles)583 def testTimeDimension(self):584 sf = self.cat.get_workspace("sf")585 files = shapefile_and_friends(os.path.join(gisdata.GOOD_DATA, "time", "boxes_with_end_date"))586 self.cat.create_featurestore("boxes_with_end_date", files, sf)587 get_resource = lambda: self.cat._cache.clear() or 
self.cat.get_layer('boxes_with_end_date').resource588 # configure time as LIST589 resource = get_resource()590 timeInfo = DimensionInfo("time", "true", "LIST", None, "ISO8601", None, attribute="date")591 resource.metadata = {'time':timeInfo}592 self.cat.save(resource)593 # and verify594 resource = get_resource()595 timeInfo = resource.metadata['time']596 self.assertEqual("LIST", timeInfo.presentation)597 self.assertEqual(True, timeInfo.enabled)598 self.assertEqual("date", timeInfo.attribute)599 self.assertEqual("ISO8601", timeInfo.units)600 # disable time dimension601 timeInfo = resource.metadata['time']602 timeInfo.enabled = False603 # since this is an xml property, it won't get written unless we modify it604 resource.metadata = {'time' : timeInfo}605 self.cat.save(resource)606 # and verify607 resource = get_resource()608 timeInfo = resource.metadata['time']609 self.assertEqual(False, timeInfo.enabled)610 # configure with interval, end_attribute and enable again611 timeInfo.enabled = True612 timeInfo.presentation = 'DISCRETE_INTERVAL'613 timeInfo.resolution = '3 days'614 timeInfo.end_attribute = 'enddate'615 resource.metadata = {'time' : timeInfo}616 self.cat.save(resource)617 # and verify618 resource = get_resource()619 timeInfo = resource.metadata['time']620 self.assertEqual(True, timeInfo.enabled)621 self.assertEqual('DISCRETE_INTERVAL', timeInfo.presentation)622 self.assertEqual('3 days', timeInfo.resolution_str())623 self.assertEqual('enddate', timeInfo.end_attribute)624 def testImageMosaic(self):625 # testing the mosaic creation626 name = 'cea_mosaic'627 data = open('test/data/mosaic/cea.zip', 'rb')628 self.cat.create_imagemosaic(name, data)629 # get the layer resource back630 self.cat._cache.clear()631 resource = self.cat.get_layer(name).resource632 self.assert_(resource is not None)633 # delete granule from mosaic634 coverage = name635 store = name636 granule_id = name + '.1'637 self.cat.mosaic_delete_granule(coverage, store, granule_id)638if __name__ == 
"__main__":...
test_operators.py
Source:test_operators.py
1import warnings2import numpy as np3import pytest4import pandas as pd5from pandas import (6 Categorical,7 DataFrame,8 Series,9 date_range,10)11import pandas._testing as tm12class TestCategoricalOpsWithFactor:13 def test_categories_none_comparisons(self):14 factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"], ordered=True)15 tm.assert_categorical_equal(factor, factor)16 def test_comparisons(self, factor):17 result = factor[factor == "a"]18 expected = factor[np.asarray(factor) == "a"]19 tm.assert_categorical_equal(result, expected)20 result = factor[factor != "a"]21 expected = factor[np.asarray(factor) != "a"]22 tm.assert_categorical_equal(result, expected)23 result = factor[factor < "c"]24 expected = factor[np.asarray(factor) < "c"]25 tm.assert_categorical_equal(result, expected)26 result = factor[factor > "a"]27 expected = factor[np.asarray(factor) > "a"]28 tm.assert_categorical_equal(result, expected)29 result = factor[factor >= "b"]30 expected = factor[np.asarray(factor) >= "b"]31 tm.assert_categorical_equal(result, expected)32 result = factor[factor <= "b"]33 expected = factor[np.asarray(factor) <= "b"]34 tm.assert_categorical_equal(result, expected)35 n = len(factor)36 other = factor[np.random.permutation(n)]37 result = factor == other38 expected = np.asarray(factor) == np.asarray(other)39 tm.assert_numpy_array_equal(result, expected)40 result = factor == "d"41 expected = np.zeros(len(factor), dtype=bool)42 tm.assert_numpy_array_equal(result, expected)43 # comparisons with categoricals44 cat_rev = Categorical(["a", "b", "c"], categories=["c", "b", "a"], ordered=True)45 cat_rev_base = Categorical(46 ["b", "b", "b"], categories=["c", "b", "a"], ordered=True47 )48 cat = Categorical(["a", "b", "c"], ordered=True)49 cat_base = Categorical(["b", "b", "b"], categories=cat.categories, ordered=True)50 # comparisons need to take categories ordering into account51 res_rev = cat_rev > cat_rev_base52 exp_rev = np.array([True, False, False])53 
tm.assert_numpy_array_equal(res_rev, exp_rev)54 res_rev = cat_rev < cat_rev_base55 exp_rev = np.array([False, False, True])56 tm.assert_numpy_array_equal(res_rev, exp_rev)57 res = cat > cat_base58 exp = np.array([False, False, True])59 tm.assert_numpy_array_equal(res, exp)60 # Only categories with same categories can be compared61 msg = "Categoricals can only be compared if 'categories' are the same"62 with pytest.raises(TypeError, match=msg):63 cat > cat_rev64 cat_rev_base2 = Categorical(["b", "b", "b"], categories=["c", "b", "a", "d"])65 with pytest.raises(TypeError, match=msg):66 cat_rev > cat_rev_base267 # Only categories with same ordering information can be compared68 cat_unorderd = cat.set_ordered(False)69 assert not (cat > cat).any()70 with pytest.raises(TypeError, match=msg):71 cat > cat_unorderd72 # comparison (in both directions) with Series will raise73 s = Series(["b", "b", "b"])74 msg = (75 "Cannot compare a Categorical for op __gt__ with type "76 r"<class 'numpy\.ndarray'>"77 )78 with pytest.raises(TypeError, match=msg):79 cat > s80 with pytest.raises(TypeError, match=msg):81 cat_rev > s82 with pytest.raises(TypeError, match=msg):83 s < cat84 with pytest.raises(TypeError, match=msg):85 s < cat_rev86 # comparison with numpy.array will raise in both direction, but only on87 # newer numpy versions88 a = np.array(["b", "b", "b"])89 with pytest.raises(TypeError, match=msg):90 cat > a91 with pytest.raises(TypeError, match=msg):92 cat_rev > a93 # Make sure that unequal comparison take the categories order in94 # account95 cat_rev = Categorical(list("abc"), categories=list("cba"), ordered=True)96 exp = np.array([True, False, False])97 res = cat_rev > "b"98 tm.assert_numpy_array_equal(res, exp)99 # check that zero-dim array gets unboxed100 res = cat_rev > np.array("b")101 tm.assert_numpy_array_equal(res, exp)102class TestCategoricalOps:103 def test_compare_frame(self):104 # GH#24282 check that Categorical.__cmp__(DataFrame) defers to frame105 data = ["a", 
"b", 2, "a"]106 cat = Categorical(data)107 df = DataFrame(cat)108 result = cat == df.T109 expected = DataFrame([[True, True, True, True]])110 tm.assert_frame_equal(result, expected)111 result = cat[::-1] != df.T112 expected = DataFrame([[False, True, True, False]])113 tm.assert_frame_equal(result, expected)114 def test_compare_frame_raises(self, comparison_op):115 # alignment raises unless we transpose116 op = comparison_op117 cat = Categorical(["a", "b", 2, "a"])118 df = DataFrame(cat)119 msg = "Unable to coerce to Series, length must be 1: given 4"120 with pytest.raises(ValueError, match=msg):121 op(cat, df)122 def test_datetime_categorical_comparison(self):123 dt_cat = Categorical(date_range("2014-01-01", periods=3), ordered=True)124 tm.assert_numpy_array_equal(dt_cat > dt_cat[0], np.array([False, True, True]))125 tm.assert_numpy_array_equal(dt_cat[0] < dt_cat, np.array([False, True, True]))126 def test_reflected_comparison_with_scalars(self):127 # GH8658128 cat = Categorical([1, 2, 3], ordered=True)129 tm.assert_numpy_array_equal(cat > cat[0], np.array([False, True, True]))130 tm.assert_numpy_array_equal(cat[0] < cat, np.array([False, True, True]))131 def test_comparison_with_unknown_scalars(self):132 # https://github.com/pandas-dev/pandas/issues/9836#issuecomment-92123057133 # and following comparisons with scalars not in categories should raise134 # for unequal comps, but not for equal/not equal135 cat = Categorical([1, 2, 3], ordered=True)136 msg = "Invalid comparison between dtype=category and int"137 with pytest.raises(TypeError, match=msg):138 cat < 4139 with pytest.raises(TypeError, match=msg):140 cat > 4141 with pytest.raises(TypeError, match=msg):142 4 < cat143 with pytest.raises(TypeError, match=msg):144 4 > cat145 tm.assert_numpy_array_equal(cat == 4, np.array([False, False, False]))146 tm.assert_numpy_array_equal(cat != 4, np.array([True, True, True]))147 def test_comparison_with_tuple(self):148 cat = Categorical(np.array(["foo", (0, 1), 3, (0, 1)], 
dtype=object))149 result = cat == "foo"150 expected = np.array([True, False, False, False], dtype=bool)151 tm.assert_numpy_array_equal(result, expected)152 result = cat == (0, 1)153 expected = np.array([False, True, False, True], dtype=bool)154 tm.assert_numpy_array_equal(result, expected)155 result = cat != (0, 1)156 tm.assert_numpy_array_equal(result, ~expected)157 def test_comparison_of_ordered_categorical_with_nan_to_scalar(158 self, compare_operators_no_eq_ne159 ):160 # https://github.com/pandas-dev/pandas/issues/26504161 # BUG: fix ordered categorical comparison with missing values (#26504 )162 # and following comparisons with scalars in categories with missing163 # values should be evaluated as False164 cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)165 scalar = 2166 with warnings.catch_warnings():167 warnings.simplefilter("ignore", RuntimeWarning)168 expected = getattr(np.array(cat), compare_operators_no_eq_ne)(scalar)169 actual = getattr(cat, compare_operators_no_eq_ne)(scalar)170 tm.assert_numpy_array_equal(actual, expected)171 def test_comparison_of_ordered_categorical_with_nan_to_listlike(172 self, compare_operators_no_eq_ne173 ):174 # https://github.com/pandas-dev/pandas/issues/26504175 # and following comparisons of missing values in ordered Categorical176 # with listlike should be evaluated as False177 cat = Categorical([1, 2, 3, None], categories=[1, 2, 3], ordered=True)178 other = Categorical([2, 2, 2, 2], categories=[1, 2, 3], ordered=True)179 with warnings.catch_warnings():180 warnings.simplefilter("ignore", RuntimeWarning)181 expected = getattr(np.array(cat), compare_operators_no_eq_ne)(2)182 actual = getattr(cat, compare_operators_no_eq_ne)(other)183 tm.assert_numpy_array_equal(actual, expected)184 @pytest.mark.parametrize(185 "data,reverse,base",186 [(list("abc"), list("cba"), list("bbb")), ([1, 2, 3], [3, 2, 1], [2, 2, 2])],187 )188 def test_comparisons(self, data, reverse, base):189 cat_rev = Series(Categorical(data, 
categories=reverse, ordered=True))190 cat_rev_base = Series(Categorical(base, categories=reverse, ordered=True))191 cat = Series(Categorical(data, ordered=True))192 cat_base = Series(193 Categorical(base, categories=cat.cat.categories, ordered=True)194 )195 s = Series(base)196 a = np.array(base)197 # comparisons need to take categories ordering into account198 res_rev = cat_rev > cat_rev_base199 exp_rev = Series([True, False, False])200 tm.assert_series_equal(res_rev, exp_rev)201 res_rev = cat_rev < cat_rev_base202 exp_rev = Series([False, False, True])203 tm.assert_series_equal(res_rev, exp_rev)204 res = cat > cat_base205 exp = Series([False, False, True])206 tm.assert_series_equal(res, exp)207 scalar = base[1]208 res = cat > scalar209 exp = Series([False, False, True])210 exp2 = cat.values > scalar211 tm.assert_series_equal(res, exp)212 tm.assert_numpy_array_equal(res.values, exp2)213 res_rev = cat_rev > scalar214 exp_rev = Series([True, False, False])215 exp_rev2 = cat_rev.values > scalar216 tm.assert_series_equal(res_rev, exp_rev)217 tm.assert_numpy_array_equal(res_rev.values, exp_rev2)218 # Only categories with same categories can be compared219 msg = "Categoricals can only be compared if 'categories' are the same"220 with pytest.raises(TypeError, match=msg):221 cat > cat_rev222 # categorical cannot be compared to Series or numpy array, and also223 # not the other way around224 msg = (225 "Cannot compare a Categorical for op __gt__ with type "226 r"<class 'numpy\.ndarray'>"227 )228 with pytest.raises(TypeError, match=msg):229 cat > s230 with pytest.raises(TypeError, match=msg):231 cat_rev > s232 with pytest.raises(TypeError, match=msg):233 cat > a234 with pytest.raises(TypeError, match=msg):235 cat_rev > a236 with pytest.raises(TypeError, match=msg):237 s < cat238 with pytest.raises(TypeError, match=msg):239 s < cat_rev240 with pytest.raises(TypeError, match=msg):241 a < cat242 with pytest.raises(TypeError, match=msg):243 a < cat_rev244 
@pytest.mark.parametrize(245 "ctor",246 [247 lambda *args, **kwargs: Categorical(*args, **kwargs),248 lambda *args, **kwargs: Series(Categorical(*args, **kwargs)),249 ],250 )251 def test_unordered_different_order_equal(self, ctor):252 # https://github.com/pandas-dev/pandas/issues/16014253 c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)254 c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)255 assert (c1 == c2).all()256 c1 = ctor(["a", "b"], categories=["a", "b"], ordered=False)257 c2 = ctor(["b", "a"], categories=["b", "a"], ordered=False)258 assert (c1 != c2).all()259 c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)260 c2 = ctor(["b", "b"], categories=["b", "a"], ordered=False)261 assert (c1 != c2).all()262 c1 = ctor(["a", "a"], categories=["a", "b"], ordered=False)263 c2 = ctor(["a", "b"], categories=["b", "a"], ordered=False)264 result = c1 == c2265 tm.assert_numpy_array_equal(np.array(result), np.array([True, False]))266 def test_unordered_different_categories_raises(self):267 c1 = Categorical(["a", "b"], categories=["a", "b"], ordered=False)268 c2 = Categorical(["a", "c"], categories=["c", "a"], ordered=False)269 with pytest.raises(TypeError, match=("Categoricals can only be compared")):270 c1 == c2271 def test_compare_different_lengths(self):272 c1 = Categorical([], categories=["a", "b"])273 c2 = Categorical([], categories=["a"])274 msg = "Categoricals can only be compared if 'categories' are the same."275 with pytest.raises(TypeError, match=msg):276 c1 == c2277 def test_compare_unordered_different_order(self):278 # https://github.com/pandas-dev/pandas/issues/16603#issuecomment-279 # 349290078280 a = Categorical(["a"], categories=["a", "b"])281 b = Categorical(["b"], categories=["b", "a"])282 assert not a.equals(b)283 def test_numeric_like_ops(self):284 df = DataFrame({"value": np.random.randint(0, 10000, 100)})285 labels = [f"{i} - {i + 499}" for i in range(0, 10000, 500)]286 cat_labels = Categorical(labels, labels)287 df = 
df.sort_values(by=["value"], ascending=True)288 df["value_group"] = pd.cut(289 df.value, range(0, 10500, 500), right=False, labels=cat_labels290 )291 # numeric ops should not succeed292 for op, str_rep in [293 ("__add__", r"\+"),294 ("__sub__", "-"),295 ("__mul__", r"\*"),296 ("__truediv__", "/"),297 ]:298 msg = f"Series cannot perform the operation {str_rep}|unsupported operand"299 with pytest.raises(TypeError, match=msg):300 getattr(df, op)(df)301 # reduction ops should not succeed (unless specifically defined, e.g.302 # min/max)303 s = df["value_group"]304 for op in ["kurt", "skew", "var", "std", "mean", "sum", "median"]:305 msg = f"does not support reduction '{op}'"306 with pytest.raises(TypeError, match=msg):307 getattr(s, op)(numeric_only=False)308 # mad technically works because it takes always the numeric data309 def test_numeric_like_ops_series(self):310 # numpy ops311 s = Series(Categorical([1, 2, 3, 4]))312 with pytest.raises(TypeError, match="does not support reduction 'sum'"):313 np.sum(s)314 @pytest.mark.parametrize(315 "op, str_rep",316 [317 ("__add__", r"\+"),318 ("__sub__", "-"),319 ("__mul__", r"\*"),320 ("__truediv__", "/"),321 ],322 )323 def test_numeric_like_ops_series_arith(self, op, str_rep):324 # numeric ops on a Series325 s = Series(Categorical([1, 2, 3, 4]))326 msg = f"Series cannot perform the operation {str_rep}|unsupported operand"327 with pytest.raises(TypeError, match=msg):328 getattr(s, op)(2)329 def test_numeric_like_ops_series_invalid(self):330 # invalid ufunc331 s = Series(Categorical([1, 2, 3, 4]))332 msg = "Object with dtype category cannot perform the numpy op log"333 with pytest.raises(TypeError, match=msg):...
sandwich_metrics_unittest.py
Source:sandwich_metrics_unittest.py
1# Copyright 2016 The Chromium Authors. All rights reserved.2# Use of this source code is governed by a BSD-style license that can be3# found in the LICENSE file.4import copy5import json6import os7import shutil8import subprocess9import tempfile10import unittest11import loading_trace12import page_track13import sandwich_metrics as puller14import sandwich_runner15import request_track16import tracing17_BLINK_CAT = 'blink.user_timing'18_MEM_CAT = sandwich_runner.MEMORY_DUMP_CATEGORY19_START = 'requestStart'20_LOADS = 'loadEventStart'21_LOADE = 'loadEventEnd'22_NAVIGATION_START = 'navigationStart'23_PAINT = 'firstContentfulPaint'24_LAYOUT = 'firstLayout'25_MINIMALIST_TRACE_EVENTS = [26 {'ph': 'R', 'cat': _BLINK_CAT, 'name': _NAVIGATION_START, 'ts': 10000,27 'args': {'frame': '0'}},28 {'ph': 'R', 'cat': _BLINK_CAT, 'name': _START, 'ts': 20000,29 'args': {}},30 {'cat': _MEM_CAT, 'name': 'periodic_interval', 'pid': 1, 'ph': 'v',31 'ts': 1, 'args': {'dumps': {'allocators': {'malloc': {'attrs': {'size':{32 'units': 'bytes', 'value': '1af2', }}}}}}},33 {'ph': 'R', 'cat': _BLINK_CAT, 'name': _LAYOUT, 'ts': 24000,34 'args': {'frame': '0'}},35 {'ph': 'R', 'cat': _BLINK_CAT, 'name': _PAINT, 'ts': 31000,36 'args': {'frame': '0'}},37 {'ph': 'R', 'cat': _BLINK_CAT, 'name': _LOADS, 'ts': 35000,38 'args': {'frame': '0'}},39 {'ph': 'R', 'cat': _BLINK_CAT, 'name': _LOADE, 'ts': 40000,40 'args': {'frame': '0'}},41 {'cat': _MEM_CAT, 'name': 'periodic_interval', 'pid': 1, 'ph': 'v',42 'ts': 1, 'args': {'dumps': {'allocators': {'malloc': {'attrs': {'size':{43 'units': 'bytes', 'value': 'd704', }}}}}}},44 {'ph': 'M', 'cat': '__metadata', 'pid': 1, 'name': 'process_name', 'ts': 1,45 'args': {'name': 'Browser'}}]46def TracingTrack(events):47 return tracing.TracingTrack.FromJsonDict({48 'events': events,49 'categories': (sandwich_runner._TRACING_CATEGORIES +50 [sandwich_runner.MEMORY_DUMP_CATEGORY])})51def LoadingTrace(events):52 return loading_trace.LoadingTrace('http://a.com/', {},53 
page_track.PageTrack(None),54 request_track.RequestTrack(None),55 TracingTrack(events))56class PageTrackTest(unittest.TestCase):57 def testGetBrowserPID(self):58 def RunHelper(expected, events):59 self.assertEquals(expected, puller._GetBrowserPID(TracingTrack(events)))60 RunHelper(123, [61 {'ph': 'M', 'ts': 0, 'pid': 354, 'cat': 'whatever0'},62 {'ph': 'M', 'ts': 0, 'pid': 354, 'cat': 'whatever1'},63 {'ph': 'M', 'ts': 0, 'pid': 354, 'cat': '__metadata',64 'name': 'thread_name'},65 {'ph': 'M', 'ts': 0, 'pid': 354, 'cat': '__metadata',66 'name': 'process_name', 'args': {'name': 'Renderer'}},67 {'ph': 'M', 'ts': 0, 'pid': 123, 'cat': '__metadata',68 'name': 'process_name', 'args': {'name': 'Browser'}},69 {'ph': 'M', 'ts': 0, 'pid': 354, 'cat': 'whatever0'}])70 with self.assertRaises(ValueError):71 RunHelper(123, [72 {'ph': 'M', 'ts': 0, 'pid': 354, 'cat': 'whatever0'},73 {'ph': 'M', 'ts': 0, 'pid': 354, 'cat': 'whatever1'}])74 def testGetBrowserDumpEvents(self):75 NAME = 'periodic_interval'76 def RunHelper(trace_events, browser_pid):77 trace_events = copy.copy(trace_events)78 trace_events.append({79 'pid': browser_pid,80 'cat': '__metadata',81 'name': 'process_name',82 'ph': 'M',83 'ts': 0,84 'args': {'name': 'Browser'}})85 return puller._GetBrowserDumpEvents(TracingTrack(trace_events))86 TRACE_EVENTS = [87 {'pid': 354, 'ts': 1000, 'cat': _MEM_CAT, 'ph': 'v', 'name': NAME},88 {'pid': 354, 'ts': 2000, 'cat': _MEM_CAT, 'ph': 'V'},89 {'pid': 672, 'ts': 3000, 'cat': _MEM_CAT, 'ph': 'v', 'name': NAME},90 {'pid': 123, 'ts': 4000, 'cat': _MEM_CAT, 'ph': 'v', 'name': 'foo'},91 {'pid': 123, 'ts': 5000, 'cat': _MEM_CAT, 'ph': 'v', 'name': NAME},92 {'pid': 123, 'ts': 6000, 'cat': _MEM_CAT, 'ph': 'V'},93 {'pid': 672, 'ts': 7000, 'cat': _MEM_CAT, 'ph': 'v', 'name': NAME},94 {'pid': 354, 'ts': 8000, 'cat': _MEM_CAT, 'ph': 'v', 'name': 'foo'},95 {'pid': 123, 'ts': 9000, 'cat': 'whatever1', 'ph': 'v', 'name': NAME},96 {'pid': 123, 'ts': 10000, 'cat': _MEM_CAT, 'ph': 'v', 'name': 
NAME},97 {'pid': 354, 'ts': 11000, 'cat': 'whatever0', 'ph': 'R'},98 {'pid': 672, 'ts': 12000, 'cat': _MEM_CAT, 'ph': 'v', 'name': NAME}]99 bump_events = RunHelper(TRACE_EVENTS, 123)100 self.assertEquals(2, len(bump_events))101 self.assertEquals(5, bump_events[0].start_msec)102 self.assertEquals(10, bump_events[1].start_msec)103 bump_events = RunHelper(TRACE_EVENTS, 354)104 self.assertEquals(1, len(bump_events))105 self.assertEquals(1, bump_events[0].start_msec)106 bump_events = RunHelper(TRACE_EVENTS, 672)107 self.assertEquals(3, len(bump_events))108 self.assertEquals(3, bump_events[0].start_msec)109 self.assertEquals(7, bump_events[1].start_msec)110 self.assertEquals(12, bump_events[2].start_msec)111 with self.assertRaises(ValueError):112 RunHelper(TRACE_EVENTS, 895)113 def testGetWebPageTrackedEvents(self):114 trace_events = puller._GetWebPageTrackedEvents(TracingTrack([115 {'ph': 'R', 'ts': 0000, 'args': {}, 'cat': 'whatever',116 'name': _START},117 {'ph': 'R', 'ts': 1000, 'args': {'frame': '0'}, 'cat': 'whatever',118 'name': _LOADS},119 {'ph': 'R', 'ts': 2000, 'args': {'frame': '0'}, 'cat': 'whatever',120 'name': _LOADE},121 {'ph': 'R', 'ts': 3000, 'args': {}, 'cat': _BLINK_CAT,122 'name': _START},123 {'ph': 'R', 'ts': 4000, 'args': {'frame': '0'}, 'cat': _BLINK_CAT,124 'name': _LOADS},125 {'ph': 'R', 'ts': 5000, 'args': {'frame': '0'}, 'cat': _BLINK_CAT,126 'name': _LOADE},127 {'ph': 'R', 'ts': 7000, 'args': {}, 'cat': _BLINK_CAT,128 'name': _START},129 {'ph': 'R', 'ts': 8000, 'args': {'frame': '0'}, 'cat': _BLINK_CAT,130 'name': _LOADS},131 {'ph': 'R', 'ts': 9000, 'args': {'frame': '0'}, 'cat': _BLINK_CAT,132 'name': _LOADE},133 {'ph': 'R', 'ts': 11000, 'args': {'frame': '0'}, 'cat': 'whatever',134 'name': _START},135 {'ph': 'R', 'ts': 12000, 'args': {'frame': '0'}, 'cat': 'whatever',136 'name': _LOADS},137 {'ph': 'R', 'ts': 13000, 'args': {'frame': '0'}, 'cat': 'whatever',138 'name': _LOADE},139 {'ph': 'R', 'ts': 14000, 'args': {}, 'cat': _BLINK_CAT,140 
'name': _START},141 {'ph': 'R', 'ts': 10000, 'args': {'frame': '0'}, 'cat': _BLINK_CAT,142 'name': _NAVIGATION_START}, # Event out of |start_msec| order.143 {'ph': 'R', 'ts': 6000, 'args': {'frame': '0'}, 'cat': 'whatever',144 'name': _NAVIGATION_START},145 {'ph': 'R', 'ts': 15000, 'args': {}, 'cat': _BLINK_CAT,146 'name': _START},147 {'ph': 'R', 'ts': 16000, 'args': {'frame': '1'}, 'cat': _BLINK_CAT,148 'name': _LOADS},149 {'ph': 'R', 'ts': 17000, 'args': {'frame': '0'}, 'cat': _BLINK_CAT,150 'name': _LOADS},151 {'ph': 'R', 'ts': 18000, 'args': {'frame': '1'}, 'cat': _BLINK_CAT,152 'name': _LOADE},153 {'ph': 'R', 'ts': 19000, 'args': {'frame': '0'}, 'cat': _BLINK_CAT,154 'name': _LOADE},155 {'ph': 'R', 'ts': 20000, 'args': {}, 'cat': 'whatever',156 'name': _START},157 {'ph': 'R', 'ts': 21000, 'args': {'frame': '0'}, 'cat': 'whatever',158 'name': _LOADS},159 {'ph': 'R', 'ts': 22000, 'args': {'frame': '0'}, 'cat': 'whatever',160 'name': _LOADE},161 {'ph': 'R', 'ts': 23000, 'args': {}, 'cat': _BLINK_CAT,162 'name': _START},163 {'ph': 'R', 'ts': 24000, 'args': {'frame': '0'}, 'cat': _BLINK_CAT,164 'name': _LOADS},165 {'ph': 'R', 'ts': 25000, 'args': {'frame': '0'}, 'cat': _BLINK_CAT,166 'name': _LOADE}]))167 self.assertEquals(3, len(trace_events))168 self.assertEquals(14, trace_events['requestStart'].start_msec)169 self.assertEquals(17, trace_events['loadEventStart'].start_msec)170 self.assertEquals(19, trace_events['loadEventEnd'].start_msec)171 def testExtractDefaultMetrics(self):172 metrics = puller._ExtractDefaultMetrics(LoadingTrace(173 _MINIMALIST_TRACE_EVENTS))174 self.assertEquals(4, len(metrics))175 self.assertEquals(20, metrics['total_load'])176 self.assertEquals(5, metrics['js_onload_event'])177 self.assertEquals(4, metrics['first_layout'])178 self.assertEquals(11, metrics['first_contentful_paint'])179 def testExtractDefaultMetricsBestEffort(self):180 metrics = puller._ExtractDefaultMetrics(LoadingTrace([181 {'ph': 'R', 'ts': 10000, 'args': {'frame': '0'}, 
'cat': _BLINK_CAT,182 'name': _NAVIGATION_START},183 {'ph': 'R', 'ts': 11000, 'args': {'frame': '0'}, 'cat': 'whatever',184 'name': _START}]))185 self.assertEquals(4, len(metrics))186 self.assertEquals(puller._FAILED_CSV_VALUE, metrics['total_load'])187 self.assertEquals(puller._FAILED_CSV_VALUE, metrics['js_onload_event'])188 self.assertEquals(puller._FAILED_CSV_VALUE, metrics['first_layout'])189 self.assertEquals(puller._FAILED_CSV_VALUE,190 metrics['first_contentful_paint'])191 def testExtractMemoryMetrics(self):192 metrics = puller._ExtractMemoryMetrics(LoadingTrace(193 _MINIMALIST_TRACE_EVENTS))194 self.assertEquals(2, len(metrics))195 self.assertEquals(30971, metrics['browser_malloc_avg'])196 self.assertEquals(55044, metrics['browser_malloc_max'])197 def testComputeSpeedIndex(self):198 def point(time, frame_completeness):199 return puller.CompletenessPoint(time=time,200 frame_completeness=frame_completeness)201 completness_record = [202 point(0, 0.0),203 point(120, 0.4),204 point(190, 0.75),205 point(280, 1.0),206 point(400, 1.0),207 ]208 self.assertEqual(120 + 70 * 0.6 + 90 * 0.25,209 puller._ComputeSpeedIndex(completness_record))210 completness_record = [211 point(70, 0.0),212 point(150, 0.3),213 point(210, 0.6),214 point(220, 0.9),215 point(240, 1.0),216 ]217 self.assertEqual(80 + 60 * 0.7 + 10 * 0.4 + 20 * 0.1,218 puller._ComputeSpeedIndex(completness_record))219 completness_record = [220 point(90, 0.0),221 point(200, 0.6),222 point(150, 0.3),223 point(230, 1.0),224 ]225 with self.assertRaises(ValueError):226 puller._ComputeSpeedIndex(completness_record)227if __name__ == '__main__':...
prepare_cocofied_lvis.py
Source:prepare_cocofied_lvis.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.
import copy
import json
import os
from collections import defaultdict

# This mapping is extracted from the official LVIS mapping:
# https://github.com/lvis-dataset/lvis-api/blob/master/data/coco_to_synset.json
COCO_SYNSET_CATEGORIES = [
    {"synset": "person.n.01", "coco_cat_id": 1},
    {"synset": "bicycle.n.01", "coco_cat_id": 2},
    {"synset": "car.n.01", "coco_cat_id": 3},
    {"synset": "motorcycle.n.01", "coco_cat_id": 4},
    {"synset": "airplane.n.01", "coco_cat_id": 5},
    {"synset": "bus.n.01", "coco_cat_id": 6},
    {"synset": "train.n.01", "coco_cat_id": 7},
    {"synset": "truck.n.01", "coco_cat_id": 8},
    {"synset": "boat.n.01", "coco_cat_id": 9},
    {"synset": "traffic_light.n.01", "coco_cat_id": 10},
    {"synset": "fireplug.n.01", "coco_cat_id": 11},
    {"synset": "stop_sign.n.01", "coco_cat_id": 13},
    {"synset": "parking_meter.n.01", "coco_cat_id": 14},
    {"synset": "bench.n.01", "coco_cat_id": 15},
    {"synset": "bird.n.01", "coco_cat_id": 16},
    {"synset": "cat.n.01", "coco_cat_id": 17},
    {"synset": "dog.n.01", "coco_cat_id": 18},
    {"synset": "horse.n.01", "coco_cat_id": 19},
    {"synset": "sheep.n.01", "coco_cat_id": 20},
    {"synset": "beef.n.01", "coco_cat_id": 21},
    {"synset": "elephant.n.01", "coco_cat_id": 22},
    {"synset": "bear.n.01", "coco_cat_id": 23},
    {"synset": "zebra.n.01", "coco_cat_id": 24},
    {"synset": "giraffe.n.01", "coco_cat_id": 25},
    {"synset": "backpack.n.01", "coco_cat_id": 27},
    {"synset": "umbrella.n.01", "coco_cat_id": 28},
    {"synset": "bag.n.04", "coco_cat_id": 31},
    {"synset": "necktie.n.01", "coco_cat_id": 32},
    {"synset": "bag.n.06", "coco_cat_id": 33},
    {"synset": "frisbee.n.01", "coco_cat_id": 34},
    {"synset": "ski.n.01", "coco_cat_id": 35},
    {"synset": "snowboard.n.01", "coco_cat_id": 36},
    {"synset": "ball.n.06", "coco_cat_id": 37},
    {"synset": "kite.n.03", "coco_cat_id": 38},
    {"synset": "baseball_bat.n.01", "coco_cat_id": 39},
    {"synset": "baseball_glove.n.01", "coco_cat_id": 40},
    {"synset": "skateboard.n.01", "coco_cat_id": 41},
    {"synset": "surfboard.n.01", "coco_cat_id": 42},
    {"synset": "tennis_racket.n.01", "coco_cat_id": 43},
    {"synset": "bottle.n.01", "coco_cat_id": 44},
    {"synset": "wineglass.n.01", "coco_cat_id": 46},
    {"synset": "cup.n.01", "coco_cat_id": 47},
    {"synset": "fork.n.01", "coco_cat_id": 48},
    {"synset": "knife.n.01", "coco_cat_id": 49},
    {"synset": "spoon.n.01", "coco_cat_id": 50},
    {"synset": "bowl.n.03", "coco_cat_id": 51},
    {"synset": "banana.n.02", "coco_cat_id": 52},
    {"synset": "apple.n.01", "coco_cat_id": 53},
    {"synset": "sandwich.n.01", "coco_cat_id": 54},
    {"synset": "orange.n.01", "coco_cat_id": 55},
    {"synset": "broccoli.n.01", "coco_cat_id": 56},
    {"synset": "carrot.n.01", "coco_cat_id": 57},
    {"synset": "frank.n.02", "coco_cat_id": 58},
    {"synset": "pizza.n.01", "coco_cat_id": 59},
    {"synset": "doughnut.n.02", "coco_cat_id": 60},
    {"synset": "cake.n.03", "coco_cat_id": 61},
    {"synset": "chair.n.01", "coco_cat_id": 62},
    {"synset": "sofa.n.01", "coco_cat_id": 63},
    {"synset": "pot.n.04", "coco_cat_id": 64},
    {"synset": "bed.n.01", "coco_cat_id": 65},
    {"synset": "dining_table.n.01", "coco_cat_id": 67},
    {"synset": "toilet.n.02", "coco_cat_id": 70},
    {"synset": "television_receiver.n.01", "coco_cat_id": 72},
    {"synset": "laptop.n.01", "coco_cat_id": 73},
    {"synset": "mouse.n.04", "coco_cat_id": 74},
    {"synset": "remote_control.n.01", "coco_cat_id": 75},
    {"synset": "computer_keyboard.n.01", "coco_cat_id": 76},
    {"synset": "cellular_telephone.n.01", "coco_cat_id": 77},
    {"synset": "microwave.n.02", "coco_cat_id": 78},
    {"synset": "oven.n.01", "coco_cat_id": 79},
    {"synset": "toaster.n.02", "coco_cat_id": 80},
    {"synset": "sink.n.01", "coco_cat_id": 81},
    {"synset": "electric_refrigerator.n.01", "coco_cat_id": 82},
    {"synset": "book.n.01", "coco_cat_id": 84},
    {"synset": "clock.n.01", "coco_cat_id": 85},
    {"synset": "vase.n.01", "coco_cat_id": 86},
    {"synset": "scissors.n.01", "coco_cat_id": 87},
    {"synset": "teddy.n.01", "coco_cat_id": 88},
    {"synset": "hand_blower.n.01", "coco_cat_id": 89},
    {"synset": "toothbrush.n.01", "coco_cat_id": 90},
]


def cocofy_lvis(input_filename, output_filename):
    """
    Filter LVIS instance segmentation annotations to remove all categories that are not included in
    COCO. The new json files can be used to evaluate COCO AP using `lvis-api`. The category ids in
    the output json are the (non-contiguous) COCO dataset ids.

    Args:
        input_filename (str): path to the LVIS json file.
        output_filename (str): path to the COCOfied json file.
    """
    with open(input_filename, "r") as f:
        lvis_json = json.load(f)

    # Pop the (large) annotation list before deep-copying the rest of the
    # json, then put it back; this avoids deep-copying every annotation twice.
    lvis_annos = lvis_json.pop("annotations")
    cocofied_lvis = copy.deepcopy(lvis_json)
    lvis_json["annotations"] = lvis_annos

    # Mapping from lvis cat id to coco cat id via synset
    lvis_cat_id_to_synset = {cat["id"]: cat["synset"] for cat in lvis_json["categories"]}
    synset_to_coco_cat_id = {x["synset"]: x["coco_cat_id"] for x in COCO_SYNSET_CATEGORIES}
    # Synsets that we will keep in the dataset
    synsets_to_keep = set(synset_to_coco_cat_id.keys())
    coco_cat_id_with_instances = defaultdict(int)

    # Keep only annotations whose category maps to a COCO synset, rewriting
    # category ids to COCO ids and renumbering annotation ids from 1.
    new_annos = []
    ann_id = 1
    for ann in lvis_annos:
        lvis_cat_id = ann["category_id"]
        synset = lvis_cat_id_to_synset[lvis_cat_id]
        if synset not in synsets_to_keep:
            continue
        coco_cat_id = synset_to_coco_cat_id[synset]
        new_ann = copy.deepcopy(ann)
        new_ann["category_id"] = coco_cat_id
        new_ann["id"] = ann_id
        ann_id += 1
        new_annos.append(new_ann)
        coco_cat_id_with_instances[coco_cat_id] += 1
    cocofied_lvis["annotations"] = new_annos

    # Rewrite per-image category-id lists the same way.  Note that ids seen
    # here also count toward coco_cat_id_with_instances, matching the
    # original behavior.
    for image in cocofied_lvis["images"]:
        for key in ["not_exhaustive_category_ids", "neg_category_ids"]:
            new_category_list = []
            for lvis_cat_id in image[key]:
                synset = lvis_cat_id_to_synset[lvis_cat_id]
                if synset not in synsets_to_keep:
                    continue
                coco_cat_id = synset_to_coco_cat_id[synset]
                new_category_list.append(coco_cat_id)
                coco_cat_id_with_instances[coco_cat_id] += 1
            image[key] = new_category_list

    coco_cat_id_with_instances = set(coco_cat_id_with_instances.keys())

    # Keep only categories that both map to COCO and actually occur.
    new_categories = []
    for cat in lvis_json["categories"]:
        synset = cat["synset"]
        if synset not in synsets_to_keep:
            continue
        coco_cat_id = synset_to_coco_cat_id[synset]
        if coco_cat_id not in coco_cat_id_with_instances:
            continue
        new_cat = copy.deepcopy(cat)
        new_cat["id"] = coco_cat_id
        new_categories.append(new_cat)
    cocofied_lvis["categories"] = new_categories

    with open(output_filename, "w") as f:
        json.dump(cocofied_lvis, f)
    print("{} is COCOfied and stored in {}.".format(input_filename, output_filename))


if __name__ == "__main__":
    dataset_dir = os.path.join(os.getenv("DETECTRON2_DATASETS", "datasets"), "lvis")
    for s in ["lvis_v0.5_train", "lvis_v0.5_val"]:
        # Typo fix: the original printed "Start COCOfing {}."
        print("Start COCOfying {}.".format(s))
        cocofy_lvis(
            os.path.join(dataset_dir, "{}.json".format(s)),
            os.path.join(dataset_dir, "{}_cocofied.json".format(s)),
        )
evaluation.py
Source:evaluation.py
import numpy as np
import keras
from sklearn import metrics
from mlxtend.evaluate import confusion_matrix
from sklearn.metrics import roc_auc_score, precision_recall_curve, auc

# Label convention: one is agitation and zero is not-agitation;
# positive = agitation, negative = not-agitation.
# Throughout, y_pred is an (n, 2) array of per-class scores where column 1
# is the score for the positive class; y_true is either a 1-D label vector
# or a one-hot (n, 2) matrix depending on the function.


def decision(y_pred, thresh=0.5):
    """Binarize predictions: 1 where the class-1 score >= thresh, else 0."""
    return np.where(y_pred[:, 1] >= thresh, 1, 0)


def false_rejection_rate(y_true, y_pred, thresh=0.5):
    """Number of positive cases predicted negative / number of positive cases.

    NOTE(review): raises ZeroDivisionError when y_true has no positives —
    presumably callers guarantee both classes are present.
    """
    pred = decision(y_pred, thresh)
    num_p = np.count_nonzero(y_true == 1.)
    # indices of false negatives (positives labelled negative)
    false_negatives = [i for i in range(len(pred))
                       if y_true[i] == 1. and pred[i] == 0.]
    return len(false_negatives) / num_p


def false_acceptance_rate(y_true, y_pred, thresh=0.5):
    """1 - (number of negative cases predicted negative / number of negatives)."""
    pred = decision(y_pred, thresh)
    num_n = np.count_nonzero(y_true == 0.)
    # indices of true negatives (negatives labelled negative)
    true_negatives = [i for i in range(len(pred))
                      if pred[i] == 0. and y_true[i] == 0.]
    return 1 - (len(true_negatives) / num_n)


def HalfTotalErrorRate(y_true, y_pred, thresh=0.5):
    """HTER = (FAR + FRR) / 2.  Accepts one-hot or 1-D y_true."""
    alpha = 0.5
    if y_true.ndim != 1:
        # one-hot encoded labels -> class indices
        y_true = np.argmax(y_true, axis=1)
    far = false_acceptance_rate(y_true, y_pred, thresh)
    frr = false_rejection_rate(y_true, y_pred, thresh)
    return (alpha * far) + (alpha * frr)


def CategoricalTruePositive(y_true, y_pred, thresh=0.5):
    """Count samples whose true class is 1 and predicted class is 1 (y_true one-hot)."""
    y_true = np.argmax(y_true, axis=1)
    y_pred = decision(y_pred, thresh)
    true_poss = np.logical_and(np.equal(y_true, 1), np.equal(y_pred, 1))
    return np.sum(true_poss)


def CategoricalTrueNegative(y_true, y_pred, thresh=0.5):
    """Count samples whose true class is 0 and predicted class is 0 (y_true one-hot)."""
    y_true = np.argmax(y_true, axis=1)
    y_pred = decision(y_pred, thresh)
    true_neg = np.logical_and(np.equal(y_true, 0), np.equal(y_pred, 0))
    return np.sum(true_neg)


def CategoricalFalseNegative(y_true, y_pred, thresh=0.5):
    """Count samples whose true class is 1 but predicted class is 0 (y_true one-hot)."""
    y_true = np.argmax(y_true, axis=1)
    y_pred = decision(y_pred, thresh)
    false_neg = np.logical_and(np.equal(y_true, 1), np.equal(y_pred, 0))
    return np.sum(false_neg)


def CategoricalFalsePositive(y_true, y_pred, thresh=0.5):
    """Count samples whose true class is 0 but predicted class is 1 (y_true one-hot)."""
    y_true = np.argmax(y_true, axis=1)
    y_pred = decision(y_pred, thresh)
    false_poss = np.logical_and(np.equal(y_true, 0), np.equal(y_pred, 1))
    return np.sum(false_poss)


class CategoricalEvaluation(keras.callbacks.Callback):
    """Keras callback recording confusion-matrix-style metrics each epoch.

    Histories (CTP, CTN, CFN, CFP, HTER, recall, specificity, precision)
    accumulate one entry per epoch and are printed as they are computed.
    """

    def __init__(self, model, validation_data, thresh=0.5):
        super().__init__()
        self.model = model
        self.y_true = validation_data[1]   # one-hot validation labels
        self.x_val = validation_data[0]    # validation inputs
        self.thresh = thresh
        # per-epoch metric histories
        self.CTP = []
        self.CTN = []
        self.CFN = []
        self.CFP = []
        self.HTER = []
        self.recall = []
        self.specificity = []
        self.precision = []

    def on_epoch_end(self, epoch, logs=None):
        # Bug fix: the original used the mutable default ``logs={}``;
        # Keras' documented callback signature is ``logs=None`` (logs is
        # unused here in any case).
        y_pred = self.model.predict(self.x_val)
        cat_true_positive = CategoricalTruePositive(self.y_true, y_pred, self.thresh)
        cat_true_negative = CategoricalTrueNegative(self.y_true, y_pred, self.thresh)
        cat_false_negative = CategoricalFalseNegative(self.y_true, y_pred, self.thresh)
        cat_false_positive = CategoricalFalsePositive(self.y_true, y_pred, self.thresh)
        half_total_error_rate = HalfTotalErrorRate(self.y_true, y_pred, self.thresh)
        # NOTE(review): these divide by zero when a denominator count is 0.
        cat_recall = cat_true_positive / (cat_true_positive + cat_false_negative)
        cat_spec = cat_true_negative / (cat_true_negative + cat_false_positive)
        cat_prec = cat_true_positive / (cat_true_positive + cat_false_positive)

        self.CTP.append(cat_true_positive)
        self.CTN.append(cat_true_negative)
        self.CFN.append(cat_false_negative)
        self.CFP.append(cat_false_positive)
        self.HTER.append(half_total_error_rate)
        self.recall.append(cat_recall)
        self.specificity.append(cat_spec)
        self.precision.append(cat_prec)

        print("True Positive:", cat_true_positive)
        print("True Negative:", cat_true_negative)
        print("False Negative:", cat_false_negative)
        print("False Positive:", cat_false_positive)
        print("Recall/Sensitivity:", cat_recall)
        print("Specificity:", cat_spec)
        print("Precision:", cat_prec)
        print("Half Total Error Rate:", half_total_error_rate)


def CategoricalAccuracy(thresh=0.5):
    """Build a Keras metric: accuracy with the class-1 score thresholded.

    Bug fix: the original body referenced ``tf`` and ``K`` which were never
    imported anywhere in this module, so the metric raised NameError the
    first time it ran.  They are imported here, at factory-call time.
    """
    import tensorflow as tf
    from keras import backend as K

    def cat_accuracy(y_true, y_pred):
        y_pred = tf.cast(tf.greater_equal(y_pred[:, 1], thresh), tf.float32)
        y_true = tf.cast(tf.argmax(y_true, axis=1), tf.float32)
        return K.mean(K.equal(y_true, K.round(y_pred)))

    return cat_accuracy


def evaluation(y_true, y_pred):
    """Compute a dict of classification metrics at the default 0.5 threshold.

    Returns TP/TN/FN/FP counts plus Accuracy, Recall, Specificity,
    Precision, F1, ROC AUC and precision-recall AUC.
    """
    true_positive = CategoricalTruePositive(y_true, y_pred)
    true_negative = CategoricalTrueNegative(y_true, y_pred)
    false_negative = CategoricalFalseNegative(y_true, y_pred)
    false_positive = CategoricalFalsePositive(y_true, y_pred)
    recall = true_positive / (true_positive + false_negative)
    spec = true_negative / (true_negative + false_positive)
    prec = true_positive / (true_positive + false_positive)
    roc_auc = roc_auc_score(y_true, y_pred[:, 1])
    # auc() integrates precision over recall, so recall is the x axis
    precision, rec, _ = precision_recall_curve(y_true, y_pred[:, 1])
    pr_rec = auc(rec, precision)
    f1 = 2 * (recall * prec) / (prec + recall)
    acc = (true_positive + true_negative) / (
        true_negative + true_positive + false_negative + false_positive)
    return {'TP': true_positive,
            'TN': true_negative,
            'FN': false_negative,
            'FP': false_positive,
            'Accuracy': acc,
            'Recall': recall,
            'Specificity': spec,
            'Precision': prec,
            'F1': f1,
            'AUC': roc_auc,
            'prec-rec': pr_rec
            }


def categorical_evaluation(model, x, true, thresh=0.5):
    """Predict on x with model and compute the metric dict at thresh.

    Like :func:`evaluation` but runs ``model.predict_proba`` itself and
    additionally reports HTER.
    """
    y_pred = model.predict_proba(x)
    cat_true_positive = CategoricalTruePositive(true, y_pred, thresh)
    cat_true_negative = CategoricalTrueNegative(true, y_pred, thresh)
    cat_false_negative = CategoricalFalseNegative(true, y_pred, thresh)
    cat_false_positive = CategoricalFalsePositive(true, y_pred, thresh)
    half_total_error_rate = HalfTotalErrorRate(true, y_pred, thresh)
    cat_recall = cat_true_positive / (cat_true_positive + cat_false_negative)
    cat_spec = cat_true_negative / (cat_true_negative + cat_false_positive)
    cat_prec = cat_true_positive / (cat_true_positive + cat_false_positive)
    auc_roc = roc_auc_score(true, y_pred[:, 1])
    precision, recall, thresholds = precision_recall_curve(true, y_pred[:, 1])
    pr_rec = auc(recall, precision)
    acc = (cat_true_positive + cat_true_negative) / (
        cat_true_negative + cat_true_positive + cat_false_negative + cat_false_positive)
    f1 = 2 * (cat_recall * cat_prec) / (cat_recall + cat_prec)
    # NOTE(review): the source was truncated right after 'prec-rec'; only the
    # closing brace of this return (mirroring evaluation()) is reconstructed.
    return {'TP': cat_true_positive,
            'TN': cat_true_negative,
            'FN': cat_false_negative,
            'FP': cat_false_positive,
            'Accuracy': acc,
            'Recall': cat_recall,
            'Specificity': cat_spec,
            'Precision': cat_prec,
            'F1': f1,
            'HTER': half_total_error_rate,
            'auc': auc_roc,
            'prec-rec': pr_rec
            }
category.py
Source:category.py
# -*- coding: utf-8 -*-
from requests.auth import HTTPBasicAuth
from sqlalchemy.orm import sessionmaker
from data_updater.models import server as server_models
from config import CLIENT_SETTINGS
from .utils import request_data, timestamp_to_date_str, updated_request, timestamp_cur, print_json
from .cache import update_cache

# REST endpoints and credentials for the category API.
url = CLIENT_SETTINGS['root_url'].rstrip('/') + '/' + CLIENT_SETTINGS['category_url'].lstrip('/')
category_problem_url = CLIENT_SETTINGS['category_problem_url'].rstrip('/')
username = CLIENT_SETTINGS['username']
password = CLIENT_SETTINGS['password']
auth = HTTPBasicAuth(username, password)
cache_name = 'category_updater_cache'

# Module-level DB session shared by all writers below.
Session = sessionmaker(bind=server_models.engine)
session = Session()


def request_category_list(request_url):
    """Fetch one page of the category list; returns parsed JSON or None."""
    return request_data(url=request_url, auth=auth)


def request_category_problem_list(request_url):
    """Fetch one page of a category's problem list; returns parsed JSON or None."""
    return request_data(url=request_url, auth=auth)


def write_category(cat_json):
    """Insert or update the Category row described by cat_json, then commit."""
    cat = session.query(server_models.Category).filter_by(id=cat_json['id']).first()
    if cat is None:
        cat_model = server_models.Category(id=cat_json['id'],
                                           title=cat_json['title'],
                                           introduction=cat_json['introduction'],
                                           source=cat_json['source'],
                                           author=cat_json['author'],
                                           number_problem=cat_json['number_problem'])
        session.add(cat_model)
        print('created cat %s: %s' % (cat_json['id'], cat_json['title']))
    else:
        cat.title = cat_json['title']
        cat.introduction = cat_json['introduction']
        cat.source = cat_json['source']
        cat.author = cat_json['author']
        cat.number_problem = cat_json['number_problem']
        # Log-message fix: originally said 'updated env', a copy-paste
        # leftover from the environment updater; this updates a category.
        print('updated cat %s: %s' % (cat_json['id'], cat_json['title']))
    session.commit()


def write_category_problems(cat_id, cp_json):
    """Insert, update or prune the CategoryProblemRelation for cp_json.

    If the referenced problem does not exist locally, a stale relation is
    deleted instead of written.  Commits in all cases.
    """
    cp = session.query(
        server_models.CategoryProblemRelation).filter_by(id=cp_json['id']).first()
    problem = session.query(
        server_models.Problem).filter_by(id=cp_json['problem']['id']).first()
    if problem is None:
        # Unknown problem locally: drop the relation if we have one.
        if cp is not None:
            session.delete(cp)
    elif cp is None:
        cat_model = server_models.CategoryProblemRelation(
            id=cp_json['id'],
            category_id=cat_id,
            problem_id=cp_json['problem']['id'],
            directory=cp_json['directory'])
        session.add(cat_model)
        print('created cat_problem %s: %s' % (cp_json['id'], cp_json['problem']['title']))
    else:
        # Bug fix: the original read ``cp.category_id = cat_id,`` — the
        # trailing comma assigned the 1-tuple ``(cat_id,)`` instead of the id.
        cp.category_id = cat_id
        cp.problem_id = cp_json['problem']['id']
        cp.directory = cp_json['directory']
        print('updated cat_problem %s: %s' % (cp_json['id'], cp_json['problem']['title']))
    session.commit()


def update_category(cat_json):
    """Write one category, then page through and write all of its problems.

    Each page is retried up to 3 times; a page that never loads aborts the
    remaining pages for this category (best effort, matching the original).
    """
    write_category(cat_json)
    cat_id = int(cat_json['id'])
    request_url = url.rstrip('/') + ('/%s/' % (cat_json['id'])) + category_problem_url + '/'
    while request_url is not None:
        chance_left = 3
        cp_list = None
        while cp_list is None and chance_left > 0:
            print('requesting %s, tried %s ......' % (request_url, 3 - chance_left))
            cp_list = request_category_problem_list(request_url)
            chance_left -= 1
        if cp_list is None:
            request_url = None
        else:
            for cp in cp_list['results']:
                write_category_problems(cat_id, cp)
            request_url = cp_list['next']


def update_categories(update_all=False):
    """Sync categories from the server, paging with up-to-3 retries per page.

    When update_all is False, only categories changed since the cached
    timestamp are requested (via updated_request).
    """
    # NOTE(review): `time` is captured but never used — presumably intended
    # for an update_cache(cache_name, time) call at the end; kept as-is to
    # preserve behavior.  TODO confirm with the cache module.
    time = timestamp_cur()
    request_url = url if update_all else updated_request(url, cache_name)
    while request_url is not None:
        chance_left = 3
        cat_list = None
        while cat_list is None and chance_left > 0:
            print('requesting %s, tried %s ......' % (request_url, 3 - chance_left))
            cat_list = request_category_list(request_url)
            chance_left -= 1
        if cat_list is None:
            request_url = None
        else:
            for c in cat_list['results']:
                update_category(c)
            request_url = cat_list['next']
test_dataset_wrapper.py
Source:test_dataset_wrapper.py
1import bisect2import math3from collections import defaultdict4from unittest.mock import MagicMock5import numpy as np6from mmdet.datasets import (ClassBalancedDataset, ConcatDataset, CustomDataset,7 RepeatDataset)8def test_dataset_wrapper():9 CustomDataset.load_annotations = MagicMock()10 CustomDataset.__getitem__ = MagicMock(side_effect=lambda idx: idx)11 dataset_a = CustomDataset(12 ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')13 len_a = 1014 cat_ids_list_a = [15 np.random.randint(0, 80, num).tolist()16 for num in np.random.randint(1, 20, len_a)17 ]18 dataset_a.data_infos = MagicMock()19 dataset_a.data_infos.__len__.return_value = len_a20 dataset_a.get_cat_ids = MagicMock(21 side_effect=lambda idx: cat_ids_list_a[idx])22 dataset_b = CustomDataset(23 ann_file=MagicMock(), pipeline=[], test_mode=True, img_prefix='')24 len_b = 2025 cat_ids_list_b = [26 np.random.randint(0, 80, num).tolist()27 for num in np.random.randint(1, 20, len_b)28 ]29 dataset_b.data_infos = MagicMock()30 dataset_b.data_infos.__len__.return_value = len_b31 dataset_b.get_cat_ids = MagicMock(32 side_effect=lambda idx: cat_ids_list_b[idx])33 concat_dataset = ConcatDataset([dataset_a, dataset_b])34 assert concat_dataset[5] == 535 assert concat_dataset[25] == 1536 assert concat_dataset.get_cat_ids(5) == cat_ids_list_a[5]37 assert concat_dataset.get_cat_ids(25) == cat_ids_list_b[15]38 assert len(concat_dataset) == len(dataset_a) + len(dataset_b)39 repeat_dataset = RepeatDataset(dataset_a, 10)40 assert repeat_dataset[5] == 541 assert repeat_dataset[15] == 542 assert repeat_dataset[27] == 743 assert repeat_dataset.get_cat_ids(5) == cat_ids_list_a[5]44 assert repeat_dataset.get_cat_ids(15) == cat_ids_list_a[5]45 assert repeat_dataset.get_cat_ids(27) == cat_ids_list_a[7]46 assert len(repeat_dataset) == 10 * len(dataset_a)47 category_freq = defaultdict(int)48 for cat_ids in cat_ids_list_a:49 cat_ids = set(cat_ids)50 for cat_id in cat_ids:51 category_freq[cat_id] += 152 for k, v in 
category_freq.items():53 category_freq[k] = v / len(cat_ids_list_a)54 mean_freq = np.mean(list(category_freq.values()))55 repeat_thr = mean_freq56 category_repeat = {57 cat_id: max(1.0, math.sqrt(repeat_thr / cat_freq))58 for cat_id, cat_freq in category_freq.items()59 }60 repeat_factors = []61 for cat_ids in cat_ids_list_a:62 cat_ids = set(cat_ids)63 repeat_factor = max({category_repeat[cat_id] for cat_id in cat_ids})64 repeat_factors.append(math.ceil(repeat_factor))65 repeat_factors_cumsum = np.cumsum(repeat_factors)66 repeat_factor_dataset = ClassBalancedDataset(dataset_a, repeat_thr)67 assert len(repeat_factor_dataset) == repeat_factors_cumsum[-1]68 for idx in np.random.randint(0, len(repeat_factor_dataset), 3):69 assert repeat_factor_dataset[idx] == bisect.bisect_right(...
coco_annotation.py
Source:coco_annotation.py
import json
from collections import defaultdict


def coco91_to_80(cat):
    """Map a COCO 'paper' category id (1-90, with gaps) to a contiguous
    0-based index (0-79).

    Ids that fall inside a gap (e.g. 12, 26) are returned unchanged,
    matching the original script's behaviour.
    """
    if 1 <= cat <= 11:
        return cat - 1
    if 13 <= cat <= 25:
        return cat - 2
    if 27 <= cat <= 28:
        return cat - 3
    if 31 <= cat <= 44:
        return cat - 5
    if 46 <= cat <= 65:
        return cat - 6
    if cat == 67:
        return cat - 7
    if cat == 70:
        return cat - 9
    if 72 <= cat <= 82:
        return cat - 10
    if 84 <= cat <= 90:
        return cat - 11
    return cat


def load_annotations(ann_path):
    """Read a COCO instances JSON and group [bbox, class] pairs by image path."""
    name_box_id = defaultdict(list)
    # fix: the original opened this file and never closed it (handle rebound later)
    with open(ann_path, encoding='utf-8') as f:
        data = json.load(f)
    for ant in data['annotations']:
        image_id = ant['image_id']  # fix: original shadowed the builtin `id`
        name = 'mscoco2017/train2017/%012d.jpg' % image_id
        name_box_id[name].append([ant['bbox'], coco91_to_80(ant['category_id'])])
    return name_box_id


def write_train_list(name_box_id, out_path):
    """Write one line per image: `<path> x_min,y_min,x_max,y_max,class ...`.

    Boxes are converted from COCO [x, y, w, h] to integer corner coordinates.
    """
    # fix: the original never closed/flushed the output file
    with open(out_path, 'w') as f:
        for key in name_box_id.keys():
            f.write(key)
            for info in name_box_id[key]:
                x_min = int(info[0][0])
                y_min = int(info[0][1])
                x_max = x_min + int(info[0][2])
                y_max = y_min + int(info[0][3])
                f.write(" %d,%d,%d,%d,%d" % (
                    x_min, y_min, x_max, y_max, int(info[1])))
            f.write('\n')


if __name__ == '__main__':
    boxes = load_annotations(
        "mscoco2017/annotations/instances_train2017.json")
    write_train_list(boxes, 'train.txt')
Using AI Code Generation
1const strykerParent = require('stryker-parent');2strykerParent.cat();3const strykerParent = require('stryker-parent');4strykerParent.dog();5const strykerParent = require('stryker-parent');6strykerParent.cat();7const strykerParent = require('stryker-parent');8strykerParent.dog();9const strykerParent = require('stryker-parent');10strykerParent.cat();11const strykerParent = require('stryker-parent');12strykerParent.dog();13const strykerParent = require('stryker-parent');14strykerParent.cat();15const strykerParent = require('stryker-parent');16strykerParent.dog();17const strykerParent = require('stryker-parent');18strykerParent.cat();19const strykerParent = require('stryker-parent');20strykerParent.dog();21const strykerParent = require('stryker-parent');22strykerParent.cat();23const strykerParent = require('stryker-parent');24strykerParent.dog();25const strykerParent = require('stryker-parent');26strykerParent.cat();27const strykerParent = require('stryker-parent
Using AI Code Generation
1var strykerParent = require('stryker-parent');2var stryker = new strykerParent();3stryker.cat();4var strykerChild = require('stryker-child');5var stryker = new strykerChild();6stryker.cat();7var strykerChild2 = require('stryker-child2');8var stryker = new strykerChild2();9stryker.cat();10var strykerChild3 = require('stryker-child3');11var stryker = new strykerChild3();12stryker.cat();13var strykerChild4 = require('stryker-child4');14var stryker = new strykerChild4();15stryker.cat();16var strykerChild5 = require('stryker-child5');17var stryker = new strykerChild5();18stryker.cat();19var strykerChild6 = require('stryker-child6');20var stryker = new strykerChild6();21stryker.cat();22var strykerChild7 = require('stryker-child7');23var stryker = new strykerChild7();24stryker.cat();25var strykerChild8 = require('stryker-child8');26var stryker = new strykerChild8();27stryker.cat();28var strykerChild9 = require('stryker-child9');29var stryker = new strykerChild9();30stryker.cat();31var strykerChild10 = require('stryker-child10');
Using AI Code Generation
1var strykerParent = require('stryker-parent');2console.log(strykerParent.cat('Hello', 'World'));3var strykerParent = require('stryker-parent');4console.log(strykerParent.cat('Hello', 'World'));5import strykerParent = require('stryker-parent');6console.log(strykerParent.cat('Hello', 'World'));
Using AI Code Generation
1var cat = require('stryker-parent').cat;2console.log('The cat says: ' + cat());3var cat = require('stryker-parent').cat;4console.log('The cat says: ' + cat());5var cat = require('stryker-parent').cat;6console.log('The cat says: ' + cat());7var cat = require('stryker-parent').cat;8console.log('The cat says: ' + cat());9var cat = require('stryker-parent').cat;10console.log('The cat says: ' + cat());11var cat = require('stryker-parent').cat;12console.log('The cat says: ' + cat());13var cat = require('stryker-parent').cat;14console.log('The cat says: ' + cat());15var cat = require('stryker-parent').cat;16console.log('The cat says: ' + cat());17var cat = require('stryker-parent').cat;18console.log('The cat says: ' + cat());19var cat = require('stryker-parent').cat;20console.log('The cat says: ' + cat());21var cat = require('stryker-parent').cat;22console.log('The cat says: ' + cat());23var cat = require('stryker-parent').cat;24console.log('The cat says: ' + cat());25var cat = require('stryker-parent').cat;26console.log('The cat says: '
Using AI Code Generation
1var stryker = require('stryker-parent');2var strykerCat = new stryker.Cat();3strykerCat.meow();4var stryker = require('stryker-parent');5var strykerCat = new stryker.Cat();6strykerCat.meow();7var stryker = require('stryker-parent');8var strykerCat = new stryker.Cat();9strykerCat.meow();10var stryker = require('stryker-parent');11var strykerCat = new stryker.Cat();12strykerCat.meow();13var stryker = require('stryker-parent');14var strykerCat = new stryker.Cat();15strykerCat.meow();16var stryker = require('stryker-parent');17var strykerCat = new stryker.Cat();18strykerCat.meow();19var stryker = require('stryker-parent');20var strykerCat = new stryker.Cat();21strykerCat.meow();22var stryker = require('stryker-parent');23var strykerCat = new stryker.Cat();24strykerCat.meow();25var stryker = require('stryker-parent');26var strykerCat = new stryker.Cat();27strykerCat.meow();28var stryker = require('stryker-parent');29var strykerCat = new stryker.Cat();30strykerCat.meow();31var stryker = require('stryker-parent');
Using AI Code Generation
1var strykerParent = require('stryker-parent');2var cat = strykerParent.cat;3var cat = strykerParent.cat;4cat('meow');5var strykerParent = require('stryker-parent');6var cat = strykerParent.cat;7var cat = strykerParent.cat;8cat('meow');9var strykerParent = require('stryker-parent');10var cat = strykerParent.cat;11var cat = strykerParent.cat;12cat('meow');13var strykerParent = require('stryker-parent');14var cat = strykerParent.cat;15var cat = strykerParent.cat;16cat('meow');17var strykerParent = require('stryker-parent');18var cat = strykerParent.cat;19var cat = strykerParent.cat;20cat('meow');21var strykerParent = require('stryker-parent');22var cat = strykerParent.cat;23var cat = strykerParent.cat;24cat('meow');25var strykerParent = require('stryker-parent');26var cat = strykerParent.cat;27var cat = strykerParent.cat;28cat('meow');29var strykerParent = require('stryker-parent');30var cat = strykerParent.cat;31var cat = strykerParent.cat;32cat('meow');33var strykerParent = require('stryker-parent');34var cat = strykerParent.cat;35var cat = strykerParent.cat;36cat('meow');37var strykerParent = require('stryker-parent');38var cat = strykerParent.cat;
Using AI Code Generation
1const strykerParent = require('stryker-parent');2console.log(strykerParent.cat());3{4 "dependencies": {5 }6}7const strykerParent = require('stryker-parent');8console.log(strykerParent.cat());9const strykerParent = require('stryker-parent');10console.log(strykerParent.cat());11{12 "dependencies": {13 }14}15const strykerParent = require('stryker-parent');16console.log(strykerParent.cat());17const strykerParent = require('stryker-parent');18console.log(strykerParent.cat());19{20 "dependencies": {21 }22}23const strykerParent = require('stryker-parent');24console.log(strykerParent.cat());25const strykerParent = require('stryker-parent');26console.log(strykerParent.cat());27{
Using AI Code Generation
1var parent = require('stryker-parent');2var str = 'foo';3var parent = require('stryker-parent');4var str = 'foo';5var parent = require('stryker-parent');6var str = 'foo';7var parent = require('stryker-parent');8var str = 'foo';9var parent = require('stryker-parent');10var str = 'foo';11var parent = require('stryker-parent');12var str = 'foo';13var parent = require('stryker-parent');14var str = 'foo';15var parent = require('stryker-parent');16var str = 'foo';17var parent = require('stryker-parent');18var str = 'foo';19var parent = require('stryker-parent');20var str = 'foo';21var parent = require('stryker-parent');22var str = 'foo';
Learn to execute automation testing from scratch with the LambdaTest Learning Hub — from setting up the prerequisites and running your first automated test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.
You can also refer to video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing FREE!