How to use the _is_supported method in lisa

Best Python code snippets using lisa_python
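The snippets on this page come from three different open-source projects (pycosio's storage test suite, the AWS SAM Translator policy templates plugin tests, and Checkbox's piglit wrapper), and each defines its own `_is_supported` helper. Before reading them, here is a minimal, self-contained sketch of the most common pattern: a membership check against a set of unsupported features, used to guard optional code paths. The class and attribute names below are illustrative only and are not part of lisa's actual API.

class StorageTester:
    """Illustrative sketch only; not lisa's API."""

    def __init__(self, unsupported_operations=()):
        # Features the backend under test cannot perform, e.g. {'write', 'copy'}.
        self._unsupported_operations = set(unsupported_operations)

    def _is_supported(self, feature):
        """Return True if the tested backend supports the named feature."""
        return feature not in self._unsupported_operations


tester = StorageTester(unsupported_operations={'write'})
assert tester._is_supported('listdir')
assert not tester._is_supported('write')

The snippets below apply the same idea to storage integration tests, plugin unit tests, and a command-line wrapper.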

test_storage.py

Source: test_storage.py (GitHub)


...
        # TODO: Add pycosio public functions tests
        # Only if mocked
        if self._storage_mock is not None:
            self._test_mock_only()
    def _is_supported(self, feature):
        """
        Return True if a feature is supported.
        Args:
            feature (str): Feature to support.
        Returns:
            bool: Feature is supported.
        """
        return feature not in self._unsupported_operations
    @staticmethod
    def _get_id():
        """
        Return a unique ID.
        Returns:
            str: id
        """
        return 'pycosio%s' % (str(_uuid()).replace('-', ''))
    def _test_raw_io(self):
        """
        Tests raw IO.
        """
        from os import SEEK_END, SEEK_CUR
        size = 100
        file_name = 'raw_file0.dat'
        file_path = self.base_dir_path + file_name
        self._to_clean(file_path)
        content = _urandom(size)
        # Open file in write mode
        assert not self._system.exists(file_path), 'Raw write, file not exists'
        if self._is_supported('write'):
            with self._raw_io(file_path, 'wb',
                              **self._system_parameters) as file:
                assert self._system.getsize(file_path) == 0, \
                    'Raw write, file must exist but empty'
                # Get file type for later
                is_seekable = file.seekable()
                try:
                    max_flush_size = file.MAX_FLUSH_SIZE
                except AttributeError:
                    max_flush_size = 0
                # Test: Write blocks of data
                assert file.write(content[:10]) == 10, \
                    'Raw write, written size match'
                if is_seekable:
                    assert file.write(b'\0' * 10) == 10, \
                        'Raw write, written size match'
                else:
                    assert file.write(content[10:20]) == 10, \
                        'Raw write, written size match'
                assert file.write(content[20:]) == 80, \
                    'Raw write, written size match'
                # Test: tell
                if is_seekable:
                    assert file.tell() == size, \
                        'Raw write, tell match written size'
                    # Test write seek back and write
                    assert file.seek(10) == 10, \
                        'Raw write, seek position match'
                    assert file.write(content[10:20]) == 10, \
                        'Raw write, written size match'
                    assert file.tell() == 20, \
                        'Raw write, tell match ending position'
                else:
                    # Test not seekable raises Unsupported exception
                    with _pytest.raises(_UnsupportedOperation):
                        file.tell()
                    with _pytest.raises(_UnsupportedOperation):
                        file.seek(0)
                # Test: read in write mode is not supported
                with _pytest.raises(_UnsupportedOperation):
                    file.read()
                with _pytest.raises(_UnsupportedOperation):
                    file.readinto(bytearray(100))
        else:
            is_seekable = False
            max_flush_size = 0
            # Test: Unsupported
            with _pytest.raises(_UnsupportedOperation):
                self._raw_io(file_path, 'wb', **self._system_parameters)
            # Create pre-existing file
            if self._storage_mock:
                self._storage_mock.put_object(
                    self.locator, self.base_dir_name + file_name, content)
        # Open file in read mode
        with self._raw_io(file_path, **self._system_parameters) as file:
            # Test: read_all
            assert file.readall() == content, 'Raw read all, content match'
            assert file.tell() == size, 'Raw read all, tell match'
            # Test: seek and read_all
            assert file.seek(10) == 10, 'Raw seek 10 & read all, seek match'
            assert file.readall() == content[10:], \
                'Raw seek 10 & read all, content match'
            assert file.tell() == size, \
                'Raw seek 10 & read all, tell match'
            # Test: seek from current position & read_all
            assert file.seek(-50, SEEK_CUR) == 50, \
                'Raw seek from current & read all, seek match'
            assert file.readall() == content[-50:], \
                'Raw seek from current & read all, content match'
            assert file.tell() == size, \
                'Raw seek from current & read all, tell match'
            # Test: seek with bad whence value
            with _pytest.raises(ValueError):
                file.seek(0, 10)
            # Test: Cannot write in read mode
            with _pytest.raises(_UnsupportedOperation):
                file.write(b'0')
            # Test: Flush has no effect in read mode
            file.flush()
            # Test: _read_range
            assert file.seek(0) == 0, 'Raw seek 0, seek match'
            buffer = bytearray(40)
            assert file.readinto(buffer) == 40, \
                'Raw read into, returned size match'
            assert bytes(buffer) == content[:40], 'Raw read into, content match'
            assert file.tell() == 40, 'Raw read into, tell match'
            buffer = bytearray(40)
            assert file.readinto(buffer) == 40, \
                'Raw read into from 40, returned size match'
            assert bytes(buffer) == content[40:80], \
                'Raw read into from 40, content match'
            assert file.tell() == 80, 'Raw read into from 40, tell match'
            buffer = bytearray(40)
            assert file.readinto(buffer) == 20, \
                'Raw read into partially over EOF, returned size match'
            assert bytes(buffer) == content[80:] + b'\0' * 20, \
                'Raw read into partially over EOF, content match'
            assert file.tell() == size, \
                'Raw read into partially over EOF, tell match'
            buffer = bytearray(40)
            assert file.readinto(buffer) == 0, \
                'Raw read into over EOF, returned size match'
            assert bytes(buffer) == b'\0' * 40, \
                'Raw read into over EOF, content match'
            assert file.tell() == size, \
                'Raw read into over EOF, tell match'
            file.seek(-10, SEEK_END)
            buffer = bytearray(20)
            assert file.readinto(buffer) == 10, \
                'Raw seek from end & read into, returned size match'
            assert bytes(buffer) == content[90:] + b'\0' * 10, \
                'Raw seek from end & read into, content match'
            assert file.tell() == size, \
                'Raw seek from end & read into, tell match'
        # Test: Append mode
        if self._is_supported('write'):
            # Test: Appending on existing file
            with self._raw_io(file_path, mode='ab',
                              **self._system_parameters) as file:
                file.write(content)
            with self._raw_io(file_path, **self._system_parameters) as file:
                assert file.readall() == content + content, \
                    'Raw append, previous content read match'
            # Test: Appending on non-existing file
            file_name = 'raw_file1.dat'
            file_path = self.base_dir_path + file_name
            self._to_clean(file_path)
            with self._raw_io(file_path, mode='ab',
                              **self._system_parameters) as file:
                file.write(content)
            with self._raw_io(file_path, **self._system_parameters) as file:
                assert file.readall() == content, \
                    'Raw append, file create content match'
        # Test: Seek out of file and write
        if is_seekable:
            file_name = 'raw_file2.dat'
            file_path = self.base_dir_path + file_name
            self._to_clean(file_path)
            with self._raw_io(file_path, 'wb',
                              **self._system_parameters) as file:
                file.seek(256)
                file.write(b'\x01')
            with self._raw_io(file_path, 'rb',
                              **self._system_parameters) as file:
                assert file.readall() == b'\0' * 256 + b'\x01', \
                    'Raw seek, null padding read'
        # Test: write big file
        if self._is_supported('write') and max_flush_size:
            file_name = 'raw_file3.dat'
            file_path = self.base_dir_path + file_name
            self._to_clean(file_path)
            size = max_flush_size * 4
            content = _urandom(size)
            with self._raw_io(file_path, 'wb',
                              **self._system_parameters) as file:
                file.write(content)
            with self._raw_io(file_path, 'rb',
                              **self._system_parameters) as file:
                assert file.readall() == content, \
                    'Raw write big file, content match'
        # Test exclusive write mode
        if self._is_supported('write'):
            file_name = 'raw_file4.dat'
            file_path = self.base_dir_path + file_name
            self._to_clean(file_path)
            # Create file
            with self._raw_io(file_path, 'xb', **self._system_parameters):
                pass
            # File already exists
            with _pytest.raises(FileExistsError):
                self._raw_io(file_path, 'xb', **self._system_parameters)
    def _test_buffered_io(self):
        """
        Tests buffered IO.
        """
        from pycosio.io import ObjectBufferedIOBase
        # Set buffer size
        buffer_size = 16 * 1024
        # Test: write data, not multiple of buffer
        file_name = 'buffered_file0.dat'
        file_path = self.base_dir_path + file_name
        self._to_clean(file_path)
        size = int(4.5 * buffer_size)
        content = _urandom(size)
        if self._is_supported('write'):
            with self._buffered_io(file_path, 'wb', buffer_size=buffer_size,
                                   **self._system_parameters) as file:
                file.write(content)
        else:
            # Test: Unsupported
            with _pytest.raises(_UnsupportedOperation):
                self._buffered_io(file_path, 'wb', buffer_size=buffer_size,
                                  **self._system_parameters)
            # Create pre-existing file
            if self._storage_mock:
                self._storage_mock.put_object(
                    self.locator, self.base_dir_name + file_name, content)
        # Test: Read data, not multiple of buffer
        with self._buffered_io(file_path, 'rb', buffer_size=buffer_size,
                               **self._system_parameters) as file:
            assert content == file.read(), \
                'Buffered read, not multiple of buffer size'
        # Test: write data, multiple of buffer
        file_name = 'buffered_file1.dat'
        file_path = self.base_dir_path + file_name
        self._to_clean(file_path)
        size = int(5 * buffer_size)
        content = _urandom(size)
        if self._is_supported('write'):
            with self._buffered_io(file_path, 'wb', buffer_size=buffer_size,
                                   **self._system_parameters) as file:
                file.write(content)
                # Test: Flush manually
                file.flush()
                # Test: read in write mode is not supported
                with _pytest.raises(_UnsupportedOperation):
                    file.read()
                with _pytest.raises(_UnsupportedOperation):
                    file.read1()
                with _pytest.raises(_UnsupportedOperation):
                    file.readinto(bytearray(100))
                with _pytest.raises(_UnsupportedOperation):
                    file.readinto1(bytearray(100))
                with _pytest.raises(_UnsupportedOperation):
                    file.peek()
                # Test: Unsupported if not seekable
                if not file.seekable():
                    with _pytest.raises(_UnsupportedOperation):
                        file.tell()
                    with _pytest.raises(_UnsupportedOperation):
                        file.seek(0)
        else:
            # Create pre-existing file
            if self._storage_mock:
                self._storage_mock.put_object(
                    self.locator, self.base_dir_name + file_name, content)
        # Test: Read data, multiple of buffer
        with self._buffered_io(file_path, 'rb', buffer_size=buffer_size,
                               **self._system_parameters) as file:
            # Test full data read
            assert content == file.read(), \
                'Buffered read, multiple of buffer size'
            # Test: seek
            assert file.seek(10) == 10, 'Buffered read, seek'
            assert file.tell() == 10, 'Buffered read, tell match seek'
            # Test: peek
            assert content[10:110] == file.peek(100), \
                'Buffered read, peek content match'
            assert file.tell() == 10, \
                'Buffered read, peek tell match'
            # Test: Cannot write in read mode
            with _pytest.raises(_UnsupportedOperation):
                file.write(b'0')
            # Test: Flush has no effect in read mode
            file.flush()
            # Check if pycosio subclass
            is_pycosio_subclass = isinstance(file, ObjectBufferedIOBase)
        # Test: Buffer limits and default values
        if is_pycosio_subclass:
            with self._buffered_io(
                    file_path, **self._system_parameters) as file:
                assert file._buffer_size == file.DEFAULT_BUFFER_SIZE, \
                    'Buffered, Default buffer size'
                # Get limit values
                minimum_buffer_size = file.MINIMUM_BUFFER_SIZE
                maximum_buffer_size = file.MAXIMUM_BUFFER_SIZE
                # Get current max buffers
                calculated_max_buffers = file._max_buffers
            # Test: Minimum buffer size
            if minimum_buffer_size > 1:
                with self._buffered_io(
                        file_path, buffer_size=minimum_buffer_size // 2,
                        **self._system_parameters) as file:
                    assert file._buffer_size == minimum_buffer_size, \
                        'Buffered, Minimum buffer size'
            # Test: Maximum buffer size
            if maximum_buffer_size:
                with self._buffered_io(
                        file_path, buffer_size=maximum_buffer_size * 2,
                        **self._system_parameters) as file:
                    assert file._buffer_size == maximum_buffer_size, \
                        'Buffered, Maximum buffer size'
            # Test: Maximum buffer count
            assert calculated_max_buffers, \
                'Buffered, calculated buffer count not 0'
            max_buffers = calculated_max_buffers * 2
            with self._buffered_io(
                    file_path, mode='rb', max_buffers=max_buffers,
                    **self._system_parameters) as file:
                assert file._max_buffers == max_buffers, \
                    'Buffered, Maximum buffer count'
    def _test_system_locator(self):
        """
        Test system internals related to locators.
        """
        system = self._system
        # Test: Create locator
        if self._is_supported('mkdir'):
            system.make_dir(self.locator_url)
            self._to_clean(self.locator)
        else:
            # Test: Unsupported
            with _pytest.raises(_UnsupportedOperation):
                system.make_dir(self.locator_url)
            # Create a preexisting locator
            if self._storage_mock:
                self._storage_mock.put_locator(self.locator)
        # Test: Check locator listed
        if self._is_supported('listdir'):
            for name, header in system._list_locators():
                if name == self.locator and isinstance(header, dict):
                    break
            else:
                _pytest.fail('Locator "%s" not found' % self.locator)
            # Test: Check locator header returns a mapping
            assert hasattr(system.head(path=self.locator), '__getitem__'), \
                'List locators, header is mapping'
        else:
            # Test: Unsupported
            with _pytest.raises(_UnsupportedOperation):
                system._list_locators()
        # Test: remove locator
        tmp_locator = self._get_id()
        self._to_clean(tmp_locator)
        if self._is_supported('mkdir'):
            system.make_dir(tmp_locator)
        elif self._storage_mock:
            self._storage_mock.put_locator(tmp_locator)
        if self._is_supported('remove'):
            if self._is_supported('listdir'):
                assert tmp_locator in [
                    name for name, _ in system._list_locators()], \
                    'Remove locator, locator exists'
            system.remove(tmp_locator)
            if self._is_supported('listdir'):
                assert tmp_locator not in [
                    name for name, _ in system._list_locators()], \
                    'Remove locator, locator not exists'
        else:
            # Test: Unsupported
            with _pytest.raises(_UnsupportedOperation):
                system.remove(tmp_locator)
    def _test_system_objects(self):
        """
        Test system internals related to objects.
        """
        from pycosio._core.exceptions import ObjectNotFoundError
        system = self._system
        if self._is_supported('mkdir'):
            # Create parent directory
            system.make_dir(self.base_dir_path)
            self._to_clean(self.base_dir_path)
            # Test: Make a directory (With trailing /)
            dir_name0 = 'directory0/'
            dir_path0 = self.base_dir_path + dir_name0
            system.make_dir(dir_path0)
            self._to_clean(dir_path0)
            if self._is_supported('listdir'):
                assert dir_path0 in self._list_objects_names(), \
                    'Create directory, exists (with "/")'
            # Test: Check directory header
            assert hasattr(system.head(path=dir_path0), '__getitem__'), \
                'Head directory, header is mapping'
            # Test: Make a directory (Without trailing /)
            dir_name1 = 'directory1'
            dir_path1 = self.base_dir_path + dir_name1
            system.make_dir(dir_path1)
            dir_path1 += '/'
            self._to_clean(dir_path1)
            if self._is_supported('listdir'):
                assert dir_path1 in self._list_objects_names(), \
                    'Create directory, exists (without "/")'
                # Test: Listing empty directory
                assert len(tuple(system.list_objects(dir_path0))) == 0, \
                    'List objects, empty directory'
        # Write a sample file
        file_name = 'sample_1K.dat'
        file_path = self.base_dir_path + file_name
        self._to_clean(file_path)
        file_url = self.base_dir_url + file_name
        size = 1024
        content = _urandom(size)
        if self._is_supported('write'):
            with self._raw_io(file_path, mode='w',
                              **self._system_parameters) as file:
                # Write content
                file.write(content)
        elif self._storage_mock:
            # Create pre-existing file
            self._storage_mock.put_object(
                self.locator, self.base_dir_name + file_name, content)
        # Estimate creation time
        create_time = _time()
        # Test: Check file header
        assert hasattr(system.head(path=file_path), '__getitem__'), \
            'Head file, header is mapping'
        # Test: Check file size
        try:
            assert system.getsize(file_path) == size, \
                'Head file, size match'
        except _UnsupportedOperation:
            # Only re-raise if the operation is declared as supported
            if self._is_supported('getsize'):
                raise
        # Test: Check file modification time
        try:
            file_time = system.getmtime(file_path)
            if self._is_supported('write'):
                assert file_time == _pytest.approx(create_time, 2), \
                    'Head file, modification time match'
        except _UnsupportedOperation:
            # Only re-raise if the operation is declared as supported
            if self._is_supported('getmtime'):
                raise
        # Test: Check file creation time
        try:
            file_time = system.getctime(file_path)
            if self._is_supported('write'):
                assert file_time == _pytest.approx(create_time, 2), \
                    'Head file, creation time match'
        except _UnsupportedOperation:
            # Only re-raise if the operation is declared as supported
            if self._is_supported('getctime'):
                raise
        # Test: Check path and URL handling
        with self._raw_io(file_path, **self._system_parameters) as file:
            assert file.name == file_path, 'Open file, path match'
        with self._raw_io(file_url, **self._system_parameters) as file:
            assert file.name == file_url, 'Open file, URL match'
        # Write some files
        files = set()
        files.add(file_path)
        for i in range(11):
            if i < 10:
                # Files in directory
                file_name = 'file%d.dat' % i
                path = self.base_dir_path + file_name
                rel_path = self.base_dir_name + file_name
            else:
                # File in locator
                rel_path = self._get_id() + '.dat'
                path = '%s/%s' % (self.locator, rel_path)
            files.add(path)
            self._to_clean(path)
            if self._is_supported('write'):
                with self._raw_io(
                        path, mode='w', **self._system_parameters) as file:
                    file.flush()
            elif self._storage_mock:
                # Create pre-existing file
                self._storage_mock.put_object(self.locator, rel_path, b'')
        # Test: List objects
        if self._is_supported('listdir'):
            objects = tuple(system.list_objects(self.locator))
            objects_list = set(
                '%s/%s' % (self.locator, name) for name, _ in objects)
            for file in files:
                assert file in objects_list, 'List objects, file name match'
            for _, header in objects:
                assert hasattr(header, '__getitem__'), \
                    'List objects, file header is mapping'
            # Test: List objects, with limited output
            max_request_entries = 5
            entries = len(tuple(system.list_objects(
                max_request_entries=max_request_entries)))
            assert entries == max_request_entries, \
                'List objects, Number of entries match'
            # Test: List objects, no objects found
            with _pytest.raises(ObjectNotFoundError):
                list(system.list_objects(
                    self.base_dir_path + 'dir_not_exists/'))
            # Test: List objects on locator root, no objects found
            with _pytest.raises(ObjectNotFoundError):
                list(system.list_objects(self.locator + '/dir_not_exists/'))
            # Test: List objects, locator not found
            with _pytest.raises(ObjectNotFoundError):
                list(system.list_objects(self._get_id()))
        else:
            # Test: Unsupported
            with _pytest.raises(_UnsupportedOperation):
                list(system.list_objects(self.base_dir_path))
        # Test: copy
        copy_path = file_path + '.copy'
        self._to_clean(copy_path)
        if self._is_supported('copy'):
            system.copy(file_path, copy_path)
            assert system.getsize(copy_path) == size, 'Copy file, size match'
        else:
            # Test: Unsupported
            with _pytest.raises(_UnsupportedOperation):
                system.copy(file_path, copy_path)
        # Test: Normal file is not symlink
        assert not system.islink(file_path), 'Symlink, file is not symlink'
        # Test: Symlink
        if self._is_supported('symlink'):
            link_path = self.base_dir_path + 'symlink'
            # TODO: Tests once create symlink implemented
            # Test: Is symlink
            # assert system.islink(link_path)
            # assert system.islink(header=system.head(link_path))
        # Test: Remove file
        if self._is_supported('remove'):
            if self._is_supported('listdir'):
                assert file_path in self._list_objects_names(), \
                    'Remove file, file exists'
            system.remove(file_path)
            if self._is_supported('listdir'):
                assert file_path not in self._list_objects_names(), \
                    'Remove file, file not exists'
        else:
            # Test: Unsupported
            with _pytest.raises(_UnsupportedOperation):
                system.remove(file_path)
    def _test_mock_only(self):
        """
        Tests that can only be performed on mocks.
        """
        file_name = 'mocked.dat'
        # Create a file
        file_path = self.base_dir_path + file_name
        self._to_clean(file_path)
        content = _urandom(20)
        if self._is_supported('write'):
            with self._raw_io(
                    file_path, mode='w', **self._system_parameters) as file:
                file.write(content)
                file.flush()
        elif self._storage_mock:
            # Create pre-existing file
            self._storage_mock.put_object(
                self.locator, self.base_dir_name + file_name, content)
        # Test: Read does not block other exceptions
        with self._storage_mock.raise_server_error():
            with _pytest.raises(OSError):
                self._raw_io(file_path, **self._system_parameters).read(10)
    def _list_objects_names(self):
        """
...
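The snippet above uses `_is_supported` to branch on every optional feature: the positive test runs when the backend supports the feature, and otherwise the test asserts that the operation raises the unsupported-operation exception. Below is a condensed, hypothetical sketch of that guard; FakeBackend and exercise_write are stand-ins for illustration only, not pycosio or lisa names.

import io
import pytest


class FakeBackend:
    """Toy backend used only to demonstrate the guard pattern."""

    def __init__(self, unsupported_operations=()):
        self._unsupported_operations = set(unsupported_operations)

    def _is_supported(self, feature):
        return feature not in self._unsupported_operations

    def open(self, path, mode='rb'):
        # Refuse writes when the 'write' feature is not supported.
        if 'w' in mode and not self._is_supported('write'):
            raise io.UnsupportedOperation(mode)
        return io.BytesIO()


def exercise_write(backend, path):
    # Same guard as the test above: positive path when supported,
    # otherwise assert the unsupported exception is raised.
    if backend._is_supported('write'):
        with backend.open(path, 'wb') as file:
            assert file.write(b'data') == 4
    else:
        with pytest.raises(io.UnsupportedOperation):
            backend.open(path, 'wb')


exercise_write(FakeBackend(), 'file.dat')
exercise_write(FakeBackend({'write'}), 'file.dat')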


test_policy_templates_plugin.py

Source: test_policy_templates_plugin.py (GitHub)


...
    def test_plugin_must_be_instance_of_base_plugin_class(self):
        self.assertTrue(isinstance(self.plugin, BasePlugin))
    def test_must_only_support_function_resource(self):
        function_type = "AWS::Serverless::Function"
        self.assertTrue(self.plugin._is_supported(function_type))
    def test_must_not_support_non_function_resources(self):
        resource_type = "AWS::Serverless::Api"
        self.assertFalse(self.plugin._is_supported(resource_type))
    @patch("samtranslator.plugins.policies.policy_templates_plugin.FunctionPolicies")
    def test_on_before_transform_resource_must_work_on_every_policy_template(self, function_policies_class_mock):
        is_supported_mock = Mock()
        self.plugin._is_supported = is_supported_mock
        is_supported_mock.return_value = True
        function_policies_obj_mock = MagicMock()
        function_policies_class_mock.return_value = function_policies_obj_mock
        function_policies_class_mock.POLICIES_PROPERTY_NAME = "Policies"
        template1 = {
            "MyTemplate1": {
                "Param1": "value1"
            }
        }
        template2 = {
...
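The samtranslator tests above show a second technique: replacing `_is_supported` with a `unittest.mock.Mock` so the handler under test can be forced down the supported (or unsupported) path regardless of the real predicate. Here is a standalone sketch of the same idea; MyPlugin and its methods are hypothetical stand-ins, not the real plugin class.

from unittest.mock import Mock


class MyPlugin:
    """Hypothetical plugin with a support predicate and a handler."""

    def _is_supported(self, resource_type):
        return resource_type == "AWS::Serverless::Function"

    def on_before_transform_resource(self, resource_type, properties):
        if not self._is_supported(resource_type):
            return "skipped"
        return "processed"


plugin = MyPlugin()
# Force the supported path, independent of the real predicate.
plugin._is_supported = Mock(return_value=True)
assert plugin.on_before_transform_resource("AWS::Serverless::Api", {}) == "processed"
# The Mock also records which resource type the handler consulted.
plugin._is_supported.assert_called_once_with("AWS::Serverless::Api")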


piglit-wrapper

Source: piglit-wrapper (GitHub)


#!/usr/bin/python3
# This file is part of Checkbox.
#
# Copyright 2015 Canonical Ltd.
# Written by:
#   Zygmunt Krynicki <zygmunt.krynicki@canonical.com>
#
# Checkbox is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3,
# as published by the Free Software Foundation.
#
# Checkbox is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Checkbox. If not, see <http://www.gnu.org/licenses/>.
"""
Tool for running piglit tests.
This script is designed to wrap ``piglit run`` to provide a consistent outcome
and testing experience for plainbox.
.. warning::
    This script was written to work with ``piglit-0~git20150312-530724b-1`` and
    may not work with older or more recent versions.
"""
import argparse
import collections
import gettext
import json
import logging
import os
import subprocess
import sys
import tempfile
_ = gettext.gettext
_logger = logging.getLogger("piglit-wrapper")
class PiglitWrapper:
    """ Wrapper around the piglit tool. """
    OUTCOME_CRASH = 'crash'
    OUTCOME_FAIL = 'fail'
    OUTCOME_PASS = 'pass'
    OUTCOME_SKIP = 'skip'
    def __init__(self, tests):
        """
        Initialize the wrapper around piglit.
        :param tests:
            List of patterns to match tests against.
            Only tests matching any of the patterns will be started.
        """
        self._tests = tests
        self._is_supported = None  # Optional[bool]
        self._outcome_stats = collections.defaultdict(int)  # Dict[str, int]
        # Dict[str, List[str]]
        self._outcome_list = collections.defaultdict(list)
        self._test_count = None  # Optional[int]
    @property
    def is_supported(self):
        """ Flag indicating if this version of piglit is supported. """
        return bool(self._is_supported)
    @property
    def outcome_stats(self):
        """ Mapping from test outcome to number of occurrences. """
        return self._outcome_stats
    @property
    def outcome_list(self):
        """ Mapping from test outcome to a list of tests. """
        return self._outcome_list
    @property
    def test_count(self):
        """ Total number of tests. """
        return self._test_count
    @property
    def is_successful(self):
        """ Flag indicating if the run was successful. """
        return (self._outcome_stats[self.OUTCOME_FAIL] == 0 and
                self._outcome_stats[self.OUTCOME_CRASH] == 0)
    def run(self, dirname=None):
        """ Run piglit and all the desired tests. """
        if dirname is not None:
            self._run_in_directory(dirname)
        else:
            with tempfile.TemporaryDirectory() as tmpdir:
                _logger.info(_("Created temporary directory: %s"), tmpdir)
                self._run_in_directory(tmpdir)
    def _run_in_directory(self, dirname):
        """ Run piglit and all the desired tests in a specific directory. """
        cmd = [
            # Run piglit
            "piglit", "run",
            # Using the json backend that we understand
            "--backend=json"]
        for test in self._tests:
            # Include tests that we've been asked to run.
            cmd.extend(["-t", test])
        # Out of all the tests in general.
        cmd.append("all")
        # Save results to a hard-coded file in this directory
        cmd.append(dirname)
        _logger.info(_("Starting program: %r"), cmd)
        subprocess.call(
            # redirect stdout to /dev/null as we don't care about the
            # spinner that piglit prints
            cmd, stdout=subprocess.DEVNULL)
        # NOTE: the "results.json" filename is hard-coded into piglit
        result_filename = os.path.join(dirname, "results.json")
        self._analyze_results(result_filename)
    def _analyze_results(self, result_filename):
        """ Analyze raw piglit json data. """
        if not os.path.isfile(result_filename):
            self._is_supported = False
            _logger.error(_("Piglit didn't create the test result file?"))
            return
        _logger.info(_("Analyzing piglit test results from %s"),
                     result_filename)
        with open(result_filename, 'rt', encoding='UTF-8') as stream:
            result_json = json.load(stream)
        version = result_json.get('results_version')
        if version == 4:
            self._is_supported = True
            self._analyze_v4(result_json)
        else:
            self._is_supported = False
            _logger.error(_("Unsupported piglit result format (%r)"), version)
    def _analyze_v4(self, result_json):
        """ Analyze raw piglit json data (format 4). """
        _logger.info(_("Analyzing piglit test results (format 4)"))
        self._test_count = len(result_json['tests'])
        for test_id, test_result in result_json['tests'].items():
            outcome = test_result['result']
            self._outcome_stats[outcome] += 1
            self._outcome_list[outcome].append(test_id)
def main():
    """ Main function. """
    gettext.textdomain('plainbox-provider-piglit')
    gettext.bindtextdomain('plainbox-provider-piglit',
                           os.getenv('PLAINBOX_PROVIDER_LOCALE_DIR'))
    parser = argparse.ArgumentParser(
        description=_("Tool for running piglit tests"))
    parser.add_argument(
        '-d', '--dirname', metavar=_("DIR"), default=None,
        help=_("save piglit results to DIR"))
    parser.add_argument(
        "--test", "-t", metavar=_("PATTERN"), required=True, action='append',
        help=_("run piglit tests matching given PATTERN"))
    parser.add_argument(
        "--verbose", "-v",
        action='store_true',
        help=_("be more verbose during testing"))
    ns = parser.parse_args()
    logging.basicConfig(
        level=logging.INFO if ns.verbose else logging.WARNING,
        format="{name}:{levelname}: {message}", style='{')
    piglit = PiglitWrapper(ns.test)
    piglit.run(ns.dirname)
    if not piglit.is_supported:
        print(_("This version of piglit is not supported"))
        return 2
    stats = piglit.outcome_stats
    print(_("Summary of results (by outcome)"))
    for outcome in sorted(stats.keys()):
        print(" - {}: {}".format(outcome, stats[outcome]))
        if ns.verbose:
            for test_id in sorted(piglit.outcome_list[outcome]):
                print(" * {}".format(test_id))
    if piglit.is_successful:
        print(_("Tests successful"))
        return 0
    else:
        print(_("Tests unsuccessful"))
        return 1
if __name__ == "__main__":
...

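Unlike the first two snippets, the piglit wrapper stores `_is_supported` as a plain attribute rather than a method: it stays None until a result file has actually been analyzed, and the public `is_supported` property collapses that tri-state value to a boolean. The following sketch isolates that pattern; ResultAnalyzer is a hypothetical class used only for illustration.

class ResultAnalyzer:
    def __init__(self):
        self._is_supported = None  # Unknown until a result file is parsed

    @property
    def is_supported(self):
        """True only once a supported result format has been seen."""
        return bool(self._is_supported)

    def analyze(self, results_version):
        # Mirrors the wrapper above: only format version 4 is understood.
        self._is_supported = (results_version == 4)


analyzer = ResultAnalyzer()
assert not analyzer.is_supported   # still unknown, treated as unsupported
analyzer.analyze(results_version=4)
assert analyzer.is_supported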
