How to use the filter_suites method in Lemoncheesecake

Python code snippets that use filter_suites. Of the three excerpts below, utils.py is the one that actually exercises filter_suites on lemoncheesecake suites; parse-tests.py (an OpenShift e2e tests parser) and extract_stats.py (from Debsources) reuse the filter_suites name for a command-line flag and a function parameter, respectively.
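For orientation, this is the kind of suite that filter_suites operates on once a lemoncheesecake project has been loaded. The sketch below uses the standard lcc.suite/lcc.test decorators and the check_that/equal_to matchers; the suite and test names are made up for illustration.

import lemoncheesecake.api as lcc
from lemoncheesecake.matching import check_that, equal_to

@lcc.suite("Authentication")
class authentication:
    @lcc.test("Login succeeds with valid credentials")
    def login_ok(self):
        # trivial check so the suite is not empty
        check_that("login status", "ok", equal_to("ok"))

The filtering itself happens on the loaded suite objects, as the utils.py excerpt further down shows.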

parse-tests.py

Source: parse-tests.py (GitHub)

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""OpenShift tests discovery
Discovery e2e tests for each suite, parse, classify
and export to json.
TODO: add examples.
Using it on executor.sh, add:
# Simple filter to use like those on openshift-tests
# $ openshift-tests run --dry-run all |grep '\[sig-storage\]' |openshift-tests run -f -
elif [[ ! -z ${CUSTOM_TEST_FILTER_SIG:-} ]]; then
    os_log_info "Generating tests for SIG [${CUSTOM_TEST_FILTER_SIG}]..."
    mkdir tmp/
    ./parse-tests.py \
        --filter-suites all \
        --filter-key sig \
        --filter-value "${CUSTOM_TEST_FILTER_SIG}"
    os_log_info "#executor>Running"
    openshift-tests run \
        --junit-dir ${RESULTS_DIR} \
        -f ./tmp/openshift-e2e-suites.txt \
        | tee -a "${RESULTS_PIPE}" || true
"""
import sys
import json
import csv
import re
import argparse
import subprocess
import logging
#from this import d

bin_openshift_tests = "openshift-tests"
base_output_file = "openshift-tests-suites"
default_empty = ("-" * 3)

#
# Gather and Parser
#
def gather_suite_tests(suite):
    try:
        resp = subprocess.check_output(f"{bin_openshift_tests} run --dry-run {suite}", shell=True)
        return resp.decode("utf-8").split('\n')
    except subprocess.CalledProcessError as e:
        if e.returncode == 127:
            print("Please make sure the 'openshift-tests' binary exists")
            print("Did you build it by running ./build.sh script?")
        else:
            print(f"One or more error was found when collecting the list of tests for suite [{suite}]")
            print(f"Make sure you are able to run this command: {e.cmd}")
        sys.exit(e.returncode)

def parser_suite_tests(suite, tests):
    """
    Extract metadata from test name
    """
    parsed_tests = []
    for test_name in tests:
        if test_name.strip() == "":
            continue
        test = {
            "name": test_name,
            "name_parsed": "",
            "tags": [],
            "filters": {
                "in_kubernetes_conformance": default_empty,
                "in_openshift_conformance": default_empty,
            }
        }
        name = test_name
        # extract tags ('[<any char>]') from test name
        m_tags = re.findall(r'\[(.*?)\]+', test_name)
        for tag in m_tags:
            # discovery name: remove 'tags'
            name = name.replace(f"[{tag}]", "")
            t = tag.split(':')
            # build filters
            build_filters_from_tags(test['filters'], t)
            # set empty keys
            if len(t) == 1:
                test['tags'].append({t[0]: ''})
                continue
            # ToDo: tag could be a tuple
            test['tags'].append({t[0]: ' '.join(t[1:])})
        # Save the parsed name (without tags)
        test['name_parsed'] = name.strip('"').strip()
        parsed_tests.append(test)
    return parsed_tests

#
# Filter
#
def build_filters_from_tags(filters, tag):
    if tag[0] == 'Conformance':
        filters['is_conformance'] = True
        return
    if tag[0].startswith('sig'):
        filters['sig'] = tag[0]
        return
    if (tag[0] == 'Suite') and tag[1] == 'k8s':
        filters['suite_k8s'] = True
        return
    if (tag[0] == 'Suite'):
        filters['suite'] = (' '.join(tag[1:]))
        return
    if (tag[0] == 'suite_cmd'):
        return

def build_filters_intersection(suites, suite1, suite2):
    """
    Check if tests from suite1 is in suite2
    """
    for s in suites:
        if s['name'] == suite1:
            tests_suite1 = s['tests']
        if s['name'] == suite2:
            tests_suite2 = s['tests']
    filter_name = (f"in_{suite1.replace('/', '_')}")
    for t1 in tests_suite1:
        #t1['filters'][filter_name] = ''
        for t2 in tests_suite2:
            #t2['filters'][filter_name] = False
            if t1['name'] == t2['name']:
                t2['filters'][filter_name] = True

def to_tags_str(tags):
    """
    Build inline tags - as original: [key(|:value)]
    """
    tags_str = ""
    for t in tags:
        for key in t:
            if t[key] == '':
                tags_str += (f"[{key}] ")
                continue
            tags_str += (f"[{key}:{t[key]}] ")
    return tags_str

def build_field_filters(suites, filter_field_prefix):
    filter_k = {}
    for s in suites:
        for t in s['tests']:
            for f in t['filters']:
                filter_k[f"{filter_field_prefix}{f}"] = ''
    return list(filter_k.keys())

def filter_kv(payload, kv):
    new_suite = {
        "name": "filtered",
        "tests": []
    }
    k, v = kv
    for s in payload['suites']:
        for t in s['tests']:
            if k in t['filters']:
                if t['filters'][k] == v:
                    new_suite['tests'].append(t)
    return {
        "suites": [new_suite]
    }

#
# Exporters
#
def export_to_csv(payload, odir):
    """Export tests to CSV table with properly filters discovered by metadata
    """
    with open(f'{odir}/{base_output_file}.csv', 'w', newline='') as csvfile:
        fieldnames = ['suite', 'name_alias', 'tags', 'name']
        ffield_prefix = "f_"
        ffilters = build_field_filters(payload['suites'], ffield_prefix)
        fieldnames = fieldnames + ffilters
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames, delimiter=';')
        writer.writeheader()
        for suite in payload['suites']:
            for test in suite['tests']:
                row = {
                    'suite': suite['name'],
                    'name_alias': test['name_parsed'],
                    'tags': to_tags_str(test['tags']),
                    'name': test['name']
                }
                for f in ffilters:
                    row[f] = (test['filters'].get(f.strip(ffield_prefix), default_empty))
                writer.writerow(row)
    print(f"CSV file saved in {odir}/{base_output_file}.csv")

def export_to_json(payload, odir):
    """Export tests as json with it's metadata
    """
    with open(f'{odir}/{base_output_file}.json', 'w') as outfile:
        json.dump(payload, outfile)
    print(f"Json file saved in {odir}/{base_output_file}.json")

def export_to_txt(payload, odir):
    """Export tests name to text file to be able to reproduce on 'openshift-tests run -f'.
    """
    with open(f'{odir}/{base_output_file}.txt', 'w') as outfile:
        for s in payload['suites']:
            for t in s['tests']:
                outfile.write(f"{t['name']}\n")
    print(f"Text file saved in {odir}/{base_output_file}.txt")

#
# Exporter entity
#
class TestsExporter(object):
    suites = []
    payload = {
        "suites": []
    }
    output = {
        "file": "",
        "dir": "",
        "types": {
            "json": False,
            "csv": False,
            "txt": False
        }
    }

    def __init__(self, suites=[]):
        self.suites = suites

    def gather_tests(self):
        for suite_name in self.suites:
            tests = gather_suite_tests(suite_name)
            parsed_tests = parser_suite_tests(suite_name, tests)
            self.payload["suites"].append({
                "name": suite_name,
                "tests": parsed_tests
            })

    def build_filter_intersection(self):
        # improve filters
        build_filters_intersection(self.payload['suites'], self.suites[0], self.suites[1])
        build_filters_intersection(self.payload['suites'], self.suites[1], self.suites[0])

    def export_default(self, out_dir):
        export_to_csv(self.payload, out_dir)
        export_to_json(self.payload, out_dir)

    def export_filter(self, kv, out_dir):
        filtered_payload = filter_kv(self.payload, kv)
        export_to_csv(filtered_payload, out_dir)
        export_to_json(filtered_payload, out_dir)
        export_to_txt(filtered_payload, out_dir)

    def set_outputs(self, args):
        if args.output:
            self.output_file = args.output
        if args.output_dir:
            self.output_dir = args.output_dir
        if args.output_types:
            self.output_types = args.output_types

#
# compare
#
def run_test_compare(args):
    tests = args.compare.split(',')
    if len(tests) != 2:
        logging.info("It's allowed only to compare two lists")
        sys.exit(1)
    test1 = tests[0].split('=')
    if len(test1) != 2:
        logging.info("first test has incorrect format: test_name=test_file")
        sys.exit(1)
    test2 = tests[1].split('=')
    if len(test2) != 2:
        logging.info("second test has incorrect format: test_name=test_file")
        sys.exit(1)
    with open(test1[1], 'r') as f:
        test1_list = f.read().split('\n')
    with open(test2[1], 'r') as f:
        test2_list = f.read().split('\n')
    print(f"t1 name: {test1[0]}")
    print(f"t2 name: {test2[0]}")
    print(f"Total t1: {len(test1_list)}")
    print(f"Total t2: {len(test2_list)}")
    t1_not_t2 = list()
    t2_not_t1 = list()
    for t1 in test1_list:
        if t1 not in test2_list:
            t1_not_t2.append(t1)
    for t2 in test2_list:
        if t2 not in test1_list:
            t2_not_t1.append(t2)
    print(f"Total t1 not in t2: {len(t1_not_t2)}")
    print(f"Total t2 not in t1: {len(t2_not_t1)}")

#
# main
#
def main():
    parser = argparse.ArgumentParser(description='OpenShift Partner Certification Tool - Tests parser.')
    parser.add_argument('--filter-suites', dest='filter_suites',
                        default="openshift/conformance,kubernetes/conformance",
                        help='openshift-tests suite to run the filter, sepparated by comma.')
    parser.add_argument('--filter-key', dest='filter_k',
                        help='filter by key')
    parser.add_argument('--filter-value', dest='filter_v',
                        help='filter value of key')
    parser.add_argument('--output', dest='output',
                        help='output file path to save the results')
    parser.add_argument('--output-dir', dest='output_dir',
                        default="./tmp",
                        help='output file path to save the results')
    parser.add_argument('--output-types', dest='output_types',
                        default="json,csv,txt",
                        help='output types to export')
    parser.add_argument('--compare-tests-files', dest='compare',
                        default="",
                        help='Compare test files: aws-parallel=aws-parallel.txt,none-parallel=none-parallel.txt')
    args = parser.parse_args()
    if args.compare != "":
        return run_test_compare(args)
    texporter = TestsExporter()
    texporter.set_outputs(args)
    if not(args.filter_suites):
        # discovery suites by default:
        texporter.suites = ["openshift/conformance", "kubernetes/conformance"]
    else:
        texporter.suites = args.filter_suites.split(',')
    # Collect tests
    texporter.gather_tests()
    if args.filter_k:
        texporter.export_filter((args.filter_k, args.filter_v), args.output_dir)
        sys.exit(0)
    texporter.export_default(args.output_dir)
    sys.exit(0)

if __name__ == "__main__":
    ...

utils.py

Source: utils.py (GitHub)

...
    if all(suite.is_empty() for suite in suites):
        raise UserError("No test is defined in your lemoncheesecake project.")
    project.metadata_policy.check_suites_compliance(suites)
    if test_filter:
        suites = filter_suites(suites, test_filter)
        if len(suites) == 0:
            raise UserError("The filter does not match any test")
    return suites

def auto_detect_reporting_backends():
    try:
        project = load_project()
        return project.reporting_backends.values()
    except ProjectNotFound:
        return get_reporting_backends()

def add_report_path_cli_arg(cli_parser):
    cli_parser.add_argument("report_path", nargs='?', help="Report file or directory")

def get_report_path(cli_args):
    # first attempt: has the report path been specified on the CLI ?
    if cli_args.report_path:
...
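The flow in this excerpt is: take the suites loaded from the project, pass them through filter_suites together with a filter built from the command-line arguments, and fail loudly when nothing is left. Below is a minimal sketch of that flow as a standalone helper; select_suites is a hypothetical name, and the import paths for filter_suites and UserError are assumptions inferred from the excerpt rather than verified lemoncheesecake API.

# Sketch only: select_suites is a hypothetical wrapper, import locations assumed.
from lemoncheesecake.exceptions import UserError   # assumed module path
from lemoncheesecake.filter import filter_suites   # assumed module path

def select_suites(suites, test_filter=None):
    # refuse to run when the project defines no tests at all
    if all(suite.is_empty() for suite in suites):
        raise UserError("No test is defined in your lemoncheesecake project.")
    # narrow the loaded suites down to whatever the filter matches
    if test_filter:
        suites = filter_suites(suites, test_filter)
        if len(suites) == 0:
            raise UserError("The filter does not match any test")
    return suites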

extract_stats.py

Source: extract_stats.py (GitHub)

# Copyright (C) 2014-2021 The Debsources developers
# <qa-debsources@lists.alioth.debian.org>.
# See the AUTHORS file at the top-level directory of this distribution and at
# https://salsa.debian.org/qa/debsources/blob/master/AUTHORS
#
# This file is part of Debsources. Debsources is free software: you can
# redistribute it and/or modify it under the terms of the GNU Affero General
# Public License as published by the Free Software Foundation, either version 3
# of the License, or (at your option) any later version. For more information
# see the COPYING file at the top-level directory of this distribution and at
# https://salsa.debian.org/qa/debsources/blob/master/COPYING
from debsources import statistics

def extract_stats(filter_suites=None, filename="cache/stats.data"):
    """
    Extracts information from the collected stats.
    If filter_suites is None, all the information are extracted.
    Otherwise suites must be an array of suites names (can contain "total").
    e.g. extract_stats(filter_suites=["total", "debian_wheezy"])
    """
    res = dict()
    stats = statistics.load_metadata_cache(filename)
    for key, value in stats.items():
        splits = key.split(".")
        # if this key/value is in the required suites, we add it
        if filter_suites is None or splits[0] in filter_suites:
            res[key] = value
...
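Note that this extract_stats comes from Debsources and is unrelated to lemoncheesecake: filter_suites here is just an optional list of suite names used to select keys from the stats cache. Going by the function's own docstring, and assuming its truncated tail returns the res dict it builds, usage looks like this:

# Usage sketch based on the docstring above; assumes extract_stats() returns `res`.
subset = extract_stats(filter_suites=["total", "debian_wheezy"])
for key, value in sorted(subset.items()):
    print(key, value)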
