How to use screen_cv2 method in ATX

Best Python code snippet using ATX Github


Full Screen

# NOTE(review): this snippet was recovered from a web copy in which the
# original line numbers were fused into the code and several statements were
# garbled. Reconstructed statements are marked with NOTE(review) below.
import numpy as np
import datetime as dt
import mss
import toml
from debufftracker import errors as customErrors
from debufftracker import status
import time
import os
from threading import Thread


class ConfigReader:
    """Read and return configuration data from resources/config.toml."""

    def __init__(self):
        self.__config_path = os.path.join("resources", "config.toml")
        self.__toml_content = toml.load(f=self.__config_path)

    def get_imagetransformation_config(self):
        """Get config for image transformation.

        :return: the [imagetransformation] section of config.toml
        :rtype: dict
        :raises customErrors.ColorConfigError: if color_type is not an allowed value
        """
        allowed_colors = ["color"]
        if self.__toml_content["imagetransformation"]["color_type"].lower() not in allowed_colors:
            raise customErrors.ColorConfigError(self.__toml_content["imagetransformation"]["color_type"])
        return self.__toml_content["imagetransformation"]

    def get_debuff_configs(self, status_type):  # ailment/curse/ground
        """Return the config section of one status type.

        :param status_type: name of the status (ailment/curse/ground)
        :type status_type: str
        :return: config data of that status type from config.toml
        :rtype: dict
        """
        return self.__toml_content[status_type]


class ScreenTracker:
    """Track the screen content and dispatch per-status handlers."""

    def __init__(self):
        self._config_reader = ConfigReader()
        self.image_config = self._config_reader.get_imagetransformation_config()
        self.status_instances = {}

    def create_removestatus_dict(self):
        """Collect, over each status type in ["ailment", "curse", "ground"],
        the status configs whose remove_debuff flag is set.

        :return: relevant_dicts, mapping of status name -> status config
        :rtype: dict
        :raises customErrors.StatusConfigError: if remove_debuff is True but no key is bound
        """
        def get_relevant_dicts(d):
            """Flatten one status-type section, keeping only entries where
            remove_debuff is True; a removal request without a keybinding is
            a configuration error.

            :param d: dictionary of per-status config sub-dictionaries
            :return: big_dict, only the configs with remove_debuff == True
            :rtype: dict
            """
            big_dict = {}
            for key in d.keys():
                if (d[key]["key"] != "") and (d[key]["remove_debuff"] == True):
                    big_dict[key] = d[key]
                elif (d[key]["key"] == "") and (d[key]["remove_debuff"] == True):
                    raise customErrors.StatusConfigError("if remove_debuff is true, then keybinding must be set")
            return big_dict

        relevant_dicts = {}
        status_types = ["ailment", "curse", "ground"]
        for status_type in status_types:
            status_type_all_dict = self._config_reader.get_debuff_configs(status_type=status_type)
            relevant_dicts.update(get_relevant_dicts(status_type_all_dict))

        self.__removestatus_dicts = relevant_dicts  # dict contains per-status config dicts
        # dict structure example:
        # removestatus_dicts = {"shock": {"type": "shock", ...}}
        return relevant_dicts

    def create_status_instances(self):
        """Create instances of status.Status and store them in
        self.status_instances so they can be managed later.

        :return: None
        """
        # config example needed to initiate status classes:
        # {"type": "bleed", "flask": "1", "color_type": "gray", "remove_debuff": True}
        try:
            remove_status_dicts = self.__removestatus_dicts
        except AttributeError:  # was a bare except; only the not-yet-built attribute is expected
            remove_status_dicts = self.create_removestatus_dict()

        status_instances_dict = {}
        for status_type in remove_status_dicts.keys():
            status_config = remove_status_dicts[status_type]
            # add color_type to config; required to read the template with
            # the correct method (gray/color)
            status_config["color_type"] = self.image_config["color_type"]
            status_instances_dict[status_type] = status.Status(status_config)

        self.status_instances = status_instances_dict

    def manage_status_instances(self):
        """Take a partial screenshot, then run each status.Status instance
        against it in its own thread. Threads are joined before returning to
        prevent chaotic behaviour.

        :return: debuffs_dict (currently left empty by this code path)
        :rtype: dict
        """
        screen = self.grab_transform_screen()
        debuffs_dict = {}
        thread_list = []
        for status_name in self.status_instances.keys():
            status_instance = self.status_instances[status_name]
            # each instance is run as a separate Thread.
            # NOTE(review): the Thread target was lost in the copied source;
            # it must be the Status instance's check/remove entry point —
            # confirm the exact method name against debufftracker.status.
            t = Thread(target=status_instance.check_remove, args=(screen,))
            thread_list.append(t)
            t.start()

        # wait for threads to finish. Not waiting caused chaotic behavior.
        for t in thread_list:
            t.join()

        return debuffs_dict

    def run(self):
        """Infinite loop that calls self.manage_status_instances(), causing
        any found negative effects to be removed.

        :return: None
        """
        continue_run = True
        print("Debuff Tracker started")
        while continue_run:
            self.manage_status_instances()
            time.sleep(1)

    def grab_transform_screen(self):
        """Make a partial screenshot, transform it to a numpy array and
        return it.

        :return: screen_cv2, partial screenshot with 3 color channels, BGR order
        :rtype: np.array
        """
        # Benchmarked over 1000 iterations:
        # pyautogui: screenshot, then cut and transform (avg 0:00:00.054545)
        # PIL: partial screenshot, then transform       (avg 0:00:00.035084)
        # mss: partial screenshot, then transform       (avg 0:00:00.013324)
        # mss is lightweight and fast
        with mss.mss() as sct:
            # the screen part to capture
            monitor_area = {
                "top": 0,
                "left": 0,
                "width": self.image_config["width"],
                "height": self.image_config["height"],
            }
            screen = sct.grab(monitor_area)
            screen_cv2 = np.array(screen)
            # 4th channel is a constant 255 (uint8); keep only B, G, R
            screen_cv2 = screen_cv2[:, :, :3]

        # NOTE(review): reconstructed — the datetime.now() call was garbled in
        # the copy. fname and p are computed but never used here; the save
        # call appears to have been removed from the original.
        end_dt = dt.datetime.now()
        fname = str(end_dt).replace(":", "") + ".png"
        p = os.path.join(os.getcwd(), os.pardir, "resources", "track_screen", fname)

        return screen_cv2


if __name__ == "__main__":
    current_dir = os.path.dirname(os.path.abspath(__file__))
    project_dir = os.path.join(current_dir, os.path.pardir)

    # set project source folder as working directory
    os.chdir(project_dir)

    screentracker = ScreenTracker()
    screentracker.create_status_instances()
    # NOTE(review): the copied source is truncated here; presumably followed
    # by screentracker.run().

Full Screen

Full Screen Github


Full Screen

# NOTE(review): recovered from a web copy with fused line numbers; formatting
# reconstructed and Chinese comments translated to English.
import cv2, numpy


def locate(source, wanted, accuracy=0.90):
    """Find the wanted image inside the source image.

    :param source: path of the screenshot image to search in
    :param wanted: path of the template image to search for
    :param accuracy: minimum confidence required for a match
    :return: top-left coordinates (x, y) of the best match when its
        confidence is >= accuracy, otherwise None
    """
    screen_cv2 = cv2.imread(source)
    wanted_cv2 = cv2.imread(wanted)

    result = cv2.matchTemplate(screen_cv2, wanted_cv2, cv2.TM_CCOEFF_NORMED)
    min_val, max_val, min_loc, max_loc = cv2.minMaxLoc(result)

    if max_val >= accuracy:
        return max_loc
    else:
        return None


def locate_all(source, wanted, accuracy=0.90):
    """Find every occurrence of the wanted image inside the source image.

    Nearby duplicate hits are suppressed with a simple distance heuristic.

    :param source: path of the screenshot image to search in
    :param wanted: path of the template image to search for
    :param accuracy: minimum confidence required for a match
    :return: list of [x, y] top-left coordinates of all (deduplicated) matches
    """
    loc_pos = []
    screen_cv2 = cv2.imread(source)
    wanted_cv2 = cv2.imread(wanted)

    result = cv2.matchTemplate(screen_cv2, wanted_cv2, cv2.TM_CCOEFF_NORMED)
    location = numpy.where(result >= accuracy)

    ex, ey = 0, 0
    for pt in zip(*location[::-1]):
        x = pt[0]
        y = pt[1]

        # drop points too close to the previous hit (duplicate suppression)
        if (x - ex) + (y - ey) < 15:
            continue
        ex, ey = x, y

        loc_pos.append([int(x), int(y)])

    return loc_pos


def centerOfTouchArea(wantedSize, topLeftPos):
    """Given the target's size and its top-left corner, return the center.

    :param wantedSize: (height, width, channels) shape of the template image
    :param topLeftPos: (x, y) top-left coordinates of the match
    :return: (x, y) center coordinates, or None for invalid input
    """
    tlx, tly = topLeftPos
    h_src, w_src, tongdao = wantedSize
    if tlx < 0 or tly < 0 or w_src <= 0 or h_src <= 0:
        return None
    # NOTE(review): the remainder of this function was truncated in the
    # copied source; the obvious completion returns the center point —
    # confirm against the original.
    return (tlx + w_src // 2, tly + h_src // 2)

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub — from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:


You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run ATX automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?