Best Python code snippet using localstack_python
test_stepfunctions.py
Source:test_stepfunctions.py  
...
                    raise ShortCircuitWaitException("Statemachine execution failed")
                else:
                    return status == "SUCCEEDED"
            return _assert_execution_success

        def _retry_execution():
            # start state machine execution
            # AWS initially straight up fails until the permissions seem to take effect
            # so we wait until the statemachine is at least running
            result = stepfunctions_client.start_execution(
                stateMachineArn=machine_arn, input='{"Name": "' f"{topic_name}" '"}'
            )
            assert wait_until(assert_execution_success(result["executionArn"]))
            describe_result = stepfunctions_client.describe_execution(
                executionArn=result["executionArn"]
            )
            output = describe_result["output"]
            assert topic_name in output
            result = stepfunctions_client.describe_state_machine_for_execution(
                executionArn=result["executionArn"]
...
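The test above leans on LocalStack's wait_until helper and a ShortCircuitWaitException to stop waiting as soon as the execution fails. The following is a rough, standalone sketch of that poll-and-short-circuit pattern (not the LocalStack helper itself), assuming a boto3 Step Functions client pointed at a LocalStack endpoint; the endpoint URL, ARN placeholders, and timing values are illustrative and not taken from the original test.

# Minimal sketch: poll an execution until SUCCEEDED, short-circuiting on failure.
import json
import time

import boto3

# endpoint URL is a common LocalStack default, used here only as an example
sfn = boto3.client("stepfunctions", endpoint_url="http://localhost:4566")


def wait_for_success(execution_arn, timeout=60, interval=2):
    """Poll describe_execution until SUCCEEDED; raise early on a terminal failure."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        status = sfn.describe_execution(executionArn=execution_arn)["status"]
        if status in ("FAILED", "TIMED_OUT", "ABORTED"):
            raise RuntimeError(f"State machine execution ended with {status}")
        if status == "SUCCEEDED":
            return True
        time.sleep(interval)
    return False


# usage (machine_arn and the input payload are placeholders):
# result = sfn.start_execution(stateMachineArn=machine_arn,
#                              input=json.dumps({"Name": "my-topic"}))
# assert wait_for_success(result["executionArn"])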
raid_integrity_data.py
Source:raid_integrity_data.py
...
            for device in devices:
                # Update the state as 'check' for RAID device file
                result = self._update_raid_device_file(device)
                if result == "failed":
                    self._retry_execution(self._update_raid_device_file, device)
                logger.info("RAID device state is changed to 'check'")
                # Check RAID device array state is 'idle' or not
                result = self._check_raid_state(device)
                if result == "failed":
                    logger.warn("'Idle' state not found for RAID device:{}"
                                    .format(device))
                    # Retry to check RAID state
                    self._retry_execution(self._check_raid_state, device)
                logger.info("'idle' state is found in Raid device:{}."
                             .format(device))
                # Check Mismatch count in RAID device files.
                result = self._check_mismatch_count(device)
                if result == "failed":
                    # Persist RAID device fault state and send alert
                    fault_status_file = self.DEFAULT_RAID_DATA_PATH + device + "_" + RaidDataConfig.RAID_MISMATCH_FAULT_STATUS.value
                    if os.path.exists(fault_status_file):
                        with open(fault_status_file, 'r') as fs:
                            data = fs.read().rstrip()
                        if self.FAULT_RESOLVED in data:
                            self.alert_type = self.FAULT
                            self._alert_msg = "RAID disks present in %s RAID array"\
                                ", needs synchronization. If fault persists for "\
                                "more than 2 days, Please contact Seagate support."%device
                            self._send_json_msg(self.alert_type, device, self._alert_msg)
                            self._update_fault_state_file(device, self.FAULT, fault_status_file)
                            self._scan_frequency = self.MIN_SCAN_FREQUENCY
                    else:
                        self.alert_type = self.FAULT
                        self._alert_msg = "RAID disks present in %s RAID array"\
                                ", needs synchronization. If fault persists for "\
                                "more than 2 days, Please contact Seagate support."%device
                        self._send_json_msg(self.alert_type, device, self._alert_msg)
                        self._update_fault_state_file(device, self.FAULT, fault_status_file)
                        self._scan_frequency = self.MIN_SCAN_FREQUENCY
                    # Retry to check mismatch_cnt
                    self._retry_execution(self._check_mismatch_count, device)
                logger.debug("No mismatch count is found in Raid device:{}"
                            .format(device))
        except Exception as ae:
            raise Exception(f"Failed in monitoring RAID health, {ae}")

    def _get_devices(self):
        try:
            mdstat_file = RaidDataConfig.MDSTAT_FILE.value
            with open(mdstat_file, 'r') as fp:
                content = fp.readlines()
            device_array = []
            for line in content:
                if "active" in line:
                    device = line.split(":")[0].rstrip()
                    device_array.append(device)
            if len(device_array) == 0:
                logger.error("No RAID device found in mdstat file.")
            return device_array
        except Exception as ae:
            raise Exception(f"Failed to get the device array, {ae}")

    def _check_mismatch_count(self, device):
        try:
            status = None
            mismatch_cnt_file = RaidDataConfig.MISMATCH_COUNT_FILE.value
            MISMATCH_COUNT_COMMAND = 'cat ' + self.raid_dir + device +\
                                     mismatch_cnt_file
            logger.debug('Executing MISMATCH_CNT_COMMAND:{}'
                         .format(MISMATCH_COUNT_COMMAND))
            response, error = self._run_command(MISMATCH_COUNT_COMMAND)
            if error:
                logger.error("Error in cmd{} in raid health monitor"
                            .format(MISMATCH_COUNT_COMMAND))
            if response == RaidDataConfig.MISMATCH_COUNT_RESPONSE.value:
                logger.debug("No mismatch count is found")
                status = "success"
                with open(self.output_file, 'a') as raid_file:
                    raid_file.write(RaidDataConfig.MISMATCH_COUNT_RESPONSE.value)
                fault_status_file = self.DEFAULT_RAID_DATA_PATH + device + "_" + RaidDataConfig.RAID_MISMATCH_FAULT_STATUS.value
                if os.path.exists(fault_status_file):
                    with open(fault_status_file, 'r') as fs:
                        data = fs.read().rstrip()
                    if self.FAULT in data:
                        faulty_device = data.split(":")[0].rstrip()
                        if device == faulty_device:
                            self.alert_type = self.FAULT_RESOLVED
                            self._alert_msg = "RAID disks present in %s RAID array are synchronized." % device
                            self._send_json_msg(self.alert_type, device, self._alert_msg)
                            self._update_fault_state_file(device, self.FAULT_RESOLVED, fault_status_file)
                            self._scan_frequency = Conf.get(SSPL_CONF,
                                    f"{self.RAIDIntegritySensor}>{self.SCAN_FREQUENCY}",
                                    self.DEFAULT_SCAN_FREQUENCY)
                            self._scan_frequency = max(self._scan_frequency,
                                                       self.MIN_SCAN_FREQUENCY)
            else:
                status = "failed"
                logger.debug("Mismatch found in {} file in raid_integrity_data!"
                             .format(mismatch_cnt_file))
            return status
        except Exception as ae:
            logger.error("Failed in checking mismatch_cnt in RAID file. ERROR:{}"
                         .format(str(ae)))
            raise

    def _check_raid_state(self, device):
        try:
            status = None
            raid_check = 0
            sync_action_file = RaidDataConfig.SYNC_ACTION_FILE.value
            while raid_check <= RaidDataConfig.MAX_RETRIES.value:
                self.output_file = self._get_unique_filename(RaidDataConfig.RAID_RESULT_FILE_PATH.value, device)
                STATE_COMMAND = 'cat ' + self.raid_dir + device +\
                                sync_action_file
                logger.debug('Executing STATE_COMMAND:{}'.format(STATE_COMMAND))
                response, error = self._run_command(STATE_COMMAND)
                if error:
                    logger.warn("Error in cmd{} in raid health monitor"
                                .format(STATE_COMMAND))
                    raid_check += 1
                else:
                    if response == RaidDataConfig.STATE_COMMAND_RESPONSE.value:
                        status = "success"
                        with open(self.output_file, 'w') as raid_file:
                            raid_file.write(RaidDataConfig.STATE_COMMAND_RESPONSE.value + "\n")
                        break
                    else:
                        status = "failed"
                        raid_check += 1
                        time.sleep(WAIT_BEFORE_RETRY)
            return status
        except Exception as ae:
            logger.error("Failed in checking RAID device state. ERROR:{}"
                        .format(str(ae)))
            raise

    def _update_raid_device_file(self, device):
        try:
            status = "failed"
            raid_check = 0
            sync_action_file = RaidDataConfig.SYNC_ACTION_FILE.value
            while raid_check <= RaidDataConfig.MAX_RETRIES.value:
                CHECK_COMMAND = "echo 'check' |sudo tee " + self.raid_dir +\
                                device + sync_action_file + " > /dev/null"
                logger.debug('Executing CHECK_COMMAND:{}'.format(CHECK_COMMAND))
                response, error = self._run_command(CHECK_COMMAND)
                if error:
                    logger.warn("Failed in executing command:{}."
                                .format(error))
                    raid_check += 1
                    time.sleep(1)
                else:
                    logger.debug("RAID device state is changed to 'check' with response : {}".format(response))
                    status = "success"
                    break
            return status
        except Exception as ae:
            logger.error("Failed to update RAID File. ERROR:{}"
                         .format(str(ae)))
            raise

    def _retry_execution(self, function_call, device):
        while True:
            logger.debug("Executing function:{} after {} time interval"
                         .format(function_call, self.retry_interval))
            time.sleep(self.retry_interval)
            result = function_call(device)
            if result == self.SUCCESS:
                return

    def _get_unique_filename(self, filename, device):
        unique_timestamp = datetime.now().strftime("%d-%m-%Y_%I-%M-%S-%p")
        unique_filename = f"{filename}_{device}_{unique_timestamp}.txt"
        return unique_filename

    def _send_json_msg(self, alert_type, resource_id, error_msg):
        """Transmit data to NodeDataMsgHandler to be processed and sent out"""
        epoch_time = str(int(time.time()))
...
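In the sensor above, _retry_execution simply sleeps for retry_interval and re-runs the given check on the device until it reports success. The snippet below is a minimal standalone sketch of that loop; the function and parameter names are hypothetical, and the optional max_attempts bound is an illustrative addition (the original retries indefinitely).

# Standalone sketch of the fixed-interval retry pattern used by _retry_execution.
import logging
import time

logger = logging.getLogger(__name__)


def retry_until_success(check_fn, device, retry_interval=30, max_attempts=None):
    """Re-run check_fn(device) every retry_interval seconds until it returns "success"."""
    attempt = 0
    while max_attempts is None or attempt < max_attempts:
        logger.debug("Re-executing %s for %s after %s seconds",
                     check_fn.__name__, device, retry_interval)
        time.sleep(retry_interval)
        if check_fn(device) == "success":
            return True
        attempt += 1
    return False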
mssqlconnector.py
Source:mssqlconnector.py
...
            print "EXCEPTION: ", e
            raise e
        except pymssql.InterfaceError, e:
            print "EXCEPTION: ", e
            cursor = self._retry_execution(sql)
            return cursor
        except Exception, e:
            print "EXCEPTION: ", e
            raise e

    def _set_connection(self):
        self.close_connection()
        return self.get_connection()

    def _retry_execution(self, sql):
        connection = self._set_connection()
        cursor = connection.cursor()
        cursor.execute(sql)
...
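Here _retry_execution recovers from a pymssql.InterfaceError by rebuilding the connection and re-running the query. Below is a minimal Python 3 sketch of that reconnect-and-retry idea (the original excerpt is Python 2); the function name, retry count, and connection parameters are placeholders, not the original class API.

# Sketch: re-run a query, reopening the pymssql connection on InterfaceError.
import pymssql


def execute_with_retry(conn_params, sql, retries=1):
    """Execute sql, reconnecting and retrying on pymssql.InterfaceError."""
    conn = pymssql.connect(**conn_params)
    for attempt in range(retries + 1):
        try:
            cursor = conn.cursor()
            cursor.execute(sql)
            return cursor
        except pymssql.InterfaceError:
            if attempt == retries:
                raise
            conn.close()
            conn = pymssql.connect(**conn_params)  # re-establish the connection


# usage (credentials are placeholders):
# cursor = execute_with_retry(
#     {"server": "localhost", "user": "sa", "password": "...", "database": "mydb"},
#     "SELECT 1",
# )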
