How to use list_rules method in localstack

Best Python code snippet using localstack_python
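
In LocalStack itself, list_rules is the EventBridge (CloudWatch Events) ListRules operation, normally called through an AWS SDK such as boto3 pointed at the LocalStack endpoint. A minimal sketch, assuming LocalStack is running locally on its default edge port 4566 and accepts the usual dummy test credentials:

import boto3

# EventBridge client pointed at a local LocalStack instance (assumed defaults).
events = boto3.client(
    "events",
    endpoint_url="http://localhost:4566",   # LocalStack edge endpoint
    region_name="us-east-1",
    aws_access_key_id="test",
    aws_secret_access_key="test",
)

# Create a rule so there is something to list.
events.put_rule(Name="demo-rule", ScheduleExpression="rate(5 minutes)")

# list_rules returns a page of rules; NamePrefix and Limit are optional filters.
response = events.list_rules(NamePrefix="demo")
for r in response["Rules"]:
    print(r["Name"], r.get("State"), r.get("ScheduleExpression"))

The community snippets below use list_rules differently: as a plain Python list of rule objects that is built, filtered, and scored in application code.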

discrimination.py

Source: discrimination.py (GitHub)

# coding: utf-8
# python 3.5
import sys
import os
sys.path.append('/Users/ooki/git/research_dr/python/MLEM2')
sys.path.append(os.path.dirname(os.path.abspath("__file__"))+'/../MLEM2')
from sklearn.metrics import accuracy_score
import copy
import importlib
import mlem2
import LERS
importlib.reload(mlem2)
importlib.reload(LERS)
from rules_stat import getNumRulesClass
from rules_stat import getRulesValueCount

# =====================================
# Drop the fairness-sensitive attributes list_s from decision_table
# =====================================
def delDiscriminativeAttributes(decision_table, list_s):
    return(decision_table.drop(list_s, axis=1))

# =====================================
# Return the number of rules that contain attribute attr / elementary condition e (attribute attr with value v)
# =====================================
def getNumRulesIncludeAttr(list_rules, attr) :
    rules = [r for r in list_rules if attr in r.getKey()]
    return(len(rules))

def getNumRulesIncludeE(list_rules, attr, v) :
    rules = [r for r in list_rules if r.getValue(attr) == v]
    return(len(rules))

def getNumRulesClassIncludeAttr(list_rules, attr, cls) :
    rules = [r for r in list_rules if (attr in r.getKey()) and r.getConsequent() == cls]
    return(len(rules))

def getNumRulesClassIncludeE(list_rules, attr, v, cls) :
    rules = [r for r in list_rules if r.getValue(attr) == v and r.getConsequent() == cls]
    return(len(rules))

def getNumRulesIncludeMultipleE(list_rules, dict_attribute_value):
    tmp_rules = list_rules
    for attr in dict_attribute_value.keys():
        for v in dict_attribute_value[attr] :
            tmp_rules = [r for r in tmp_rules if r.getValue(attr) == v]
    return(len(tmp_rules))

def getNumRulesClassIncludeMultipleE(list_rules, dict_attribute_value, cls):
    tmp_rules = list_rules
    for attr in dict_attribute_value.keys():
        for v in dict_attribute_value[attr] :
            tmp_rules = [r for r in tmp_rules if r.getValue(attr) == v and r.getConsequent() == cls]
    return(len(tmp_rules))

# ======================================
# Return the contingency-table counts a, b, c, d
# ======================================
def getContingencyTable(list_rules, dict_attribute_value, CLASSES):
    N = len(list_rules)
    n1 = getNumRulesClass(list_rules, CLASSES["bad"])
    n2 = getNumRulesClass(list_rules, CLASSES["good"])
    a = getNumRulesClassIncludeMultipleE(list_rules, dict_attribute_value, CLASSES["bad"])
    b = n1 - a
    c = getNumRulesClassIncludeMultipleE(list_rules, dict_attribute_value, CLASSES["good"])
    d = n2 - c
    return(a,b,c,d)

# =====================================
# Return the rules that contain attribute attr / elementary condition e (attribute attr with value v)
# =====================================
def getRulesIncludeAttr(list_rules, attr) :
    rules = [r for r in list_rules if attr in r.getKey()]
    return(rules)

def getRulesIncludeE(list_rules, attr, v) :
    rules = [r for r in list_rules if r.getValue(attr) == v]
    return(rules)

# =====================================
# Return the rules that do not contain attribute attr / elementary condition e
# =====================================
def getRulesExcludeAttr(list_rules, attr) :
    rules = [r for r in list_rules if not attr in r.getKey()]
    return(rules)

def getRulesExcludeE(list_rules, attr, v) :
    rules = [r for r in list_rules if r.getValue(attr) != v]
    return(rules)

# =====================================
# Return the rule set with attribute attr / elementary condition e deleted from each rule
# Return a copy of a rule with attribute attr / elementary condition e deleted
# =====================================
def getRulesDelAttr(list_rules, attr) :
    rules = [delAttrFromRule(r, attr) for r in list_rules]
    return(rules)

def getRulesDelE(list_rules, attr, v) :
    rules = [delEFromRule(r, attr, v) for r in list_rules]
    return(rules)

def delAttrFromRule(rule, attr) :
    rule_new = copy.deepcopy(rule)
    rule_new.delKey(attr)
    return(rule_new)

def delEFromRule(rule, attr, v) :
    if rule.getValue(attr) == v : return(delAttrFromRule(rule, attr))
    else : return(rule)

# =====================================
# Return the rule set without alpha-discriminatory rules
# Return rules whose elementary condition e has been deleted from alpha-discriminatory rules
# =====================================
def getAlphaRulesExcludeE(list_rules, attr, v, decision_table, list_judgeNominal, alpha = 0) :
    rules = [r for r in list_rules if getElift(r, attr, v, decision_table, list_judgeNominal) <= alpha ]
    return(rules)

def getAlphaRulesDelE(list_rules, attr, v, decision_table, list_judgeNominal, alpha = 0) :
    rules = [delEFromAlphaRule(r, attr, v, decision_table, list_judgeNominal, alpha = 0) for r in list_rules]
    return(rules)

def delEFromAlphaRule(rule, attr, v, decision_table, list_judgeNominal, alpha = 0):
    if rule.getValue(attr) == v :
        elift = getElift(rule, attr, v, decision_table, list_judgeNominal)
        if elift > alpha : return(delAttrFromRule(rule, attr))
        else : return(rule)
    else :
        return(rule)

# =====================================
# Return the rule set without m-discriminatory rules / with their elementary condition e deleted
# =====================================
def getMRulesFUN(list_rules, attr, v, target_cls, DELFUN, m = 0) :
    num_target_cls, num_other_cls, list_num_other_cls = 0, 0, []
    classes = mlem2.getEstimatedClass(list_rules)
    for cls in classes :
        if cls == target_cls :
            num_target_cls = getNumRulesClassIncludeE(list_rules, attr, v, cls)
        else :
            list_num_other_cls.append(getNumRulesClassIncludeE(list_rules, attr, v, cls))
    num_other_cls = sum(list_num_other_cls) / len(list_num_other_cls) # account for the multi-class case
    if (num_target_cls / (num_target_cls + num_other_cls)) > m : # if m-protected
        return(list_rules)
    else :
        return(DELFUN(list_rules, attr, v))

# =====================================
# Build a decision table of only the objects that have the sensitive attribute s
# =====================================
def createDTSuppoterdbyRule(list_rules, attr, v, cls, decision_table):
    target_indice = []
    target_rules = [r for r in list_rules if r.getValue(attr) == v and r.getConsequent() == cls]
    for rule in target_rules:
        target_indice.extend(rule.getSupport())
    target_indice = list(set(target_indice))
    target_indice = sorted(target_indice)
    new_decision_table = decision_table_train.ix[target_indice]
    new_decision_class = new_decision_table[new_decision_table.columns[-1]].values.tolist()
    return(new_decision_table, new_decision_class)

# Function to reduce rules for the favourable decision class w.r.t. the sensitive attribute s
# =====================================
# elift of a rule w.r.t. the sensitive attribute s on decision_table
# =====================================
def getElift(rule, attr, v, decision_table, list_judgeNominal):
    supp, conf = LERS.getSupportConfidence(rule, decision_table, list_judgeNominal)
    rule_s = delEFromRule(rule, attr, v)
    supp_s, conf_s = LERS.getSupportConfidence(rule_s, decision_table, list_judgeNominal)
    if conf_s == 0: elift = 999
    else : elift = conf / conf_s
    return(elift)

# =====================================
# slift of a rule w.r.t. the sensitive attribute s on decision_table
# =====================================
def getSlift(rule, s, decision_table, operator):
    conditions = mlem2.getConditionValues(decision_table, s)
    clifts = [getClift(rule, s, c, decision_table) for c in conditions]
    slift = operator(clifts)
    return(slift)

# =====================================
# clift of a rule w.r.t. the sensitive attribute s and the substitute value c on decision_table
# =====================================
def getClift(rule, s, c, decision_table, list_judgeNominal):
    supp, conf = LERS.getSupportConfidence(rule, decision_table, list_judgeNominal)
    rule_c = mlem2.delEfromRule(rule,s)
    rule_c = rule_c.setValue(s,c)
    supp_c, conf_c = LERS.getSupportConfidence(rule_c, decision_table, list_judgeNominal)
    clift = conf / conf_c
    return(clift)

# ====================================
# Return the attribute-value dict as a string
# ====================================
def strAttributeValue(ATTRIBUTE_VALUE) :
    list_string = []
    for i in ATTRIBUTE_VALUE :
        list_string.append(i+"-".join(ATTRIBUTE_VALUE[i]))
    return("+".join(list_string))

# ====================================
# Return the attribute-value dict as a string
# ====================================
def getItemSet(rule_value) :
    itemset = set()
    for attr in rule_value :
        itemset.add(attr+"-".join(rule_value[attr]))
    return(itemset)

def jaccard(set1, set2):
    set_and = set1 & set2
    set_or = set1 | set2
    if len(set_or) == 0 :
        return(0)
    else :
        return(len(set_and)/len(set_or))

# ========================================
# main
# ========================================
if __name__ == "__main__":
    # settings
    DIR_UCI = '/mnt/data/uci/'
    FILENAME = 'german_credit_categorical'
    iter1 = 1
    iter2 = 1

    # rule induction
    rules = mlem2.getRulesByMLEM2(FILENAME, iter1, iter2)
    # test data
    filepath = DIR_UCI+FILENAME+'/'+FILENAME+'-test'+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table_test = mlem2.getDecisionTable(filepath)
    decision_table_test = decision_table_test.dropna()
    decision_class = decision_table_test[decision_table_test.columns[-1]].values.tolist()
    # nominal data
    filepath = DIR_UCI+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)
    list_judgeNominal = mlem2.getJudgeNominal(decision_table_test, list_nominal)

    # predict by LERS
    predictions = LERS.predictByLERS(rules, decision_table_test, list_judgeNominal)

    # compute the accuracy
    accuracy_score(decision_class, predictions)

    # number of rules
    num = len(rules)
    # average rule length
    mean_length = mlem2.getMeanLength(rules)
    # train data setup
    decision_table_train, decision_class = getData(FILENAME, iter1, iter2, T = "train")
    list_judgeNominal = getJudgeNominal(decision_table_train, FILENAME)
    # average support and average confidence
    mean_support, mean_conf = LERS.getSupportConfidenceRules(rules, decision_table_train, list_judgeNominal)
    # accuracy and recall
    acc_recall = LERS.getAccurayRecall(rules, decision_table_train, list_judgeNominal)
    for i,c in enumerate(mlem2.getEstimatedClass(rules)):
        print(str(acc_recall[i][0])+","+str(acc_recall[i][1]))

    ###### fairness-awareness tests

    # rule sets that contain an elementary condition
    rules_sex_2 = mlem2.getRulesIncludeE(rules, "Sex_Marital_Status", "2.0")
    rules_sex_4 = mlem2.getRulesIncludeE(rules, "Sex_Marital_Status", "4.0")
    # rule set that does not contain the attribute
    rules_exclude_sex = mlem2.getRulesExcludeAttr(rules, "Sex_Marital_Status")
    # rule set that does not contain the elementary condition
    rules_exclude_sex_1 = mlem2.getRulesExcludeE(rules, "Sex_Marital_Status", "1.0")
    # rule set with the attribute deleted
    rules_del_value = mlem2.getRulesDelAttr(rules, "Value_Savings_Stocks")
    # rule set with the elementary condition deleted
    rules_del_value_1 = mlem2.getRulesDelE(rules, "Value_Savings_Stocks", "1.0")

    # example of deleting a single condition
    rule = mlem2.delAttrFromRule(rules[12],'No_of_dependents')
    rule = mlem2.delAttrFromRule(rules[12],'Concurrent_Credits')

    # ====

    # read data
    filepath = '/mnt/data/uci/'+FILENAME+'/'+FILENAME+'-train'+str(iter1)+'-'+str(iter2)+'.tsv'
    decision_table = mlem2.getDecisionTable(filepath)
    decision_table = decision_table.dropna()
    decision_table.index = range(decision_table.shape[0])
    # read nominal
    filepath = '/mnt/data/uci/'+'/'+FILENAME+'/'+FILENAME+'.nominal'
    list_nominal = mlem2.getNominalList(filepath)

    # objects that satisfy the rule -- there are hardly any...
    match_objects = decision_table.apply(lambda obj: isExplainRule(obj, rules[12], list_judgeNominal), axis=1)
    # confidence
    getConfidence(rule, decision_table, list_judgeNominal)

    rules_sex_2 = mlem2.getRulesIncludeE(rules, "Sex_Marital_Status","2.0")...
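
The helpers above treat list_rules as a plain Python list of rule objects exposing getKey(), getValue(attr), and getConsequent(). The MLEM2 Rule class itself is not part of the snippet, so the sketch below substitutes a minimal stand-in with those three methods; SimpleRule is a hypothetical name used only to illustrate how filters like getRulesIncludeE and getRulesExcludeAttr operate on the list.

# Minimal sketch: a stand-in rule object with the interface the helpers above
# expect (getKey / getValue / getConsequent). The real MLEM2 Rule class is not
# shown in the snippet, so SimpleRule here is purely illustrative.
class SimpleRule:
    def __init__(self, conditions, consequent):
        self.conditions = conditions    # e.g. {"Sex_Marital_Status": "2.0"}
        self.consequent = consequent    # decision class, e.g. "good"

    def getKey(self):
        return list(self.conditions.keys())

    def getValue(self, attr):
        return self.conditions.get(attr)

    def getConsequent(self):
        return self.consequent


list_rules = [
    SimpleRule({"Sex_Marital_Status": "2.0", "Age": "1.0"}, "bad"),
    SimpleRule({"Value_Savings_Stocks": "1.0"}, "good"),
]

# Filter exactly as the snippet's helpers do.
rules_sex_2 = [r for r in list_rules if r.getValue("Sex_Marital_Status") == "2.0"]
rules_no_sex = [r for r in list_rules if "Sex_Marital_Status" not in r.getKey()]
print(len(rules_sex_2), len(rules_no_sex))   # 1 1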

IDS_deterministic_local.py

Source: IDS_deterministic_local.py (GitHub)

# code for IDS with deterministic local search
# requires installation of python package apyori: https://pypi.org/project/apyori/
import numpy as np
import pandas as pd
import math
from .apyori import apriori

# rule is of the form if A == a and B == b, then class_1
# one of the member variables is itemset - a set of patterns {(A,a), (B,b)}
# the other member variable is class_label (e.g., class_1)
class rule:

    def __init__(self, feature_list, value_list, class_label):
        self.itemset = set()
        self.class_label = None
        self.add_item(feature_list, value_list)
        self.set_class_label(class_label)
        self._cover = None
        self._correct_cover = None
        self._incorrect_cover = None

    def add_item(self, feature_list, value_list):
        if len(feature_list) != len(value_list):
            print("Some error in inputting feature value pairs")
            return
        for i in range(0, len(feature_list)):
            self.itemset.add((feature_list[i], value_list[i]))

    def print_rule(self):
        s = "If "
        for item in self.itemset:
            s += str(item[0]) + " == " + str(item[1]) + " and "
        s = s[:-5]
        s += ", then "
        s += str(self.class_label)
        print(s)

    def all_predicates_same(self, r):
        return self.itemset == r.itemset

    def class_label_same(self, r):
        return self.class_label == r.class_label

    def set_class_label(self, label):
        self.class_label = label

    def get_length(self):
        return len(self.itemset)

    def get_cover(self, df, reuse=True):
        if reuse and self._cover is not None:
            return self._cover
        for pattern in self.itemset:
            df = df[df[pattern[0]] == int(pattern[1])]
        if reuse:
            self._cover = df.index.values
        return df.index.values

    def get_correct_cover(self, df, Y, reuse=True):
        if reuse and self._correct_cover is not None:
            return self._correct_cover, self._cover
        indexes_points_covered = self.get_cover(df) # indices of all points satisfying the rule
        Y_arr = pd.Series(Y) # make a series of all Y labels
        labels_covered_points = list(Y_arr[indexes_points_covered]) # get a list only of Y labels of the points covered
        correct_cover = []
        for ind in range(0, len(labels_covered_points)):
            if labels_covered_points[ind] == self.class_label:
                correct_cover.append(indexes_points_covered[ind])
        if reuse:
            self._correct_cover = correct_cover
        return correct_cover, indexes_points_covered

    def get_incorrect_cover(self, df, Y, reuse=True):
        if reuse and self._incorrect_cover is not None:
            return self._incorrect_cover
        correct_cover, full_cover = self.get_correct_cover(df, Y)
        ret = (sorted(list(set(full_cover) - set(correct_cover))))
        if reuse:
            self._incorrect_cover = ret
        return ret

# below function basically takes a data frame and a support threshold and returns itemsets which satisfy the threshold
def run_apriori(df, support_thres):
    # the idea is to basically make a list of strings out of df and run apriori api on it
    # return the frequent itemsets
    dataset = []
    for i in range(0, df.shape[0]):
        temp = []
        # active_cols = df.columns.values[df.iloc[i].values == 1]
        # for col_name in active_cols:
        #     temp.append(col_name)
        for col_name in df.columns:
            temp.append(col_name + "=" + str(df[col_name][i]))
        dataset.append(temp)
    # results = list(apriori(dataset, min_support=support_thres))
    results = []
    for ii, i in enumerate(apriori(dataset, min_support=support_thres)):
        if (ii+1) % 1000 == 0: print(ii)
        results.append(i)
    list_itemsets = []
    for ele in results:
        temp = []
        for pred in ele.items:
            temp.append(pred)
        list_itemsets.append(temp)
    return list_itemsets

# This function converts a list of itemsets (stored as list of lists of strings) into rule objects
def createrules(freq_itemsets, labels_set):
    # create a list of rule objects from frequent itemsets
    list_of_rules = []
    for one_itemset in freq_itemsets:
        feature_list = []
        value_list = []
        for pattern in one_itemset:
            # feature_list.append(pattern)
            # value_list.append(1)
            fea_val = pattern.split("=")
            feature_list.append(fea_val[0])
            value_list.append(fea_val[1])
        for each_label in labels_set:
            temp_rule = rule(feature_list, value_list, each_label)
            list_of_rules.append(temp_rule)
    return list_of_rules

# compute the maximum length of any rule in the candidate rule set
def max_rule_length(list_rules):
    len_arr = []
    for r in list_rules:
        len_arr.append(r.get_length())
    return max(len_arr)

# compute the number of points which are covered both by r1 and r2 w.r.t. data frame df
def overlap(r1, r2, df):
    return sorted(list(set(r1.get_cover(df)).intersection(set(r2.get_cover(df)))))

# computes the objective value of a given solution set
def func_evaluation(soln_set, list_rules, df, Y, lambda_array):
    # evaluate the objective function based on rules in solution set
    # soln set is a set of indexes which when used to index elements in list_rules point to the exact rules in the solution set
    # compute f1 through f7 and we assume there are 7 lambdas in lambda_array
    f = [] # stores values of f1 through f7

    # f0 term
    f0 = len(list_rules) - len(soln_set) # |S| - size(R)
    f.append(f0)

    # f1 term
    Lmax = max_rule_length(list_rules)
    sum_rule_length = 0.0
    for rule_index in soln_set:
        sum_rule_length += list_rules[rule_index].get_length()

    f1 = Lmax * len(list_rules) - sum_rule_length
    f.append(f1)
    # f2 term - intraclass overlap
    sum_overlap_intraclass = 0.0
    for r1_index in soln_set:
        for r2_index in soln_set:
            if r1_index >= r2_index:
                continue
            if list_rules[r1_index].class_label == list_rules[r2_index].class_label:
                sum_overlap_intraclass += len(overlap(list_rules[r1_index], list_rules[r2_index], df))
    f2 = df.shape[0] * len(list_rules) * len(list_rules) - sum_overlap_intraclass
    f.append(f2)
    # f3 term - interclass overlap
    sum_overlap_interclass = 0.0
    for r1_index in soln_set:
        for r2_index in soln_set:
            if r1_index >= r2_index:
                continue
            if list_rules[r1_index].class_label != list_rules[r2_index].class_label:
                sum_overlap_interclass += len(overlap(list_rules[r1_index], list_rules[r2_index], df))
    f3 = df.shape[0] * len(list_rules) * len(list_rules) - sum_overlap_interclass
    f.append(f3)
    # f4 term - coverage of all classes
    classes_covered = set()
    for index in soln_set:
        classes_covered.add(list_rules[index].class_label)
    f4 = len(classes_covered)
    f.append(f4)
    # f5 term - accuracy
    sum_incorrect_cover = 0.0
    for index in soln_set:
        sum_incorrect_cover += len(list_rules[index].get_incorrect_cover(df, Y))
    f5 = df.shape[0] * len(list_rules) - sum_incorrect_cover
    f.append(f5)
    # f6 term - cover correctly with at least one rule
    atleast_once_correctly_covered = set()
    for index in soln_set:
        correct_cover, full_cover = list_rules[index].get_correct_cover(df, Y)
        atleast_once_correctly_covered = atleast_once_correctly_covered.union(set(correct_cover))
    f6 = len(atleast_once_correctly_covered)
    f.append(f6)
    obj_val = 0.0
    for i in range(7):
        obj_val += f[i] * lambda_array[i]
    # print(f)
    return obj_val

# deterministic local search algorithm which returns a solution set as well as the corresponding objective value
def deterministic_local_search(list_rules, df, Y, lambda_array, epsilon):
    # step by step implementation of deterministic local search algorithm in the
    # FOCS paper: https://people.csail.mit.edu/mirrokni/focs07.pdf (page 4-5)

    # initialize soln_set
    soln_set = set()
    n = len(list_rules)

    # step 1: find out the element with maximum objective function value and initialize soln set with it
    each_obj_val = []
    for ind in range(len(list_rules)):
        each_obj_val.append(func_evaluation(set([ind]), list_rules, df, Y, lambda_array))

    best_element = np.argmax(each_obj_val)
    soln_set.add(best_element)
    S_func_val = each_obj_val[best_element]
    print('best:', best_element)

    restart_step2 = False

    # step 2: if there exists an element which is good, add it to soln set and repeat
    while True:
        each_obj_val = []

        for ind in set(range(len(list_rules))) - soln_set:
            func_val = func_evaluation(soln_set.union(set([ind])), list_rules, df, Y, lambda_array)

            if func_val > (1.0 + epsilon/(n*n)) * S_func_val:
                soln_set.add(ind)
                print("Adding rule "+str(ind))
                S_func_val = func_val
                restart_step2 = True
                break

        if restart_step2:
            restart_step2 = False
            continue

        for ind in soln_set:
            func_val = func_evaluation(soln_set - set([ind]), list_rules, df, Y, lambda_array)
            if func_val > (1.0 + epsilon/(n*n)) * S_func_val:
                soln_set.remove(ind)
                print("Removing rule "+str(ind))
                S_func_val = func_val
                restart_step2 = True
                break

        if restart_step2:
            restart_step2 = False
            continue
        s1 = func_evaluation(soln_set, list_rules, df, Y, lambda_array)
        s2 = func_evaluation(set(range(len(list_rules))) - soln_set, list_rules, df, Y, lambda_array)
        print('s1', s1)
        print('s2', s2)
        if s1 >= s2:
            return soln_set, s1
        else: ...
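
Here list_rules is simply a Python list of these rule objects, built by createrules and then pruned by deterministic_local_search. A minimal usage sketch for the rule class, assuming the snippet above is saved as an importable module (the name ids_local is hypothetical) and that features are stored as integer codes, since get_cover compares df[col] == int(value):

# Minimal sketch of using the rule class above; ids_local is a hypothetical
# module name for the snippet.
import pandas as pd
from ids_local import rule

df = pd.DataFrame({"A": [1, 1, 0, 1], "B": [0, 1, 0, 1]})
Y = ["class_1", "class_1", "class_0", "class_0"]

r = rule(["A", "B"], ["1", "1"], "class_1")   # if A == 1 and B == 1, then class_1
r.print_rule()

print(r.get_cover(df))                 # row indices matching both conditions: rows 1 and 3
correct, covered = r.get_correct_cover(df, Y)
print(correct)                         # covered rows whose label is class_1: row 1
print(r.get_incorrect_cover(df, Y))    # covered rows with a different label: row 3

run_apriori and createrules generate such rule objects automatically from frequent itemsets, and deterministic_local_search then selects a subset of them by index while scoring candidate sets with func_evaluation.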

rules_list.py

Source: rules_list.py (GitHub)

from list_keywords import LEVELS
from definition import *
import re

# After level 4, what is not in quotes is code,
# and any keyword in the code can be colored independently
# of what is around it, so we use a general function.
# This general function uses LEVELS.
def rule_all(level):
    # get keywords by level
    data_level = LEVELS[level]
    # initialize with extra rules
    list_rules = data_level["extra_rules"]
    # Rule for comments :
    list_rules.append( { 'regex': '#.*$', 'token': 'comment', 'next': 'start' } )
    ## Rules for quoted strings :
    # complete
    list_rules.append( { 'regex': '\"[^\"]*\"', 'token': 'constant.character', 'next': 'start' } )
    list_rules.append( { 'regex': "\'[^\']*\'", 'token': 'constant.character', 'next': 'start' } )
    # incomplete
    list_rules.append( { 'regex': '\"[^\"]*$', 'token': 'constant.character', 'next': 'start' } )
    list_rules.append( { 'regex': "\'[^\']*$", 'token': 'constant.character', 'next': 'start' } )
    # Rules for blank marks :
    list_rules.append( { 'regex': '_\\?_', 'token': 'invalid', 'next': 'start' })
    list_rules.append( { 'regex': '(^| )(_)(?= |$)', 'token': ['text','invalid'], 'next': 'start' } )
    # Rules for numbers
    if (data_level["number"]) :
        if (data_level["number_with_decimal"]) :
            number_regex = '(' + DIGIT + '*\\.?' + DIGIT + '+)'
        else:
            number_regex = '(' + DIGIT + '+)'
        list_rules.append({'regex': START_WORD + number_regex + END_WORD, 'token': ['text','variable'], 'next':'start'} )
        # Special case of a keyword directly followed by a number
        for command in data_level["space_before"]:
            list_rules.append({
                'regex': START_WORD + get_translated_keyword(command) + number_regex + END_WORD,
                'token': ['text','keyword','variable'],
                'next': 'start',
            })
        for command in data_level["no_space"]:
            list_rules.append({
                'regex': get_translated_keyword(command) + number_regex + END_WORD,
                'token': ['keyword','variable'],
                'next': 'start',
            })
    # Rules for commands of space_before_and_after
    # These are the keywords that must stand "alone", i.e. neither preceded nor followed directly by a word
    for command in data_level["space_before_and_after"]:
        list_rules.append({
            'regex': START_WORD + get_translated_keyword(command) + END_WORD,
            'token': ["text","keyword"],
            'next': "start",
        })

    # Rules for commands of no_space
    # These are the keywords that are independent of the context (formerly the symbols).
    # In particular, even if they sit between two words, the syntax highlighting will select them.
    for command in data_level["no_space"]:
        list_rules.append({
            'regex': get_translated_keyword(command),
            'token': ["keyword"],
            'next': "start",
        })
    # Rules for commands of space_before
    # This category of keywords allows keywords that are not preceded
    # by another word, but that can be followed immediately by another word (see PR #2413).
    for command in data_level["space_before"]:
        list_rules.append({
            'regex': START_WORD + get_translated_keyword(command),
            'token': ["text","keyword"],
            'next': "start",
        })
    # Rules for commands of space_after
    # This category of keywords allows keywords that can be preceded immediately
    # by another word, but that are not followed by another word.
    for command in data_level["space_after"]:
        list_rules.append({
            'regex': get_translated_keyword(command) + END_WORD,
            'token': ["keyword"],
            'next': "start",
        })
    # Rules for constants (colors, directions)
    for command in data_level['constant']:
        list_rules.append({
            'regex': START_WORD + get_translated_keyword(command) + END_WORD,
            'token': ["text",TOKEN_CONSTANT],
            'next': "start",
        })...
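
In this snippet, every entry appended to list_rules is an Ace-editor-style highlight rule: a dict with a regex, a token (or a list of tokens, one per capture group), and the next lexer state. A minimal sketch of two hand-built entries of the same shape, using a placeholder pattern in place of START_WORD and get_translated_keyword (which come from the imported modules and are not shown here):

import re

# Hand-built examples of the rule dicts the function above appends; the real
# regex fragments come from LEVELS and definition.py, which are not shown.
comment_rule = {'regex': '#.*$', 'token': 'comment', 'next': 'start'}
keyword_rule = {'regex': '(^| )(print)(?= |$)', 'token': ['text', 'keyword'], 'next': 'start'}

line = "print hello  # greet the user"
print(re.search(comment_rule['regex'], line).group())    # '# greet the user'
print(re.search(keyword_rule['regex'], line).groups())   # ('', 'print')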
