Best Python code snippet using fMBT_python
decisionTree.py
Source:decisionTree.py  
from __future__ import division
from pacman import Directions
from game import Agent
import api
import random
import game
import util
import sys
import os
import csv
import math
import numpy as np
from uuid import uuid4
import copy
# My classifier is a decision tree with post-pruning to reduce over-fitting.
# The decision tree is built recursively, selecting the attribute that minimises the average gini score.
# The data is split 80/20, with 80% used to build the tree, and the remaining data is held back to implement post-pruning.
# Post-pruning is implemented by iteratively pruning nodes at the bottom of the tree whenever pruning improves accuracy on the 20%.
# No external machine learning libraries (e.g. SKLearn) have been used.
# My code includes tree learning, tree pruning, prediction and train/test splitting.
def getMinValueOfDictionary(d):
    # Utility function: given a dictionary, it returns a list of the keys associated with the min value.
    minValue = min(d.values())
    keys = [k for k, v in d.items() if v == minValue]
    return keys
def modalValue(array):
    # Utility function: returns the most common item in a list
    return max(set(array), key=list(array).count)
def gini(c):
    # The gini measure for identifying the best attribute to split on.
    # The function takes a 1D numpy array and returns a float which is the gini measure for that array.
    # Count the frequency of each class and store a total so we can calculate the probabilities
    y_counts = np.unique(c, return_counts=True)[1]
    size = np.sum(y_counts)
    # Calculate the probabilities, and store a list of their squares
    probabilities = [value / size for value in y_counts]
    H = [p ** 2 for p in probabilities]
    # Return 1 - sum of squared probabilities
    return 1 - np.sum(H)
def splitBranch(combineddata, attribute):
    # Takes a combined numpy array and returns two, split on the passed attribute (always assumed to contain only 0s or 1s).
    # First we filter the data, splitting on whether the attribute is 0 or 1
    split1 = combineddata[combineddata[:, attribute] == 0]
    split2 = combineddata[combineddata[:, attribute] == 1]
    # Then we remove the attribute we used as we don't use it again
    split1 = np.delete(split1, attribute, axis=1)
    split2 = np.delete(split2, attribute, axis=1)
    # Return a tuple with the two new datasets
    return (split1, split2)
def expectedGini(combineddata, attribute):
    # This function calculates the expected gini index across the two resultant datasets for a branch.
    # It assumes both the data and the target are labelled 0/1.
    # It will not work for non-binary attributes.
    class1, class2 = splitBranch(combineddata, attribute)
    class1_size = len(class1) / (len(class1) + len(class2))
    class2_size = len(class2) / (len(class1) + len(class2))
    return class1_size * gini(class1[:, -1]) + class2_size * gini(class2[:, -1])
def bestAttribute(dataset):
    # This method takes a 2D numpy array and returns the attribute on which a binary split results in the lowest expected gini index
    columns = range(len(dataset[0]))  # Simply creates a list of attribute IDs of the required length
    # Create a dictionary, keyed by the attributes, with the expected gini for each
    expectedGinis = {columns[t]: expectedGini(dataset, t) for t in range(len(columns) - 1)}
    # There may be more than one 'best attribute' sharing the same lowest expected gini value, in which case we select one of them at random
    bestattribute = random.choice(getMinValueOfDictionary(expectedGinis))
    return bestattribute
def shouldTerminate(dataset):
    # This method controls the iteration that builds the decision tree. It makes its decision based on the dataset passed to it.
    # We terminate if all the samples have the same class (case 1), OR if we have run out of examples (case 2), OR if we have run out of attributes (case 3).
    # Split the combined dataset into the X's and the labels
    classes = dataset[:, -1]
    attributes = dataset[:, 0:-1]
    # Case 1
    doITerminate = all(classes[0] == thisClass for thisClass in classes)
    if doITerminate: return True
    # Case 2
    if len(attributes) == 0:
        return True
    # Case 3
    else:
        doITerminate = all(all(attributes[0] == att) for att in attributes)
        return doITerminate
def plurality(classes):
    # This method takes a list of classes and returns the most common one. It is used for classification at leaf nodes.
    return max(set(classes), key=list(classes).count)
def classification(dataset, parentdataset):
    # If the classes for the data at the leaf node are all the same, then we return that class
    if len(np.unique(dataset[:, -1])) == 1:
        return dataset[0, -1]
    # If we have more than one class in the dataset, we take the most common result in the dataset (plurality)
    elif len(np.unique(dataset[:, -1])) > 1:
        return plurality(dataset[:, -1])
    # If we have nothing in the dataset (no examples), then we take plurality from the parent
    else:
        return plurality(parentdataset[:, -1])
class Node:
    # The objects in my tree are nodes which can be either 'BranchingNode' or 'LeafNode'. This class is a parent class for both and contains the pruning logic.
    # Nodes are referenced in the tree by a uuid which is generated by the uuid library on demand.
    def pruneTreeAt(self, uuid):
        # This method returns a new tree, pruned at the node supplied. If passed a node ID that doesn't exist, it simply returns the existing tree.
        if self.id == uuid:
            return LeafNode(self.dataset, id=self.id)
        elif self.isLeaf:
            return self
        else:
            return BranchingNode(self.attributeToSplitOn, self.leftTree.pruneTreeAt(uuid),
                                 self.rightTree.pruneTreeAt(uuid), dataset=self.dataset, id=self.id)
class BranchingNode(Node):
    # Branching nodes are those in the tree that are not leaves (i.e. have children).
    def __init__(self, attributeToSplitOn, leftTree, rightTree, dataset=None, isRoot=False, id=None):
        self.isRoot = isRoot  # This is true only for the first node created
        self.isLeaf = False
        self.id = str(uuid4()) if id is None else id  # We give uuids to every node so we can reference them later during pruning. If a node id is passed in to the constructor, e.g. after pruning, the old ID is preserved.
        self.dataset = dataset  # Store the dataset used to branch on this node. It only gets used in the case of pruning, when the branching node gets converted into a leaf node.
        # Record which attribute was selected at this point and allocate the two subtrees
        self.attributeToSplitOn = attributeToSplitOn  # Store the attribute that was used to branch on this node
        # Store the child nodes of this node
        self.leftTree = leftTree
        self.rightTree = rightTree
    def predict(self, x):
        # Once the tree exists, predict recursively calls itself, traversing the tree until it finds a leaf node, which returns a prediction.
        # It takes parameter 'x' which is a feature vector of data.
        value = x[self.attributeToSplitOn]  # Get the value of the feature vector at the relevant attribute
        x = np.delete(x, self.attributeToSplitOn)  # Because we deleted attributes as we built the tree, we must also delete them from the feature vector to ensure alignment
        # The branching statement below traverses left or right down the tree depending on 'value'
        if value == 0:
            return self.leftTree.predict(x)
        elif value == 1:
            return self.rightTree.predict(x)
    def isPrunable(self):
        # Prunable nodes are those with two leaf children. This method is used by getPrunableNodes()
        return self.leftTree.isLeaf and self.rightTree.isLeaf
class LeafNode(Node):
    # Leaf nodes are those nodes at the bottom of the tree with no children.
    def __init__(self, dataset, parentDataset=None, id=None):
        self.isLeaf = True
        self.id = str(uuid4()) if id is None else id  # Similar to branching nodes, we assign uuids to each node.
        # We store the predicted class returned by the classification method. We must pass both the dataset and the parent's dataset; the latter is only used if the former is empty.
        self.predictedClass = classification(dataset, parentDataset)
    # Predict will be called by the branching node above the leaf node. We can simply return the value we stored in the attribute when creating the tree.
    def predict(self, x):
        # This method returns the predicted class that the constructor stored in the leaf node
        return self.predictedClass
    def isPrunable(self):
        # Leaf nodes are never prunable
        return False
# The decision tree is built recursively. The function checks for the terminating conditions, and otherwise recursively calls itself to split the data using the best attribute.
def generateTree(dataset, parentDataset=None, isRoot=False):
    # First we check for termination conditions
    if (shouldTerminate(dataset)):
        return LeafNode(dataset, parentDataset=parentDataset)
    # Otherwise we choose the attribute to split on, split the dataset and recursively call the function on the two children.
    else:
        attributeToSplitOn = bestAttribute(dataset)
        splitDataset = splitBranch(dataset, attributeToSplitOn)
        # As well as the two new datasets, we pass the current dataset in case the children need to classify using plurality on their parent
        return BranchingNode(attributeToSplitOn, generateTree(splitDataset[0], dataset),
                             generateTree(splitDataset[1], dataset), dataset=dataset, isRoot=isRoot)
class Tree:
    # This class is a wrapper for the node objects, with some additional methods that get called on the entire tree.
    # We use this method if we are rebuilding an existing tree (e.g. after pruning)
    def fromNode(self, node):
        self.tree = node
    # We use this method to preprocess the data and then generate the tree.
    def fit(self, data, target):
        # Convert the data to numpy arrays if it isn't already
        data = np.array(data)
        self.target = np.array(target)
        # This will drop any columns for which all values are the same. These don't contain any information.
        # This means the classifier can be safely extended to e.g. random forests where we don't select from all attributes at every node.
        # If we had multiple completely sparse attributes, then a random forest fails when all the attributes it considers at a node are sparse.
        self.data = data[:, np.invert(np.all(data == data[0, :], axis=0))]
        self.droppedAttributes = np.arange(data.shape[1])[np.all(data == data[0, :], axis=0)]
        # We combine the two arrays into one big one, call the generateTree method and store the result
        combineddata = np.concatenate((self.data, self.target[:, None]), axis=1)
        self.tree = generateTree(combineddata, isRoot=True)
    def predict(self, x):
        # This method passes through to the top node of the tree
        return self.tree.predict(x)
    def test(self, X_test, y_test):
        # This method returns the accuracy of the tree on a new dataset. This is required for pruning.
        predictions = [self.predict(x) for x in X_test]  # Create predictions for all values in the dataset
        predictions_vs_actuals = np.unique((y_test - predictions) == 0, return_counts=True)  # Count the number of correct and incorrect predictions
        predictions_dict = dict(zip(predictions_vs_actuals[0], predictions_vs_actuals[1]))
        total = np.sum(predictions_vs_actuals[1])  # Total number of predictions made
        # The next line manages an edge case where no predictions were correct. Without this the following calculation would raise a key error.
        if True not in predictions_dict.keys(): predictions_dict[True] = 0
        # Return the accuracy score
        accuracy = predictions_dict[True] / total
        return accuracy
def getPrunableNodes(node):
    # This function is used for pruning. Given a tree node, it returns all the nodes that could be pruned below it.
    prunableNodes = []  # Create an empty list to store the results of the inner function
    # This inner function is used because we search the tree recursively. If we returned within the outer function, it would stop searching too early.
    def getPrunableNodes_inner(node):
        if node.isPrunable():
            prunableNodes.append(node.id)
        if not node.isLeaf:
            getPrunableNodes_inner(node.leftTree)
            getPrunableNodes_inner(node.rightTree)
    getPrunableNodes_inner(node)
    return prunableNodes
def pruneTree(tree, X_test, y_test):
    # This function implements post-pruning on a tree to reduce overfitting. It takes a tree, and some new data which we held back.
    # It starts at the bottom of the tree, checking 'prunable' nodes. It compares accuracy before and after pruning a node, and keeps the pruned tree whenever pruning does not hurt accuracy.
    # The function returns a newly pruned tree.
    # We create an initial baseline for accuracy
    treeAccuracy = tree.test(X_test, y_test)
    # This control variable manages the while loop
    continueIterating = True
    while continueIterating:
        continueIterating = False
        # Fetch all prunable nodes on the current tree
        prunableNodes = getPrunableNodes(tree.tree)
        # Iterate through the prunable nodes
        for prunableNode in prunableNodes:
            # Create a new tree with the node pruned
            prunedTree = Tree()
            prunedTree.fromNode(tree.tree.pruneTreeAt(prunableNode))
            prunedTreeAccuracy = prunedTree.test(X_test, y_test)
            # Compare accuracy to the baseline. If 'as good' or better, then we adopt the new tree
            if prunedTreeAccuracy >= treeAccuracy:
                tree = copy.deepcopy(prunedTree)
                continueIterating = True  # Because we changed the tree, there may be new prunable nodes to check.
    return tree
def train_test_split(data, target, test_proportion):
    # This function splits the data in two, according to the proportion passed in the parameter
    # If necessary, convert the data to numpy objects
    data = np.array(data)
    target = np.array(target)
    # We create a shuffled list of row indices which determines which rows are selected into each group.
    shuffledInts = np.random.permutation(len(data))
    # We chop the list of shuffled indices in the proportions passed in the parameter
    testIndices = shuffledInts[:int(test_proportion * len(data))]
    trainIndices = shuffledInts[int(test_proportion * len(data)):]
    # We return four numpy arrays as a tuple
    return (data[trainIndices], data[testIndices], target[trainIndices], target[testIndices])
# An agent that runs a classifier to decide what to do.
class ClassifierAgent(Agent):
    # Constructor. This gets run when the agent starts up.
    def __init__(self):
        print "Initialising"
    # Take a string of digits and convert to an array of
    # numbers. Exploits the fact that we know the digits are in the
    # range 0-4.
    #
    # There are undoubtedly more elegant and general ways to do this,
    # exploiting ASCII codes. This function and the code that reads the file into data and target is not mine. Line 383 onwards is.
    def convertToArray(self, numberString):
        numberArray = []
        for i in range(len(numberString) - 1):
            if numberString[i] == '0':
                numberArray.append(0)
            elif numberString[i] == '1':
                numberArray.append(1)
            elif numberString[i] == '2':
                numberArray.append(2)
            elif numberString[i] == '3':
                numberArray.append(3)
            elif numberString[i] == '4':
                numberArray.append(4)
        return numberArray

    # This gets run on startup. Has access to state information.
    #
    # Here we use it to load the training data.
    def registerInitialState(self, state):
        # Open the datafile, extract its content into an array, and close it.
        self.datafile = open('good-moves.txt', 'r')
        content = self.datafile.readlines()
        self.datafile.close()
        # Now extract data, which is in the form of strings, into an
        # array of numbers, and separate into matched data and target
        # variables.
        self.data = []
        self.target = []
        # Turn content into nested lists
        for i in range(len(content)):
            lineAsArray = self.convertToArray(content[i])
            dataline = []
            for j in range(len(lineAsArray) - 1):
                dataline.append(lineAsArray[j])
            self.data.append(dataline)
            targetIndex = len(lineAsArray) - 1
            self.target.append(lineAsArray[targetIndex])
        # I use my train_test_split function to split the data and target into a training dataset, holding back 20% which is then used for pruning
        X_train, X_test, y_train, y_test = train_test_split(self.data, self.target, 0.2)
        # I create a tree object and fit it using the training data
        dtree = Tree()
        dtree.fit(X_train, y_train)
        # Prune the tree using the held-back data and store the result.
        self.prunedTree = pruneTree(dtree, X_test, y_test)

    # Tidy up when Pacman dies (not my code)
    def final(self, state):
        print "I'm done!"
    # Turn the numbers from the feature set into actions (not my code):
    def convertNumberToMove(self, number):
        if number == 0:
            return Directions.NORTH
        elif number == 1:
            return Directions.EAST
        elif number == 2:
            return Directions.SOUTH
        elif number == 3:
            return Directions.WEST
    # Here we just run the classifier to decide what to do
    def getAction(self, state):
        # Get the current feature vector and identify the legal moves
        features = api.getFeatureVector(state)
        legal = api.legalActions(state)
        # We use the predict method of our tree to predict the best move.
        move = self.prunedTree.predict(features)
        move = self.convertNumberToMove(move)
        print move
...
translate.py
Source:translate.py
#!/usr/bin/env python3
# -------------------------------------------------------------------------------
#  PROJECT: FPGA Brainfuck
# -------------------------------------------------------------------------------
#  AUTHORS: Pavel Benacek <pavel.benacek@gmail.com>
#  LICENSE: The MIT License (MIT), please read LICENSE file
#  WEBSITE: https://github.com/benycze/fpga-brainfuck/
# -------------------------------------------------------------------------------
import pdb
import readline
from lib.isa import BIsa
from lib.template import *
class BTranslationError(Exception):
    """
    Error during the translation was detected
    """
    def __init__(self, message, line, column):
        self.line = line
        self.column = column
        self.message = "Error {}:{} - {}".format(line, column, message)
        super().__init__(self.message)
class BTranslate(object):
    """
    Class for handling the translation from Brainfuck
    code to BCPU code.
    """
    def __init__(self, in_file, debug, memory_map, addr_width, outfile):
        """
        Initialization of the class which takes care of the
        translation to the BCPU.
        Parameters:
            - in_file - input file to translate (string)
            - debug - debug is enabled (bool)
            - memory_map - generate the output memory map (bool).
                The output file will be named ${outfile}.mif
            - addr_width - required address width (number of address bits)
            - outfile - output file name (string)
        """
        self.in_file    = in_file
        self.debug      = debug
        self.memory_map = memory_map
        self.outfile    = outfile
        self.memory_map_name   = outfile + ".mif"
        self.memory_hmap_name  = outfile + ".hex"
        self.memory_addr_width = addr_width
        # Helping variables - source code parsing
        self.line_buf   = ''
        self.line_cnt   = 0
        self.char_cnt   = 0
        self.last_sym   = ''
        # Helping variables - memory files
        self.mem_pos    = 0
    def __get_char(self):
        """
        Return a char from the input. The method returns an empty
        string when the end of the file has been reached.
        """
        # Read a line if the buffer is empty
        if self.line_buf == '':
            self.line_cnt = self.line_cnt + 1
            self.line_buf = self.inf.readline()
            self.char_cnt = 0
        if len(self.line_buf) == 0:
            # Nothing else to read
            return ''
        # Extract one character
        self.char_cnt = self.char_cnt + 1
        char = self.line_buf[0]
        self.line_buf = self.line_buf[1:]
        return char
    def __process_comment(self):
        """
        Process a comment - read the comment from the input until the newline is detected
        """
        comment = ""
        while True:
            # Read the input until the new line is detected
            char = self.__get_char()
            if (char == '\n') or (char == ''):
                break
            comment = comment + char
        # End of the while
        if(self.debug):
            print("Lexer: Parsed comment => {}".format(comment))
    def __get_symbol(self):
        """
        Get the symbol from the input, skipping comments.
        Comments begin with // and run to the end of the line.
        The symbol is stored inside the variable self.last_sym
        """
        while True:
            # Check if we are not done, skip white spaces
            char = self.__get_char()
            if char == '':
                # Nothing else to read
                self.last_sym = ''
                break
            if char.isspace():
                continue
            # Check if we are working with a comment
            if char == "/":
                char = self.__get_char()
                if not(char == "/"):
                    raise BTranslationError("Expecting / symbol", self.line_cnt, self.char_cnt)
                # Process the comment and run the parsing again after you are done
                self.__process_comment()
                continue
            # Check if we are working with an allowed
            # symbol.
            if not(BIsa.contains(char)):
                raise BTranslationError("Unknown symbol was detected", self.line_cnt, self.char_cnt)
            # Yahoo ... we can return the symbol which is possible to translate
            if self.debug:
                print("Lexer: Parsed symbol => {}".format(char))
            # Remember the symbol and escape from the function
            self.last_sym = char
            break
    def __translate_body(self):
        """
        Translate the body of the BCPU program
        Returns: The translated code (human readable form) - a list of tuples (instruction, address)
        """
        # The body consists of non-jump instructions - if the instruction [ or ] is detected
        # the following will happen:
        # 1) The [ is translated as a pass to the __translate_cycle method
        # 2) The ] is translated as the return from the method, which means that the cycle
        #    is being processed inside __translate_cycle
        #
        inst_body = []
        # Get the symbol and analyze whether we are working with a JUMP instruction
        while True:
            self.__get_symbol()
            # Check if we have something to process
            if self.last_sym == '':
                if self.debug:
                    print("No other symbol to process, ending.")
                # No other instruction, append a nop
                nop_inst = ((";", 0), self.mem_pos)
                self.mem_pos = self.mem_pos + BIsa.INST_WIDTH
                inst_body.append(nop_inst)
                break
            # Check if we are working with any jump symbol, dump the body into the list
            # and escape from the cycle
            if BIsa.is_bjump(self.last_sym):
                cycle_body = self.__translate_cycle()
                inst_body.extend(cycle_body)
                continue
            if BIsa.is_ejump(self.last_sym):
                # We have a closing parenthesis inside the body, end the processing here and return
                # to the upper __translate_cycle
                break
            # Check if we are working with a body instruction; raise an error if not
            if not(BIsa.is_body_instruction(self.last_sym)):
                raise BTranslationError("Unknown symbol {}.".format(self.last_sym), self.line_cnt, self.char_cnt)
            # So far so good, add it into the list and try the next symbol
            inst = ((self.last_sym, 0), self.mem_pos)
            if self.debug:
                print("Dumping the instruction: {}".format(str(inst)))
            inst_body.append(inst)
            self.mem_pos = self.mem_pos + BIsa.INST_WIDTH
        # We are out ... time to dump our code
        return inst_body
    def __add_cycle_padding(self):
        """
        Add the jump padding - two no-ops
        """
        ret = []
        ret.append((("&", 0), self.mem_pos))
        self.mem_pos = self.mem_pos + BIsa.INST_WIDTH
        ret.append(((";", 0), self.mem_pos))
        self.mem_pos = self.mem_pos + BIsa.INST_WIDTH
        return ret
    def __translate_cycle(self):
        """
        Translate the BCPU cycle construction
        Returns: The translated code (human readable form) - a list of tuples ((jump_instr, val), address)
        """
        inst_body = []
        # The translate cycle should detect the opening symbol [ and
        # the closing symbol ]
        # Fine ... check if we have an opening symbol
        if not(self.last_sym == '['):
            raise BTranslationError("Cycle opening [ not found, detected {}.".format(self.last_sym), self.line_cnt, self.char_cnt)
        # Remember the first address, translate the body, remember the return address and construct
        # the jump instruction.
        # Each jump needs to be preceded by the preload operation (to store data in the execution stage) and
        # one NOP instruction to have fresh data in stage 2 (jump analysis)
        f_pad_jmpend = self.__add_cycle_padding()
        bAddress = self.mem_pos
        self.mem_pos = self.mem_pos + BIsa.INST_WIDTH
        body_code = self.__translate_body()
        f_pad_jmpbegin = self.__add_cycle_padding()
        eAddress = self.mem_pos
        self.mem_pos = self.mem_pos + BIsa.INST_WIDTH
        # Check if we have a closing symbol
        if not(self.last_sym == ']'):
            raise BTranslationError("Cycle closing ] not found, detected {}.".format(self.last_sym), self.line_cnt, self.char_cnt)
        # We are done ... everything is fine. Time to dump our functionality
        # Front jump -- we need to jump to the next address behind the ]
        # Back jump -- we need to jump to the address relative to the ], following the ]
        fJumpOffset = eAddress - bAddress + BIsa.INST_WIDTH
        bJumpOffset = eAddress - bAddress - BIsa.INST_WIDTH
        if self.debug:
            print("bAddress = 0x{:x}".format(bAddress))
            print("eAddress = 0x{:x}".format(eAddress))
            print("fJump \"[\" value is 0x{:x}".format(fJumpOffset))
            print("bJump \"]\" value is 0x{:x}".format(bJumpOffset))
        # Check that offsets fit into the 12-bit jump field
        max_jmp = 2 ** 12 - 1
        if fJumpOffset > max_jmp or bJumpOffset > max_jmp:
            raise BTranslationError("Jump is longer than {} B.".format(max_jmp), self.line_cnt, self.char_cnt)
        # Generate the [
        inst_body.extend(f_pad_jmpend)
        fJump = (("[", fJumpOffset), bAddress)
        inst_body.append(fJump)
        # Append the body to the list
        inst_body.extend(body_code)
        # Generate the ] and return the body
        inst_body.extend(f_pad_jmpbegin)
        bJump = (("]", bJumpOffset), eAddress)
        inst_body.append(bJump)
        return inst_body
    def __memory_map_to_bin(self, mem_map):
        """
        Convert the memory map to a binary form.
        Return: Byte form of the file uploaded to the BCPU
        """
        if(self.debug):
            print("Dumping the memory map to its binary form")
        ret = bytearray()
        for m_elem, _ in mem_map:
            # Check if we are working with a symbol or a
            # jump instruction. Each instruction is encoded as (inst, addr),
            # where inst can be a symbol or a jump tuple (jmp, val)
            bF = None
            if BIsa.is_jump_instruction(m_elem[0]):
                bF = BIsa.translate_jump(m_elem[0], m_elem[1])
            else:
                bF = BIsa.translate_inst(m_elem[0])
            # Append the result of the conversion
            ret.extend(bF)
        return ret
    def __dump_inst(self, m_elem):
        """
        Translate one memory element which is a tuple of instruction
        and data (inst, data)
        Return: Translated instruction
        """
        # Prepare data - m_elem is not a tuple iff we are not working
        # with a jump instruction
        bData   = 0
        sym     = m_elem[0]
        if BIsa.is_jump_instruction(sym):
            # It is a jump instruction
            bData = BIsa.translate_jump(m_elem[0], m_elem[1])
        else:
            # It is an instruction
            bData = BIsa.translate_inst(sym)
        return bData
    def __dump_mem_map(self, mem_map):
        """
        Store the memory map into the file. The format of the
        file is MIF (https://www.intel.com/content/www/us/en/programmable/quartushelp/13.0/mergedProjects/reference/glossary/def_mif.htm)
        All templates are defined in the template.py file.
        """
        ret = mif_hdr_template.format(self.outfile, len(mem_map) * BIsa.INST_WIDTH, 8)
        # The length of the dumped data is the number of program instructions times the instruction
        # size
        # Dump the memory layout
        for m_elem, addr in mem_map:
            bData = self.__dump_inst(m_elem)
            # Each line starts with a comment, after that we need to dump
            # address : data
            i_arg = BIsa.get_instruction_argument(bData)  # Try to decode it back
            ret += "-- Translated instruction ==> {} (parameter = 0x{} )\n".format(m_elem[0], i_arg)
            ret += mif_line_template.format(addr, bData[0])
            ret += mif_line_template.format(addr + 1, bData[1])
        # End the file
        ret += mif_end_template
        return ret
    def __dump_mem_hmap(self, mem_map):
        """
        Store the memory map in hexadecimal format (one line per 2 bytes).
        The total number of bytes is 2**self.memory_addr_width
        """
        # Process the memory, dump it to the file
        ret = ""
        cell_addrs = 2 ** self.memory_addr_width
        for m_elem, _ in mem_map:
            bData = self.__dump_inst(m_elem)
            ret   += hex_line_template.format(*bData)
            cell_addrs = cell_addrs - 2
        # Fill the rest of the file with zeros
        while(cell_addrs > 0):
            ret += hex_line_template.format(0, 0)
            cell_addrs = cell_addrs - 2
        return ret
    def translate(self):
        """
        Run the translation of the source code
        """
        try:
            # Open the file and process the input body
            #
            # The program is first parsed and converted into a tree structure
            # where each program body is stored inside a list. After we process the whole
            # program we dump the body of the program as the last step of each function,
            # because we need to resolve jump values (which are known only after the translation).
            #
            # That is the plan - let's rock!!
            self.inf = open(self.in_file, 'r')
            # Get the memory map and convert it to the binary form
            bprogram = self.__translate_body()
            # Add the program termination symbol
            iTerminate = (("x", 0), self.mem_pos)
            bprogram.append(iTerminate)
            # Write the memory map if it is required
            if self.memory_map:
                print("Dumping the memory map to file {}".format(self.memory_map_name))
                mem_map_content = self.__dump_mem_map(bprogram)
                mem_map_file = open(self.memory_map_name, 'w')
                mem_map_file.write(mem_map_content)
                mem_map_file.close()
                print("Dumping the memory map to file {}".format(self.memory_hmap_name))
                mem_hmap_content = self.__dump_mem_hmap(bprogram)
                mem_hmap_content_file = open(self.memory_hmap_name, 'w')
                mem_hmap_content_file.write(mem_hmap_content)
                mem_hmap_content_file.close()
            # Convert the memory map (human readable) to the binary form
            bin_form = self.__memory_map_to_bin(bprogram)
            out_file = open(self.outfile, 'wb')
            out_file.write(bin_form)
            out_file.close()
            if self.debug:
                print("Dumping the binary code into the file {}.".format(self.outfile))
        except IOError as e:
            print("Error during the file reading/writing operation.")
        except BTranslationError as e:
            print(str(e))
        finally:
            # Close the file
            self.inf.close()
...
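A minimal usage sketch (not part of the original file): the module path and the file names below are assumptions made for illustration; the constructor arguments follow the docstring above.

# Hypothetical driver: translate a Brainfuck file into a BCPU binary image,
# optionally emitting the .mif/.hex memory maps next to the output file.
from lib.translate import BTranslate  # assumed module path - adjust to the project layout

translator = BTranslate(
    in_file="hello.b",    # Brainfuck source to translate (assumed to exist)
    debug=True,           # print lexer/translator progress
    memory_map=True,      # also write hello.bin.mif and hello.bin.hex
    addr_width=10,        # address width in bits used to pad the .hex dump
    outfile="hello.bin")  # binary program for the BCPU
translator.translate()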
fmbt_i.py
Source:fmbt_i.py
...
    assert _debug(last_log_line) == '</fmbt_log>', "Log unfinished (%s)" % (last_log_line,)
    assert _debug(fmbt.isalive()) == False, "Process still alive"
    os.remove(FMBT_LOGFILE)
    return True
def iTerminate():
    os.kill(fmbt.pid, signal.SIGTERM)
    process_response_time()
    return fmbt.isalive() == False
def _validateHelp(help_lines):
    _debug(help_lines)
    assert help_lines[0].startswith('Execute actions:'), "'Execute actions' expected"
    assert help_lines[-2].startswith('Unknown command'), "'Unknown command' expected"
    assert help_lines[-1] == PROMPT, "Prompt missing"
    return True
def iHelpEmptyCommand():
    output = _run_command('')
    return _validateHelp(output)
def iHelpUnknownCommand():
    output = _run_command('?')
...
