How to use the transition_counts method in hypothesis

Best Python code snippets using hypothesis

functions.py

Source: functions.py (GitHub)

#!/usr/bin/python
import numpy as np
import pdb

def initialize_mdp_data(num_states):
    """
    Return a variable that contains all the parameters/state you need for your MDP.
    Assume that no transitions or rewards have been observed.
    Initialize the value function array to small random values (0 to 0.10, say).
    Initialize the transition probabilities uniformly (i.e., the probability of
    transitioning from state x to state y using action a is exactly 1/num_states).
    Initialize all state rewards and counts to zero.

    Args:
        num_states: The number of states. This value is constant.

    Returns: The initial MDP parameters. It should be a Python dict with the
    following key/value structure. You may add more key/value pairs to this
    to make things simpler, but the autograders will only consider the
    following:
    {
        'transition_probs': np.ndarray, dtype=np.float64,
            shape=(num_states, num_actions, num_states). The MDP transition
            probability for each transition.
        'transition_counts': np.ndarray, dtype=np.float64,
            shape=(num_states, num_actions, num_states). The count of the number
            of times each transition was taken (used for tracking transitions to
            later calculate avg_reward and transition_probs).
        'avg_reward': np.ndarray, dtype=np.float64, shape=(num_states,). The
            average reward for entering each MDP state.
        'sum_reward': np.ndarray, dtype=np.float64, shape=(num_states,). The
            summed reward earned when entering each MDP state (used to track
            rewards for later calculating avg_reward).
        'value': np.ndarray, dtype=np.float64, shape=(num_states,). The
            state-value calculated for each MDP state (after value/policy
            iteration).
        'num_states': Int. Convenience value. This will not change throughout
            the MDP and can be calculated from the shapes of other variables.
    }
    """
    num_actions = 2  # RIGHT AND LEFT ACTIONS
    mdp_data = {
        'transition_probs': None,
        'transition_counts': None,
        'avg_reward': None,
        'sum_reward': None,
        'value': None,
        'num_states': None
    }
    mdp_data['transition_probs'] = (1.0 / num_states) * np.ones((num_states, num_actions, num_states))
    mdp_data['transition_counts'] = np.zeros((num_states, num_actions, num_states))
    mdp_data['avg_reward'] = np.zeros(num_states)
    mdp_data['sum_reward'] = np.zeros(num_states)
    mdp_data['value'] = np.random.uniform(low=0.0, high=0.1, size=(num_states,))
    mdp_data['num_states'] = num_states
    return mdp_data

def choose_action(state, mdp_data):
    """
    Choose the next action (0 or 1) that is optimal according to your current
    mdp_data. When there is no optimal action, return a random action.

    Args:
        state: The current state in the MDP
        mdp_data: The parameters for your MDP. See initialize_mdp_data.

    Returns:
        int, 0 or 1. The index of the optimal action according to your current MDP.
    """
    right = mdp_data['transition_probs'][state, 0, :].dot(mdp_data['value'])
    left = mdp_data['transition_probs'][state, 1, :].dot(mdp_data['value'])
    action = 1
    if right > left:
        action = 0
    return action

def update_mdp_transition_counts_sum_reward(mdp_data, state, action, new_state, reward):
    """
    Update the transition count and reward sum information in your mdp_data.
    Do not change the other MDP parameters (those get changed later).
    Record the number of times `state, action, new_state` occurs.
    Record the rewards for every `new_state`.

    Args:
        mdp_data: The parameters of your MDP. See initialize_mdp_data.
        state: The state that was observed at the start.
        action: The action you performed.
        new_state: The state after your action.
        reward: The reward after your action (i.e. reward corresponding to new_state).

    Returns:
        Nothing
    """
    mdp_data['transition_counts'][state, action, new_state] += 1
    mdp_data['sum_reward'][new_state] += reward
    return

def update_mdp_transition_probs_avg_reward(mdp_data):
    """
    Update the estimated transition probabilities and average reward values in your MDP.
    Make sure you account for the case when a state-action pair has never
    been tried before, or the state has never been visited before. In that
    case, you must not change that component (and thus keep it at the
    initialized uniform distribution).

    Args:
        mdp_data: The data for your MDP. See initialize_mdp_data.

    Returns:
        Nothing
    """
    for i in range(len(mdp_data['transition_counts'])):
        for j in range(len(mdp_data['transition_counts'][i])):
            suma = mdp_data['transition_counts'][i, j].sum()
            if suma > 0.0:
                for k in range(len(mdp_data['transition_counts'][i][j])):
                    mdp_data['transition_probs'][i, j, k] = mdp_data['transition_counts'][i, j, k] / suma

    for i in range(len(mdp_data['avg_reward'])):
        # Only update states that have actually been entered (matches the denominator below).
        if mdp_data['transition_counts'][:, :, i].sum() > 0.0:
            mdp_data['avg_reward'][i] = mdp_data['sum_reward'][i] / mdp_data['transition_counts'][:, :, i].sum()
    return

def update_mdp_value(mdp_data, tolerance, gamma):
    """
    Update the estimated values in your MDP.
    Perform value iteration using the new estimated model for the MDP.
    The convergence criterion should be based on `TOLERANCE` as described
    at the top of the file.

    Args:
        mdp_data: The data for your MDP. See initialize_mdp_data.
        tolerance: The tolerance to use for the convergence criterion.
        gamma: Your discount factor.

    Returns:
        Nothing
    """
    current_diff = 9e10
    while current_diff > tolerance:
        right = mdp_data['transition_probs'][:, 0, :].dot(mdp_data['value'])
        left = mdp_data['transition_probs'][:, 1, :].dot(mdp_data['value'])
        # Bellman update
        value = mdp_data['avg_reward'] + gamma * np.maximum(right, left)
        current_diff = np.max(np.abs(value - mdp_data['value']))
        mdp_data['value'] = value
...
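
The functions above are normally driven by a simulation loop that feeds observed transitions back into the model. The sketch below is a minimal, hypothetical usage example: the five-state MDP and the hard-coded transition tuples are invented purely for illustration, and a real run would draw (state, action, new_state, reward) tuples from a simulator instead.

# Hypothetical usage sketch; assumes the functions from functions.py above are in scope.
mdp_data = initialize_mdp_data(num_states=5)

# Made-up (state, action, new_state, reward) observations, for illustration only.
observations = [
    (0, 1, 2, 0.0),
    (2, 0, 1, 0.0),
    (1, 1, 4, -1.0),
]

# Accumulate transition counts and reward sums from the observations.
for state, action, new_state, reward in observations:
    update_mdp_transition_counts_sum_reward(mdp_data, state, action, new_state, reward)

# Re-estimate transition probabilities and average rewards, then run value iteration.
update_mdp_transition_probs_avg_reward(mdp_data)
update_mdp_value(mdp_data, tolerance=0.01, gamma=0.95)

# Pick the greedy action for state 0 under the updated value estimates.
print(choose_action(0, mdp_data))

With a discount factor below 1 the Bellman update is a contraction, so the while loop in update_mdp_value terminates once successive value estimates differ by less than the tolerance.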

deciphering_utils.py

Source: deciphering_utils.py (GitHub)

...
        i0 = i1
        i += 1

    return p

def compute_transition_counts(text, char_to_ix):
    """
    Computes transition counts for a given text. Useful if you want to compute
    the probabilities again and again, using compute_log_probability_by_counts.

    Arguments:
    text: Text as a list of characters
    char_to_ix: character to index mapping

    Returns:
    transition_counts: transition_counts[i, j] gives number of times character j follows i
    """
    N = len(char_to_ix)
    transition_counts = np.zeros((N, N))
    c1 = text[0]
    i = 0
    while i < len(text) - 1:
        c2 = text[i + 1]
        transition_counts[char_to_ix[c1], char_to_ix[c2]] += 1
        c1 = c2
        i += 1

    return transition_counts

def compute_log_probability_by_counts(transition_counts, text, permutation_map, char_to_ix, frequency_statistics, transition_matrix):
    """
    Computes the log probability of a text under a given permutation map (switching the
    character c to permutation_map[c]), given the transition counts and the text.

    Arguments:
    transition_counts: a matrix such that transition_counts[i, j] gives the counts of times j follows i,
        see compute_transition_counts
    text: text to compute probability of, should be a list of characters
    permutation_map: permutation_map[c] gives the character to replace 'c' by
    char_to_ix: character to index mapping
    frequency_statistics: frequency of character i is stored in frequency_statistics[i]
    transition_matrix: probability of j following i stored at [i, j] in this matrix

    Returns:
    p: log likelihood of the given text
    """
    c0 = char_to_ix[permutation_map[text[0]]]
    p = np.log(frequency_statistics[c0])

    p_map_indices = {}
    for c1, c2 in permutation_map.items():
        p_map_indices[char_to_ix[c1]] = char_to_ix[c2]

    indices = [value for (key, value) in sorted(p_map_indices.items())]

    p += np.sum(transition_counts * np.log(transition_matrix[indices, :][:, indices]))

    return p

def compute_difference(text_1, text_2):
    """
    Compute the number of times two texts differ in character at the same positions.

    Arguments:
    text_1: first text, list of characters
    text_2: second text, should have same length as text_1

    Returns:
    cnt: number of times the texts differ in character at the same positions
    """
    cnt = 0
    for x, y in zip(text_1, text_2):
        if y != x:
            cnt += 1

    return cnt

def get_state(text, transition_matrix, frequency_statistics, char_to_ix):
    """
    Generates a default state of given text statistics.

    Arguments:
    pretty obvious

    Returns:
    state: A state that can be used along with
        compute_probability_of_state, propose_a_move,
        and pretty_state for metropolis_hastings
    """
    transition_counts = compute_transition_counts(text, char_to_ix)
    p_map = generate_identity_p_map(char_to_ix.keys())

    state = {"text": text, "transition_matrix": transition_matrix,
             "frequency_statistics": frequency_statistics, "char_to_ix": char_to_ix,
             "permutation_map": p_map, "transition_counts": transition_counts}

    return state

def compute_probability_of_state(state):
    """
    Computes the probability of given state using compute_log_probability_by_counts.
    """
    p = compute_log_probability_by_counts(state["transition_counts"], state["text"], state["permutation_map"],
                                          state["char_to_ix"], state["frequency_statistics"], state["transition_matrix"])
...
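
As a quick sanity check of compute_transition_counts feeding compute_log_probability_by_counts, the toy example below builds a three-character alphabet with hand-made frequency and transition statistics (all numbers are invented for illustration; in the real pipeline they come from a reference corpus) and scores the text under an identity permutation map.

import numpy as np

# Assumes compute_transition_counts and compute_log_probability_by_counts from above are in scope.
char_to_ix = {'a': 0, 'b': 1, 'c': 2}
text = list("abcabca")

counts = compute_transition_counts(text, char_to_ix)
# counts[i, j] = number of times character j follows character i
print(counts)

# Invented reference statistics for the toy alphabet.
frequency_statistics = np.array([0.4, 0.35, 0.25])
transition_matrix = np.array([[0.2, 0.6, 0.2],
                              [0.1, 0.2, 0.7],
                              [0.6, 0.2, 0.2]])

# Identity permutation map: every character maps to itself.
identity_map = {c: c for c in char_to_ix}
log_p = compute_log_probability_by_counts(counts, text, identity_map, char_to_ix,
                                          frequency_statistics, transition_matrix)
print(log_p)

Because the scoring only multiplies the precomputed counts against the log transition matrix, trying a different permutation_map does not require re-scanning the text.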

hmmlearn.py

Source: hmmlearn.py (GitHub)

import sys
import os
import string

def get_data(inputfile):
    sentences = []
    with open(inputfile, 'r') as f:
        txt = f.readline()
        while txt:
            sentences.append(txt.split())
            txt = f.readline()
    return sentences

def get_probabilities(sentences):
    transitions, emissions = {}, {}
    transition_counts, emission_counts = {}, {}
    start = "Initial"
    end = "Finish"
    prev_tag = "None"
    transition_counts[start] = len(sentences)
    transitions[start] = {}
    for sentence in sentences:
        for i in range(len(sentence)):
            index = sentence[i].rfind('/')
            word = sentence[i][:index].lower()
            tag = sentence[i][index + 1:]
            # Keep track of total tag emissions
            if tag in emission_counts:
                emission_counts[tag] += 1
            else:
                emission_counts[tag] = 1
            # Keep track of word - tag emissions
            if word in emissions:
                if tag in emissions[word]:
                    emissions[word][tag] += 1
                else:
                    emissions[word][tag] = 1
            else:
                emissions[word] = {}
                emissions[word][tag] = 1
            # Keep track of transitions
            if i == 0:
                if tag in transitions[start]:
                    transitions[start][tag] += 1
                else:
                    transitions[start][tag] = 1
                prev_tag = tag
            else:
                if prev_tag in transitions:
                    if tag in transitions[prev_tag]:
                        transitions[prev_tag][tag] += 1
                    else:
                        transitions[prev_tag][tag] = 1
                    transition_counts[prev_tag] += 1
                else:
                    transitions[prev_tag] = {}
                    transitions[prev_tag][tag] = 1
                    transition_counts[prev_tag] = 1
                prev_tag = tag
            if i == (len(sentence) - 1):
                if tag in transitions:
                    if end in transitions[tag]:
                        transitions[tag][end] += 1
                    else:
                        transitions[tag][end] = 1
                    transition_counts[tag] += 1
                else:
                    transitions[tag] = {}
                    transitions[tag][end] = 1
                    transition_counts[tag] = 1
    ecount = 0
    for word in emissions:
        for tag in emissions[word]:
            emissions[word][tag] = float(emissions[word][tag] / emission_counts[tag])
            ecount += 1
    tcount = 0
    t_types = []
    for tag_from in transitions:
        temp = 0
        for tag_to in transitions[tag_from]:
            if tag_to not in t_types:
                t_types.append(tag_to)
            temp += transitions[tag_from][tag_to]
            tcount += 1
        transitions[tag_from]["Total"] = temp
        tcount += 1
    return transitions, emissions, ecount, tcount, t_types

def create_model(transitions, emissions, ecount, tcount, num_types):
    with open('hmmmodel.txt', 'w') as f:
        f.write("Emission probabilities - word, TAG, float - #{0}\n".format(ecount))
        for word in emissions:
            for tag in emissions[word]:
                f.write("{0} {1} {2}\n".format(word, tag, emissions[word][tag]))
        f.write("Transition probabilities - TAG1, TAG2, float - #{0}\n".format(tcount))
        f.write("Transition types - #{0}\n".format(num_types))
        for tag_from in transitions:
            for tag_to in transitions[tag_from]:
                f.write("{0} {1} {2}\n".format(tag_from, tag_to, transitions[tag_from][tag_to]))

def main(argv):
    sentences = get_data(argv[0])
    transitions, emissions, ecount, tcount, transition_types = get_probabilities(sentences)
    create_model(transitions, emissions, ecount, tcount, len(transition_types))

if __name__ == "__main__":
...
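
To see the counting logic end to end, the sketch below writes a tiny hand-made word/TAG corpus to disk and runs get_data, get_probabilities, and create_model on it. The file name, sentences, and tag set are all hypothetical; the real assignment supplies its own tagged training data.

# Assumes get_data, get_probabilities, and create_model from above are in scope.
sample = "the/DT dog/NN barks/VBZ\na/DT cat/NN sleeps/VBZ\n"
with open("sample_tagged.txt", "w") as f:   # hypothetical file name
    f.write(sample)

sentences = get_data("sample_tagged.txt")
transitions, emissions, ecount, tcount, t_types = get_probabilities(sentences)

# transitions["Initial"]["DT"] counts sentences that start with tag DT;
# transitions["DT"]["NN"] counts DT -> NN transitions inside sentences.
print(transitions["Initial"]["DT"], transitions["DT"]["NN"])

# Writes the emission and transition tables to hmmmodel.txt.
create_model(transitions, emissions, ecount, tcount, len(t_types))

Note that the transition values written to hmmmodel.txt are raw counts plus a per-tag "Total" entry rather than normalized probabilities, so a decoding script would presumably divide by those totals when applying the model.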

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run hypothesis automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

