How to use the successor_states method in hypothesis

Best Python code snippets using hypothesis

search.py

Source: search.py (GitHub)


# search.py
# ---------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).

"""
In search.py, you will implement generic search algorithms which are called by
Pacman agents (in searchAgents.py).
"""

import util


class SearchProblem:
    """
    This class outlines the structure of a search problem, but doesn't implement
    any of the methods (in object-oriented terminology: an abstract class).
    You do not need to change anything in this class, ever.
    """

    def getStartState(self):
        """
        Returns the start state for the search problem.
        """
        util.raiseNotDefined()

    def isGoalState(self, state):
        """
        state: Search state
        Returns True if and only if the state is a valid goal state.
        """
        util.raiseNotDefined()

    def getSuccessors(self, state):
        """
        state: Search state
        For a given state, this should return a list of triples, (successor,
        action, stepCost), where 'successor' is a successor to the current
        state, 'action' is the action required to get there, and 'stepCost' is
        the incremental cost of expanding to that successor.
        """
        util.raiseNotDefined()

    def getCostOfActions(self, actions):
        """
        actions: A list of actions to take
        This method returns the total cost of a particular sequence of actions.
        The sequence must be composed of legal moves.
        """
        util.raiseNotDefined()


def tinyMazeSearch(problem):
    """
    Returns a sequence of moves that solves tinyMaze. For any other maze, the
    sequence of moves will be incorrect, so only use this for tinyMaze.
    """
    from game import Directions
    s = Directions.SOUTH
    w = Directions.WEST
    return [w, w, w, w, w, w, w, w, w, w, w, w, w, w, w, w, w, w, w]


def depthFirstSearch(problem):
    """
    Search the deepest nodes in the search tree first.
    Your search algorithm needs to return a list of actions that reaches the
    goal. Make sure to implement a graph search algorithm.
    To get started, you might want to try some of these simple commands to
    understand the search problem that is being passed in:
    print "Start:", problem.getStartState()
    print "Is the start a goal?", problem.isGoalState(problem.getStartState())
    print "Start's successors:", problem.getSuccessors(problem.getStartState())
    """
    "*** YOUR CODE HERE ***"
    # The following lines are mine.
    # problem is an object of class SearchProblem.
    start_state = problem.getStartState()
    state = start_state

    fringe_stack = util.Stack()
    explored_states = util.Stack()
    is_state_in_fringe = {start_state: True}
    successor_state_for_this_state = {}

    successor_states = problem.getSuccessors(state)
    successor_state_for_this_state[state] = successor_states
    ancestors = {state: []}  # The root state has no ancestors.

    for i in successor_states:
        fringe_stack.push(i)
        is_state_in_fringe[i] = True
        ancestors[i[0]] = [start_state]
    state = fringe_stack.pop()

    # Explore states until the goal state is found; don't expand the goal
    # state itself.
    while not problem.isGoalState(state[0]):
        successor_states = problem.getSuccessors(state[0])
        successor_state_for_this_state[state[0]] = successor_states

        for i in successor_states:
            next_state = i[0]
            if next_state in ancestors:  # Its ancestors are already listed.
                # Record the new ancestor only if no cycle is being formed.
                if state[0] not in ancestors[next_state] and next_state not in ancestors[state[0]]:
                    ancestors[next_state].append(state[0])
            else:
                ancestors[next_state] = [state[0]]
                ancestors[next_state].extend(ancestors[state[0]])

            if problem.isGoalState(next_state):
                fringe_stack.push(i)
                is_state_in_fringe[next_state] = True
                break  # If there is a goal state, it should be the first to pop off the stack.
            # Add the next state to the fringe only if it is not already
            # there and is not an ancestor of the current state.
            if next_state not in is_state_in_fringe or next_state not in ancestors[state[0]]:
                fringe_stack.push(i)
                is_state_in_fringe[next_state] = True

        explored_states.push(state)
        state = fringe_stack.pop()
        noSuccessorIsGoal = True

    # The loop has ended, so state is the goal state. Backtrack through the
    # explored states to obtain the path from the start state.
    if problem.isGoalState(state[0]) and noSuccessorIsGoal:
        explored_states.push(state)  # Since it wasn't added in the loop.
        planning_stack = util.Stack()
        current_state = explored_states.pop()
        planning_stack.push(current_state)
        while len(explored_states.list) > 0:
            predecessor_state = explored_states.pop()
            successor_states = successor_state_for_this_state[predecessor_state[0]]
            for i in successor_states:
                if i == current_state:  # Both the predecessor state and the action are the same.
                    current_state = predecessor_state
                    planning_stack.push(current_state)
        # Now the planning stack is full. The state next to the start state is
        # on top of the stack, stored with the action that got it there.
        action_sequence = []
        while len(planning_stack.list) > 0:
            current_state = planning_stack.pop()
            action_sequence.append(current_state[1])
        return action_sequence

    util.raiseNotDefined()


def breadthFirstSearch(problem):
    """Search the shallowest nodes in the search tree first."""
    "*** YOUR CODE HERE ***"
    # The following lines are mine.
    # problem is an object of class SearchProblem.
    start_state = problem.getStartState()
    # A dummy action of -1 and a cost of 0 mark this as the start state.
    state = (start_state, -1, 0)

    fringe_queue = util.Queue()

    successor_states = problem.getSuccessors(state[0])
    for i in successor_states:
        temp = [state]
        temp.append(i)
        fringe_queue.push(temp)

    traj = fringe_queue.pop()
    curr_state = traj[-1]
    nodes_expanded = [state[0]]

    while not problem.isGoalState(curr_state[0]):
        if curr_state[0] not in nodes_expanded:
            nodes_expanded.append(curr_state[0])
            successor_states = problem.getSuccessors(curr_state[0])

            for next_state in successor_states:
                temp = traj[:]
                temp.append(next_state)
                if next_state[0] not in nodes_expanded:
                    fringe_queue.push(temp)

        traj = fringe_queue.pop()
        curr_state = traj[-1]

    # The loop has exited, so curr_state is the goal state.
    action_sequence = []
    for i in traj[1:]:
        action_sequence.append(i[1])

    return action_sequence


def uniformCostSearch(problem):
    """Search the node of least total cost first."""
    "*** YOUR CODE HERE ***"
    # The following lines are mine.
    # problem is an object of class SearchProblem.
    start_state = problem.getStartState()
    # A dummy action of -1 and a cost of 0 mark this as the start state.
    state = (start_state, -1, 0)

    fringe_queue = util.PriorityQueue()

    successor_states = problem.getSuccessors(state[0])
    for i in successor_states:
        temp = [state]
        temp.append(i)
        fringe_queue.update(temp, i[2])

    traj = fringe_queue.pop()
    curr_state = traj[-1]
    nodes_expanded = [state[0]]

    while not problem.isGoalState(curr_state[0]):
        if curr_state[0] not in nodes_expanded:
            nodes_expanded.append(curr_state[0])
            successor_states = problem.getSuccessors(curr_state[0])

            for next_state in successor_states:
                temp = traj[:]
                temp.append(next_state)
                cost = 0
                if next_state[0] not in nodes_expanded:
                    # Calculate the cost of the trajectory so far.
                    for past_states in traj:
                        cost += past_states[2]
                    fringe_queue.update(temp, cost + next_state[2])

        traj = fringe_queue.pop()
        curr_state = traj[-1]

    # The loop has exited, so curr_state is the goal state.
    action_sequence = []
    for i in traj[1:]:
        action_sequence.append(i[1])

    return action_sequence


def nullHeuristic(state, problem=None):
    """
    A heuristic function estimates the cost from the current state to the nearest
    goal in the provided SearchProblem. This heuristic is trivial.
    """
    return 0


def aStarSearch(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""
    "*** YOUR CODE HERE ***"
    # The following lines are mine.
    # problem is an object of class SearchProblem.
    start_state = problem.getStartState()
    # A dummy action of -1 and a cost of 0 mark this as the start state.
    state = (start_state, -1, 0)

    fringe_queue = util.PriorityQueue()

    successor_states = problem.getSuccessors(state[0])
    for i in successor_states:
        temp = [state]
        temp.append(i)
        # This is the only difference between UCS and A*.
        fringe_queue.update(temp, i[2] + heuristic(i[0], problem))

    traj = fringe_queue.pop()
    curr_state = traj[-1]
    nodes_expanded = [state[0]]

    while not problem.isGoalState(curr_state[0]):
        if curr_state[0] not in nodes_expanded:
            nodes_expanded.append(curr_state[0])
            successor_states = problem.getSuccessors(curr_state[0])

            for next_state in successor_states:
                temp = traj[:]
                temp.append(next_state)
                cost = 0
                if next_state[0] not in nodes_expanded:
                    # Calculate the cost of the trajectory so far.
                    for past_states in traj:
                        cost += past_states[2]
                    # This is the only difference between UCS and A*.
                    fringe_queue.update(temp, cost + next_state[2] + heuristic(next_state[0], problem))

        traj = fringe_queue.pop()
        curr_state = traj[-1]

    # The loop has exited, so curr_state is the goal state.
    action_sequence = []
    for i in traj[1:]:
        action_sequence.append(i[1])

    return action_sequence


def aStarSearchLocallyObservableTunnelVision(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""
    # The following lines are mine.
    # problem is an object of class SearchProblem. Since the goal is fixed at
    # (1, 1), this is a PositionSearchProblem.
    current_state = problem.getStartState()
    path = []
    while not problem.isGoalState(current_state):
        current_path = aStarSearch(problem, heuristic)
        # Follow the current path until a wall is detected.
        for action in current_path:
            if not problem.senseWall(current_state, action):
                path.append(action)
                current_state = problem.getNextState(current_state, action)
            else:
                break
        if problem.isGoalState(current_state):
            return path
        else:
            wall_location = problem.getNextState(current_state, action)
            problem.addWall(wall_location)
            problem.setStartState(current_state)

    return path


def aStarSearchLocallyObservable(problem, heuristic=nullHeuristic):
    """Search the node that has the lowest combined cost and heuristic first."""
    # The following lines are mine.
    # problem is an object of class SearchProblem. Since the goal is fixed at
    # (1, 1), this is a PositionSearchProblem.
    current_state = problem.getStartState()
    path = []
    while not problem.isGoalState(current_state):
        current_path = aStarSearch(problem, heuristic)
        # Follow the current path until a wall is detected.
        for action in current_path:
            if not problem.getNeighboringWalls(current_state):
                path.append(action)
                current_state = problem.getNextState(current_state, action)
            else:
                break
        if problem.isGoalState(current_state):
            return path
        else:
            problem.setStartState(current_state)
    return path


def dStarSearch(problem):
    def manhattanDistance(s1, s2):
        return abs(s1[0] - s2[0]) + abs(s1[1] - s2[1])

    def CalculateKey(u):
        return [min(g[u], rhs[u]) + manhattanDistance(start_state, u), min(g[u], rhs[u])]

    def UpdateVertex(u):
        if not problem.isGoalState(u):
            rhs[u] = float('inf')
            for s, a, d in problem.getSuccessors(u):
                rhs[u] = min(rhs[u], g[s] + 1)
        if U.CheckPresence(u):
            U.Remove(u)
        if g[u] != rhs[u]:
            U.update(u, CalculateKey(u))

    def ComputeShortestPath():
        while U.TopKey() < CalculateKey(start_state) or rhs[start_state] != g[start_state]:
            u = U.Top()
            k_old = U.TopKey()
            k_new = CalculateKey(u)
            U.pop()
            if k_old < k_new:  # Some edges have become unusable.
                U.update(u, k_new)
            elif g[u] > rhs[u]:  # A shorter path has been found.
                g[u] = rhs[u]
                for s, a, d in problem.getSuccessors(u):
                    UpdateVertex(s)
            else:  # g[u] == rhs[u], i.e. the vertex is locally consistent.
                g[u] = float('inf')
                UpdateVertex(u)
                for s, a, d in problem.getSuccessors(u):
                    UpdateVertex(s)

    def senseWallAt(problem, s):
        return problem.actual_walls[s[0]][s[1]]

    def knowWallAt(problem, s):
        return problem.walls[s[0]][s[1]]

    from util import PriorityQueue

    # Initialize.
    U = PriorityQueue()
    km = 0
    start_state = problem.getStartState()
    last_state = start_state  # A copy is created, not a reference.
    problem.getNeighboringWalls(start_state)  # Update the agent's model.
    rhs = {}
    g = {}
    for state in problem.AllStates():
        rhs[state] = float('inf')
        g[state] = float('inf')
    goal = (1, 1)  # Predefined.
    rhs[goal] = 0
    U.push(goal, [manhattanDistance(goal, start_state), 0])
    ComputeShortestPath()

    actions = []
    while not problem.isGoalState(start_state):
        min_successor_value = float('inf')
        current_action = []
        for s, a, discombombulation in problem.getSuccessors(start_state):
            if 1 + g[s] < min_successor_value:
                start_state = s
                current_action = a
                min_successor_value = 1 + g[s]
        actions.append(current_action)

        # Scan for edge-weight changes after moving to the new start_state.
        changes = problem.getNeighboringWalls(start_state)
        if changes:
            km += manhattanDistance(last_state, start_state)
            last_state = start_state
            for wall in changes:
                # Since there's a wall, this cell is unreachable from the goal.
                g[wall] = float('inf')
                # When a wall is detected, up to 4 edges become infinitely
                # weighted. Update all their successors (predecessors); no
                # edge weight has gone down.
                successors = problem.getSuccessors(wall)
                for s1, a1, discombombulation in successors:
                    UpdateVertex(s1)
            ComputeShortestPath()
    return actions


# Abbreviations
bfs = breadthFirstSearch
dfs = depthFirstSearch
astar = aStarSearch
ucs = uniformCostSearch
astar2 = aStarSearchLocallyObservable
astartv = aStarSearchLocallyObservableTunnelVision
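All of these searches lean on the same contract: problem.getSuccessors(state) returns a list of (successor, action, stepCost) triples, and the successor_states variable is simply that list waiting to be pushed onto a fringe. The sketch below isolates the pattern outside the Berkeley framework; ToyProblem and its dictionary graph are hypothetical stand-ins for the framework's SearchProblem, not part of the code above.

from collections import deque

# Hypothetical stand-in for the framework's SearchProblem (an assumption,
# not part of the snippet above). States are node names; getSuccessors
# returns the (successor, action, stepCost) triples the searches expect.
class ToyProblem:
    def __init__(self, graph, start, goal):
        self.graph, self.start, self.goal = graph, start, goal

    def getStartState(self):
        return self.start

    def isGoalState(self, state):
        return state == self.goal

    def getSuccessors(self, state):
        return [(nxt, "go-" + nxt, 1) for nxt in self.graph.get(state, [])]

def bfs_actions(problem):
    # Each fringe entry is (state, actions-so-far); the successor_states
    # triples supply both the next state and the action label.
    fringe = deque([(problem.getStartState(), [])])
    visited = {problem.getStartState()}
    while fringe:
        state, actions = fringe.popleft()
        if problem.isGoalState(state):
            return actions
        for successor, action, step_cost in problem.getSuccessors(state):
            if successor not in visited:
                visited.add(successor)
                fringe.append((successor, actions + [action]))
    return None

problem = ToyProblem({"A": ["B", "C"], "B": ["D"], "C": ["D"]}, "A", "D")
print(bfs_actions(problem))  # ['go-B', 'go-D']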


nim_game.py

Source: nim_game.py (GitHub)


import math
import time
import random
import itertools

def successors(s):
    player = s[1]
    piles = s[0]
    successor_states = []
    successors = []
    # Hand the move to the other player in every successor state.
    if player == 1:
        player = 2
    elif player == 2:
        player = 1
    for i, pile in enumerate(piles):
        for remove in range(1, pile + 1):
            result = pile - remove
            if result != 0:
                next_piles = sorted(piles[:i] + [result] + piles[i + 1:])
            else:
                next_piles = sorted(piles[:i] + piles[i + 1:])
            successor_states.append(next_piles)
    successor_states.sort()
    # Drop duplicate pile configurations (the list is sorted, so groupby works).
    successor_states = list(successor_states for successor_states, _ in itertools.groupby(successor_states))
    print(successor_states)
    for i in range(len(successor_states)):
        # Use tuples so successor states compare equal to the tuple states
        # checked in terminal_test and utility_test.
        successors.append((successor_states[i], player))
    return successors

def terminal_test(state):
    if state in [([1], 1), ([], 2)]:
        terminal_state = True
    elif state in [([1], 2), ([], 1)]:
        terminal_state = True
    else:
        terminal_state = False
    return terminal_state

def utility_test(state):
    if state in [([1], 1), ([], 2)]:
        utility = 1
    elif state in [([1], 2), ([], 1)]:
        utility = -1
    else:
        if state[1] == -1:
            utility = 1
        else:
            utility = -1
    return utility

def max_value(max_state):
    v = math.inf
    # terminal_test returns a single bool; the utility comes from utility_test.
    if not terminal_test(max_state):
        for s in successors(max_state):
            v = min(v, min_value(s))
        return v
    else:
        return utility_test(max_state)

def min_value(min_state):
    v = -math.inf
    if not terminal_test(min_state):
        for s in successors(min_state):
            v = max(v, max_value(s))
        return v
    else:
        return utility_test(min_state)

def min_max(state):
    if state[1] == 1:
        utility = max_value(state)
    else:
        utility = min_value(state)
    if utility == 1:
        print("Win for Max!")
    if utility == -1:
        print("Win for Min!")
    return utility

def max_value_ab(min_state, a, b):
    v = 1
    terminal_state = terminal_test(min_state)
    utility = utility_test(min_state)
    if not terminal_state:
        for s in successors(min_state):
            if v > utility:
                utility = v
            if v >= b:
                return utility
            if v > a:
                a = v
            # Recurse on the successor s, not on the current state.
            v = min(v, min_value_ab(s, a, b))
    return utility

def min_value_ab(max_state, a, b):
    v = -1
    terminal_state = terminal_test(max_state)
    utility = utility_test(max_state)
    if not terminal_state:
        for s in successors(max_state):
            if v < utility:
                utility = v
            if v <= a:
                return utility
            if v < b:
                b = v
            # Recurse on the successor s, not on the current state.
            v = max(v, max_value_ab(s, a, b))
    return utility

def minimax_ab(state):
    start = time.time()
    alpha = 0
    beta = 0
    if state[1] == 1:
        utility_value = min_value_ab(state, alpha, beta)
    else:
        utility_value = max_value_ab(state, alpha, beta)
    end = time.time()
    return utility_value

def minimax_game():
    number_of_piles = int(input("Number of piles: "))
    maximum_pile_size = int(input("Maximum number of sticks: "))
    first_player = int(input("First player: 1 for computer, 2 for human: "))
    initial_piles = []
    for pile in range(0, number_of_piles):
        pile_size = random.randrange(1, maximum_pile_size + 1)
        initial_piles.append(pile_size)
    state = (sorted(initial_piles), first_player)
    while True:
        # Print the game state.
        print("state", state)
        if state[1] == 2:
            piles = state[0]
            pile_number = (int(input("Enter the number of pile to remove sticks from: ")) - 1)
            pile = piles[pile_number]
            pick = int(input("Number of sticks to remove: "))
            if pick <= pile:
                result = pile - pick
                if result == 0:
                    new_piles = sorted(piles[:pile_number] + piles[pile_number + 1:])
                else:
                    new_piles = sorted(piles[:pile_number] + [result] + piles[pile_number + 1:])
                state = (new_piles, 1)
            else:
                print("out of bound")
                break
        elif state[1] == 1:
            list_of_successors = successors(state)
            number_of_next_states = len(list_of_successors)
            for s, next_state in enumerate(list_of_successors):
                utility_value = minimax_ab(next_state)
                if utility_value == -1:
                    # A winning move for the computer: take it.
                    state = next_state
                elif utility_value == 1:
                    # No winning move found: pick a successor at random.
                    state = list_of_successors[random.randrange(0, number_of_next_states)]
        if state in [([1], 1), ([], 2), ([1], 2), ([], 1)]:
            util = utility_test(state)
            if util == -1:
                print("You lost")
                break
            elif util == 1:
                print("You won")
                break
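Here successor_states accumulates every distinct pile configuration reachable in one move, and successors() hands each one to the opposing player. A quick standalone check of that enumeration, reusing the successors function above on a hypothetical two-pile position:

# Reuses successors() from nim_game.py above.
# State format follows the snippet: (sorted list of piles, player to move).
state = ([1, 2], 1)
for next_state in successors(state):
    print(next_state)
# After the debugging print inside successors(), this prints the three
# distinct successor states, each handed to player 2:
# ([1], 2)     -- took both sticks from the pile of 2
# ([1, 1], 2)  -- took one stick from the pile of 2
# ([2], 2)     -- took the only stick from the pile of 1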


untitled.py

Source: untitled.py (GitHub)


#****************
# These methods live inside a minimax agent class (note the self parameter),
# which is why they reference self.index, self.depth and self.evaluationFunction.

def getAction(self, gameState):
    next_value, next_action = self.minimax_value(gameState, self.index, 0)
    # Return the chosen action rather than its minimax value.
    return next_action

def minimax_value(self, state, agent, depth):
    num_agents = state.getNumAgents()
    # Fully explored: score the state.
    if depth == self.depth and agent % num_agents == 0:
        return self.evaluationFunction(state), None
    # It's Pacman, a maximizer.
    if agent % num_agents == 0:
        return self.maximize_value(state, agent % num_agents, depth)
    # It's a ghost, a minimizer.
    return self.minimize_value(state, agent % num_agents, depth)

def minimize_value(self, state, agent, depth):
    # Pair each successor state with the action that produces it.
    successor_states = [(state.generateSuccessor(agent, action), action)
                        for action in state.getLegalActions(agent)]
    if len(successor_states) == 0:
        return self.evaluationFunction(state), None
    value = float("inf")
    value_action = None
    next_agent = agent + 1
    for successor_state, action in successor_states:
        next_value, next_action = self.minimax_value(successor_state, next_agent, depth)
        if next_value < value:
            value = next_value
            value_action = action
    return value, value_action

def maximize_value(self, state, agent, depth):
    # Pair each successor state with the action that produces it.
    successor_states = [(state.generateSuccessor(agent, action), action)
                        for action in state.getLegalActions(agent)]
    if len(successor_states) == 0:
        return self.evaluationFunction(state), None
    value = -float("inf")
    value_action = None
    next_agent = agent + 1
    next_depth = depth + 1
    for successor_state, action in successor_states:
        next_value, next_action = self.minimax_value(successor_state, next_agent, next_depth)
        if next_value > value:
            value = next_value
            value_action = action
    return value, value_action
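The successor_states comprehension is the idiom worth copying: pair every legal action with the state it generates, then scan the pairs for the best value. The same idiom works for any object exposing getLegalActions and generateSuccessor; the MiniGame class below is a minimal, hypothetical stand-in for Pacman's GameState (an assumption, not the real API), used only to exercise the pattern.

# Hypothetical toy game: a counter that a player decrements by 1 or 2.
# MiniGame stands in for Pacman's GameState, not the real API.
class MiniGame:
    def __init__(self, counter):
        self.counter = counter

    def getLegalActions(self):
        return [a for a in (1, 2) if a <= self.counter]

    def generateSuccessor(self, action):
        return MiniGame(self.counter - action)

def best_move(state):
    # The same pair-up idiom as in minimize_value/maximize_value above.
    successor_states = [(state.generateSuccessor(action), action)
                        for action in state.getLegalActions()]
    if not successor_states:
        return None
    # Score each successor by how small a counter it leaves (toy evaluation).
    value, value_action = min((s.counter, a) for s, a in successor_states)
    return value_action

print(best_move(MiniGame(4)))  # 2 -- leaves the smallest counter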


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: right from setting up the prerequisites and running your first automation test, to following best practices and diving deeper into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run hypothesis automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

