How to use the _init_state method in tempest

Best Python code snippet using tempest_python

ast_env.py

Source: ast_env.py (GitHub)


1""" Gym environment to turn general AST tasks into garage compatible problems."""2import gym3import numpy as np4from cached_property import cached_property5from garage.envs.base import Step6from garage.envs.env_spec import EnvSpec7from ast_toolbox.rewards import ExampleAVReward8from ast_toolbox.simulators import ExampleAVSimulator9class ASTEnv(gym.Env):10 r""" Gym environment to turn general AST tasks into garage compatible problems.11 Parameters12 ----------13 open_loop : bool14 True if the simulation is open-loop, meaning that AST must generate all actions ahead of time, instead15 of being able to output an action in sync with the simulator, getting an observation back before16 the next action is generated. False to get interactive control, which requires that `blackbox_sim_state`17 is also False.18 blackbox_sim_state : bool19 True if the true simulation state can not be observed, in which case actions and the initial conditions are20 used as the observation. False if the simulation state can be observed, in which case it will be used.21 fixed_init_state : bool22 True if the initial state is fixed, False to sample the initial state for each rollout from the observaation23 space.24 s_0 : array_like25 The initial state for the simulation (ignored if `fixed_init_state` is False)26 simulator : :py:class:`ast_toolbox.simulators.ASTSimulator`27 The simulator wrapper, inheriting from `ast_toolbox.simulators.ASTSimulator`.28 reward_function : :py:class:`ast_toolbox.rewards.ASTReward`29 The reward function, inheriting from `ast_toolbox.rewards.ASTReward`.30 spaces : :py:class:`ast_toolbox.spaces.ASTSpaces`31 The observation and action space definitions, inheriting from `ast_toolbox.spaces.ASTSpaces`.32 """33 def __init__(self,34 open_loop=True,35 blackbox_sim_state=True,36 fixed_init_state=False,37 s_0=None,38 simulator=None,39 reward_function=None,40 spaces=None):41 # Constant hyper-params -- set by user42 self.open_loop = open_loop43 self.blackbox_sim_state = blackbox_sim_state # is this redundant?44 self.spaces = spaces45 # These are set by reset, not the user46 self._done = False47 self._reward = 0.048 self._step = 049 self._action = None50 self._actions = []51 self._first_step = True52 self.reward_range = (-float('inf'), float('inf'))53 self.metadata = None54 self._cum_reward = 0.055 if s_0 is None:56 self._init_state = self.observation_space.sample()57 else:58 self._init_state = s_059 self._fixed_init_state = fixed_init_state60 self.simulator = simulator61 if self.simulator is None:62 self.simulator = ExampleAVSimulator()63 self.reward_function = reward_function64 if self.reward_function is None:65 self.reward_function = ExampleAVReward()66 if hasattr(self.simulator, "vec_env_executor") and callable(getattr(self.simulator, "vec_env_executor")):67 self.vectorized = True68 else:69 self.vectorized = False70 # super().__init__(self)71 def step(self, action):72 r"""73 Run one timestep of the environment's dynamics. 
When end of episode74 is reached, reset() should be called to reset the environment's internal state.75 Parameters76 ----------77 action : array_like78 An action provided by the environment.79 Returns80 -------81 : :py:func:`garage.envs.base.Step`82 A step in the rollout.83 Contains the following information:84 - observation (array_like): Agent's observation of the current environment.85 - reward (float): Amount of reward due to the previous action.86 - done (bool): Is the current step a terminal or goal state, ending the rollout.87 - actions (array_like): The action taken at the current.88 - state (array_like): The cloned simulation state at the current cell, used for resetting if chosen to start a rollout.89 - is_terminal (bool): Whether or not the current cell is a terminal state.90 - is_goal (bool): Whether or not the current cell is a goal state.91 """92 self._env_state_before_action = self._env_state.copy()93 self._action = action94 self._actions.append(action)95 action_return = self._action96 # Update simulation step97 obs = self.simulator.step(self._action)98 if (obs is None) or (self.open_loop is True) or (self.blackbox_sim_state):99 obs = np.array(self._init_state)100 if self.simulator.is_terminal() or self.simulator.is_goal():101 self._done = True102 # Calculate the reward for this step103 self._reward = self.reward_function.give_reward(104 action=self._action,105 info=self.simulator.get_reward_info())106 self._cum_reward += self._reward107 # Update instance attributes108 self._step = self._step + 1109 self._simulator_state = self.simulator.clone_state()110 self._env_state = np.concatenate((self._simulator_state,111 np.array([self._cum_reward]),112 np.array([self._step])),113 axis=0)114 return Step(observation=obs,115 reward=self._reward,116 done=self._done,117 actions=action_return,118 state=self._env_state_before_action,119 is_terminal=self.simulator.is_terminal(),120 is_goal=self.simulator.is_goal())121 def simulate(self, actions):122 r"""Run a full simulation rollout.123 Parameters124 ----------125 actions : list[array_like]126 A list of array_likes, where each member is the action taken at that step.127 Returns128 -------129 int130 The step of the trajectory where a collision was found, or -1 if a collision was not found.131 dict132 A dictionary of simulation information for logging and diagnostics.133 """134 if not self._fixed_init_state:135 self._init_state = self.observation_space.sample()136 return self.simulator.simulate(actions, self._init_state)137 def reset(self):138 r"""Resets the state of the environment, returning an initial observation.139 Returns140 -------141 observation : array_like142 The initial observation of the space. 
(Initial reward is assumed to be 0.)143 """144 self._actions = []145 if not self._fixed_init_state:146 self._init_state = self.observation_space.sample()147 self._done = False148 self._reward = 0.0149 self._cum_reward = 0.0150 self._action = None151 self._actions = []152 self._first_step = True153 self._step = 0154 obs = np.array(self.simulator.reset(self._init_state))155 if not self._fixed_init_state:156 obs = np.concatenate((obs, np.array(self._init_state)), axis=0)157 self._simulator_state = self.simulator.clone_state()158 self._env_state = np.concatenate((self._simulator_state,159 np.array([self._cum_reward]),160 np.array([self._step])),161 axis=0)162 return obs163 @property164 def action_space(self):165 r"""Convenient access to the environment's action space.166 Returns167 -------168 : `gym.spaces.Space <https://gym.openai.com/docs/#spaces>`_169 The action space of the reinforcement learning problem.170 """171 if self.spaces is None:172 # return self._to_garage_space(self.simulator.action_space)173 return self.simulator.action_space174 else:175 return self.spaces.action_space176 @property177 def observation_space(self):178 r"""Convenient access to the environment's observation space.179 Returns180 -------181 : `gym.spaces.Space <https://gym.openai.com/docs/#spaces>`_182 The observation space of the reinforcement learning problem.183 """184 if self.spaces is None:185 # return self._to_garage_space(self.simulator.observation_space)186 return self.simulator.observation_space187 else:188 return self.spaces.observation_space189 def log(self):190 r"""Calls the simulator's `log` function.191 """192 self.simulator.log()193 def render(self, **kwargs):194 r"""Calls the simulator's `render` function, if it exists.195 Parameters196 ----------197 kwargs :198 Keyword arguments used in the simulators `render` function.199 Returns200 -------201 None or object202 Returns the output of the simulator's `render` function, or None if the simulator has no `render` function.203 """204 if hasattr(self.simulator, "render") and callable(getattr(self.simulator, "render")):205 return self.simulator.render(**kwargs)206 else:207 return None208 def close(self):209 r"""Calls the simulator's `close` function, if it exists.210 Returns211 -------212 None or object213 Returns the output of the simulator's `close` function, or None if the simulator has no `close` function.214 """215 if hasattr(self.simulator, "close") and callable(getattr(self.simulator, "close")):216 return self.simulator.close()217 else:218 return None219 @cached_property220 def spec(self):221 r"""Returns a garage environment specification.222 Returns223 -------224 :py:class:`garage.envs.env_spec.EnvSpec`225 A garage environment specification.226 """227 return EnvSpec(228 observation_space=self.observation_space,...
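
The `_init_state` logic in ASTEnv lives in `__init__` and `reset()`: the initial state is taken from `s_0` when one is supplied, and otherwise it is resampled from the observation space on every reset unless `fixed_init_state` is True. The sketch below is a minimal toy environment (made-up spaces and names, not the ast_toolbox or tempest API) that isolates just that pattern:

import gym
import numpy as np

class FixedOrSampledInitEnv(gym.Env):
    """Toy environment mirroring how ASTEnv chooses its _init_state."""

    def __init__(self, s_0=None, fixed_init_state=False):
        self.observation_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(3,))
        self.action_space = gym.spaces.Box(low=-1.0, high=1.0, shape=(1,))
        self._fixed_init_state = fixed_init_state
        # Same rule as ASTEnv.__init__: use s_0 if given, otherwise sample one.
        self._init_state = self.observation_space.sample() if s_0 is None else np.asarray(s_0)

    def reset(self):
        # Same rule as ASTEnv.reset: resample unless the initial state is fixed.
        if not self._fixed_init_state:
            self._init_state = self.observation_space.sample()
        return np.array(self._init_state)

env = FixedOrSampledInitEnv(s_0=[0.1, 0.2, 0.3], fixed_init_state=True)
print(env.reset())  # always [0.1 0.2 0.3] because the initial state is fixed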


ar.py

Source: ar.py (GitHub)


...
        self.base_phi = 0.0
        self.theta = 0.0
        self.base_theta = 0.0
        self.mean_distance_to_points = 300  # cm
        self._init_state()

    def _init_state(self):
        # Create some random colors
        self.color = np.random.randint(0, 255, (100, 3))
        # Take first frame and find corners in it
        _, self.old_frame = self.video.read()
        self.old_gray = cv2.cvtColor(self.old_frame, cv2.COLOR_BGR2GRAY)
        self.p0 = cv2.goodFeaturesToTrack(self.old_gray, mask=None, **self.feature_params)
        self.good_features_size = len(self.p0)
        self.init_coords = self.p0
        self.base_phi = self.phi
        self.base_theta = self.theta
        # Create a mask image for drawing purposes
        self.mask = np.zeros_like(self.old_frame)

    def __del__(self):
        self.video.release()

    def get_frame(self, draw_traces=False):
        success, frame = self.video.read()
        frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # calculate optical flow
        p1, st, err = cv2.calcOpticalFlowPyrLK(self.old_gray, frame_gray, self.p0, None, **self.lk_params)
        # Select good points
        if p1 is None:
            self._init_state()
            return frame, self.phi, self.theta
        good_new = p1[st == 1]
        good_old = self.p0[st == 1]
        good_init = self.init_coords[st == 1]
        delta_px = np.array([[new[0] - init[0], new[1] - init[1]] for new, init in zip(good_new, good_init)])
        if len(delta_px) == 0:
            self._init_state()
            return frame, self.phi, self.theta
        mean_delta = np.mean(delta_px, axis=0)
        if (abs(mean_delta[0]) > self.width / 2 or
                abs(mean_delta[1]) > self.height / 2 or
                len(good_new) < self.good_features_size / 3):
            print('regenerating features')
            self._init_state()
            return frame, self.phi, self.theta
        self.theta = self.base_theta + mean_delta[0] / self.width * h_fov  # 62
        self.phi = self.base_phi + mean_delta[1] / self.height * v_fov  # 37
        # print('phi = {},\ttheta = {}'.format(self.phi, self.theta))
        # draw the tracks
        if draw_traces:
            for i, (new, old, init_coords) in enumerate(zip(good_new, good_old, good_init)):
                a, b = new.ravel()
                c, d = old.ravel()
                x, y = init_coords.ravel()
                self.mask = cv2.line(self.mask, (a, b), (c, d), self.color[i].tolist(), 2)
                self.mask = cv2.line(self.mask, (a, b), (x, y), self.color[i].tolist(), 2)
                frame = cv2.circle(frame, (a, b), 5, self.color[i].tolist(), -1)
        img = cv2.add(frame, self.mask)
        k = cv2.waitKey(30) & 0xff
        if k == 27:
            self._init_state()
            return img, self.phi, self.theta
        # Now update the previous frame and previous points
        self.old_gray = frame_gray.copy()
        self.p0 = good_new.reshape(-1, 1, 2)
        self.init_coords = good_init.reshape(-1, 1, 2)
        return img, self.phi, self.theta


class AugmentedSpace(object):
    def __init__(self, width, height, objects):
        self.scene = pyrender.Scene(bg_color=np.zeros(4))
        for tmesh in objects:
            tmesh.apply_transform(np.array([
                [1.0, 0.0, 0.0, 0.0],
                [0.0, 0.0, 1.0, 0.0],
                [0.0, 1.0, 0.0, 0.0],
...
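
Here `_init_state` re-seeds a Lucas-Kanade tracker: it grabs a fresh frame, detects corners with `cv2.goodFeaturesToTrack`, and clears the drawing mask, and `get_frame` calls it again whenever tracking degrades. The snippet depends on `self.feature_params` and `self.lk_params`, which are defined earlier in ar.py and not shown here; typical values, following the standard OpenCV optical-flow example (illustrative only, not necessarily this file's actual settings), look like this:

import cv2

# Parameters for cv2.goodFeaturesToTrack (Shi-Tomasi corner detection)
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)

# Parameters for cv2.calcOpticalFlowPyrLK (Lucas-Kanade optical flow)
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))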


seq2seq.py

Source: seq2seq.py (GitHub)


...
        Either the encoder and decoder have the same number of layers,
        or the encoder has n layers and the decoder has 1 layer.
        '''
        if mode == 0:
            ''' lstm -> lstm '''
            encoder_hidden = self._init_state(encoder_hidden)
        elif mode == 1:
            ''' gru -> gru '''
            encoder_hidden = self._init_state(encoder_hidden)
        elif mode == 2:
            ''' gru -> lstm '''
            encoder_hidden = (encoder_hidden, encoder_hidden)
            encoder_hidden = self._init_state(encoder_hidden)
        elif mode == 3:
            ''' lstm -> gru '''
            encoder_hidden = encoder_hidden[0]
            encoder_hidden = self._init_state(encoder_hidden)
        return encoder_hidden

    def _init_state(self, encoder_hidden):
        if encoder_hidden is None:
            return None
        if isinstance(encoder_hidden, tuple):
            encoder_hidden = tuple([self._cat_directions(h) for h in encoder_hidden])
        else:
            encoder_hidden = self._cat_directions(encoder_hidden)
        return encoder_hidden

    def _cat_directions(self, h):
        if self.encoder.bidirectional:
            h = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
...
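
In this snippet, `_init_state` normalizes the encoder's final hidden state before it is handed to the decoder: when the encoder is bidirectional, `_cat_directions` concatenates the forward and backward states of each layer along the feature dimension. The standalone snippet below (shapes and variable names are illustrative) shows the tensor operation involved:

import torch

num_layers, num_directions, batch, hidden = 2, 2, 4, 8
# Hidden state as returned by a bidirectional nn.GRU/nn.LSTM:
# directions are interleaved along dim 0 -> (num_layers * 2, batch, hidden)
h = torch.randn(num_layers * num_directions, batch, hidden)

# Same operation as _cat_directions: pair each layer's forward/backward states
h_cat = torch.cat([h[0:h.size(0):2], h[1:h.size(0):2]], 2)
print(h_cat.shape)  # torch.Size([2, 4, 16]) -> (num_layers, batch, 2 * hidden)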


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run tempest automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
