How to use the Actions method in wpt

Best JavaScript code snippets using wpt
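In web-platform-tests (wpt), synthetic pointer and keyboard input is normally driven through the Actions builder exposed by testdriver.js. The sketch below shows the common pattern, assuming a test page that loads testharness.js, testharnessreport.js, and testdriver.js and contains an element with id "target":

promise_test(async () => {
    const target = document.getElementById('target');
    // Build a WebDriver-style input sequence: move the pointer over the
    // element, press the primary button, then release it.
    const actions = new test_driver.Actions()
        .pointerMove(0, 0, {origin: target})
        .pointerDown()
        .pointerUp();
    // Dispatch the queued sequence to the browser under test.
    await actions.send();
}, 'synthesized click via test_driver.Actions');

The snippets below are collected from projects that define their own actions helpers; they are unrelated APIs that happen to share the name.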

centralized_categorical_lstm_policy.py

Source: centralized_categorical_lstm_policy.py (GitHub)


import akro
import torch
from torch import nn
import numpy as np
import copy
from torch.distributions import Categorical

from dicg.torch.modules import CategoricalLSTMModule, MLPEncoderModule


class CentralizedCategoricalLSTMPolicy(nn.Module):

    def __init__(self,
                 env_spec,
                 n_agents,
                 encoder_hidden_sizes=(64, 64),
                 embedding_dim=64,
                 lstm_hidden_size=64,
                 state_include_actions=False,
                 hidden_nonlinearity=torch.tanh,
                 hidden_w_init=nn.init.xavier_uniform_,
                 hidden_b_init=nn.init.zeros_,
                 output_nonlinearity=None,
                 output_w_init=nn.init.xavier_uniform_,
                 output_b_init=nn.init.zeros_,
                 layer_normalization=False,
                 name='CentralizedCategoricalLSTMPolicy'):

        assert isinstance(env_spec.action_space, akro.Discrete), (
            'Categorical policy only works with akro.Discrete action space.')
        super().__init__()

        self.centralized = True
        self.vectorized = True

        self._n_agents = n_agents
        self._obs_dim = env_spec.observation_space.flat_dim  # centralized obs_dim
        self._action_dim = env_spec.action_space.n
        self._embedding_dim = embedding_dim
        self.name = name
        self.state_include_actions = state_include_actions
        self._prev_actions = None
        self._prev_hiddens = None
        self._prev_cells = None

        if state_include_actions:
            mlp_input_dim = self._obs_dim + self._action_dim * n_agents
        else:
            mlp_input_dim = self._obs_dim

        self.encoder = MLPEncoderModule(
            input_dim=mlp_input_dim,
            output_dim=self._embedding_dim,
            hidden_sizes=encoder_hidden_sizes,
            hidden_nonlinearity=hidden_nonlinearity,
            hidden_w_init=hidden_w_init,
            hidden_b_init=hidden_b_init,
            output_nonlinearity=output_nonlinearity,
            output_w_init=output_w_init,
            output_b_init=output_b_init,
            layer_normalization=layer_normalization)

        self.categorical_lstm_output_layer = \
            CategoricalLSTMModule(input_size=self._embedding_dim,
                                  output_size=self._action_dim * n_agents,
                                  hidden_size=lstm_hidden_size)

    def grad_norm(self):
        return np.sqrt(
            np.sum([p.grad.norm(2).item() ** 2 for p in self.parameters()]))

    # Batch forward
    def forward(self, obs_n, avail_actions_n, actions_n=None):
        obs_n = torch.Tensor(obs_n)
        n_paths = obs_n.shape[0]
        max_path_len = obs_n.shape[1]
        if self.state_include_actions:
            obs_n = obs_n.reshape(obs_n.shape[:-1] + (self._n_agents, -1))
            assert actions_n is not None
            actions_n = torch.Tensor(actions_n).unsqueeze(-1).type(torch.LongTensor)
            # actions_n.shape = (n_paths, max_path_len, n_agents, 1)
            # Convert actions_n to one-hot encoding
            actions_onehot = torch.zeros(actions_n.shape[:-1] + (self._action_dim,))
            # actions_onehot.shape = (n_paths, max_path_len, n_agents, action_dim)
            actions_onehot.scatter_(-1, actions_n, 1)
            # Shift and pad actions_onehot by one time step
            actions_onehot_shifted = actions_onehot[:, :-1, :, :]
            # Use zeros as _prev_actions in the first time step
            zero_pad = torch.zeros(n_paths, 1, self._n_agents, self._action_dim)
            # Concatenate zeros to the beginning of actions
            actions_onehot_shifted = torch.cat((zero_pad, actions_onehot_shifted), dim=1)
            # Combine actions into obs
            obs_n = torch.cat((obs_n, actions_onehot_shifted), dim=-1)
            # Reshape obs_n back to concatenated centralized form
            obs_n = obs_n.reshape(n_paths, max_path_len, -1)  # One giant vector

        inputs = self.encoder.forward(obs_n)
        # inputs.shape = (n_paths, max_path_len, n_agents * emb_dim)
        # Reshape to be compliant with the input shape requirement of LSTM
        inputs = inputs.transpose(0, 1)
        # inputs.shape = (max_path_len, n_paths, n_agents * emb_dim)
        dists_n = self.categorical_lstm_output_layer.forward(inputs)[0]

        # Apply available actions mask
        avail_actions_n = avail_actions_n.reshape(
            avail_actions_n.shape[:-1] + (self._n_agents, -1))
        # Reshape back
        masked_probs = dists_n.probs.reshape(
            max_path_len, n_paths, self._n_agents, self._action_dim)
        masked_probs = masked_probs.transpose(0, 1)
        masked_probs = masked_probs * torch.Tensor(avail_actions_n)  # mask
        masked_probs = masked_probs / masked_probs.sum(dim=-1, keepdim=True)  # renormalize
        masked_dists_n = Categorical(probs=masked_probs)  # redefine distribution
        return masked_dists_n

    def step_forward(self, obs_n, avail_actions_n):
        """Single-step forward for stepping in envs."""
        # obs_n.shape = (n_envs, n_agents * obs_dim)
        obs_n = torch.Tensor(obs_n)
        n_envs = obs_n.shape[0]
        if self.state_include_actions:
            obs_n = obs_n.reshape(obs_n.shape[:-1] + (self._n_agents, -1))
            if self._prev_actions is None:
                self._prev_actions = torch.zeros(n_envs, self._n_agents, self._action_dim)
            obs_n = torch.cat((obs_n, self._prev_actions), dim=-1)
            # Reshape obs_n back to concatenated centralized form
            obs_n = obs_n.reshape(n_envs, -1)  # One giant vector
            # obs_n.shape = (n_envs, n_agents * (obs_dim + action_dim))

        inputs = self.encoder.forward(obs_n)
        # inputs.shape = (n_envs, n_agents * emb_dim)
        inputs = inputs.reshape(1, n_envs, -1)
        dists_n, next_h, next_c = self.categorical_lstm_output_layer.forward(
            inputs, self._prev_hiddens, self._prev_cells)
        self._prev_hiddens = next_h
        self._prev_cells = next_c

        # Apply available actions mask
        avail_actions_n = avail_actions_n.reshape(
            avail_actions_n.shape[:-1] + (self._n_agents, -1))
        masked_probs = dists_n.probs.squeeze(0).reshape(
            n_envs, self._n_agents, self._action_dim)
        masked_probs = masked_probs * torch.Tensor(avail_actions_n)  # mask
        masked_probs = masked_probs / masked_probs.sum(axis=-1, keepdims=True)  # renormalize
        masked_dists_n = Categorical(probs=masked_probs)  # redefine distribution
        return masked_dists_n

    def get_actions(self, obs_n, avail_actions_n, greedy=False):
        """Independent agent actions (not using an exponential joint action space).

        Args:
            obs_n: list of obs of all agents in ONE time step [o1, o2, ..., on]
                E.g. 3 agents: [o1, o2, o3]
        """
        with torch.no_grad():
            dists_n = self.step_forward(obs_n, avail_actions_n)
            if not greedy:
                actions_n = dists_n.sample().numpy()
            else:
                actions_n = np.argmax(dists_n.probs.numpy(), axis=-1)
            agent_infos_n = {}
            agent_infos_n['action_probs'] = [dists_n.probs[i].numpy()
                                             for i in range(len(actions_n))]
            if self.state_include_actions:
                # actions_onehot.shape = (n_envs, self._n_agents, self._action_dim)
                actions_onehot = torch.zeros(len(obs_n), self._n_agents, self._action_dim)
                actions_onehot.scatter_(
                    -1, torch.Tensor(actions_n).unsqueeze(-1).type(torch.LongTensor), 1)
                self._prev_actions = actions_onehot
            return actions_n, agent_infos_n

    def reset(self, dones):
        if all(dones):  # dones is synched
            self._prev_actions = None
            self._prev_hiddens = None
            self._prev_cells = None

    def entropy(self, observations, avail_actions, actions=None):
        dists_n = self.forward(observations, avail_actions, actions)
        entropy = dists_n.entropy()
        entropy = entropy.mean(axis=-1)  # Assuming independent actions
        return entropy

    def log_likelihood(self, observations, avail_actions, actions):
        if self.state_include_actions:
            dists_n = self.forward(observations, avail_actions, actions)
        else:
            dists_n = self.forward(observations, avail_actions)
        llhs = dists_n.log_prob(actions)
        # llhs.shape = (n_paths, max_path_length, n_agents)
        # For n agents the action probability can be treated as independent:
        # Pa = prod_i^n Pa_i
        # log(Pa) = sum_i^n log(Pa_i)
        llhs = llhs.sum(axis=-1)  # Assuming independent actions
        # llhs.shape = (n_paths, max_path_length)
        return llhs

    @property
    def recurrent(self):
        return True


dec_categorical_lstm_policy.py

Source: dec_categorical_lstm_policy.py (GitHub)


import akro
import torch
from torch import nn
import numpy as np
import copy
from torch.distributions import Categorical

from dicg.torch.modules import CategoricalLSTMModule, MLPEncoderModule


class DecCategoricalLSTMPolicy(nn.Module):

    def __init__(self,
                 env_spec,
                 n_agents,
                 encoder_hidden_sizes=(64, 64),
                 embedding_dim=64,
                 lstm_hidden_size=64,
                 state_include_actions=False,
                 hidden_nonlinearity=torch.tanh,
                 hidden_w_init=nn.init.xavier_uniform_,
                 hidden_b_init=nn.init.zeros_,
                 output_nonlinearity=None,
                 output_w_init=nn.init.xavier_uniform_,
                 output_b_init=nn.init.zeros_,
                 layer_normalization=False,
                 name='DecCategoricalLSTMPolicy'):

        assert isinstance(env_spec.action_space, akro.Discrete), (
            'Categorical policy only works with akro.Discrete action space.')
        super().__init__()

        self.centralized = True  # centralized training! Uses the centralized sampler
        self.vectorized = True

        self._n_agents = n_agents
        self._obs_dim = int(env_spec.observation_space.flat_dim / n_agents)  # dec obs_dim
        self._action_dim = env_spec.action_space.n
        self._embedding_dim = embedding_dim
        self.name = name
        self.state_include_actions = state_include_actions
        self._prev_actions = None
        self._prev_hiddens = None
        self._prev_cells = None

        if state_include_actions:
            mlp_input_dim = self._obs_dim + self._action_dim
        else:
            mlp_input_dim = self._obs_dim

        self.encoder = MLPEncoderModule(
            input_dim=mlp_input_dim,
            output_dim=self._embedding_dim,
            hidden_sizes=encoder_hidden_sizes,
            hidden_nonlinearity=hidden_nonlinearity,
            hidden_w_init=hidden_w_init,
            hidden_b_init=hidden_b_init,
            output_nonlinearity=output_nonlinearity,
            output_w_init=output_w_init,
            output_b_init=output_b_init,
            layer_normalization=layer_normalization)

        self.categorical_lstm_output_layer = \
            CategoricalLSTMModule(input_size=self._embedding_dim,
                                  output_size=self._action_dim,
                                  hidden_size=lstm_hidden_size)

    def grad_norm(self):
        return np.sqrt(
            np.sum([p.grad.norm(2).item() ** 2 for p in self.parameters()]))

    # Batch forward
    def forward(self, obs_n, avail_actions_n, actions_n=None):
        obs_n = obs_n.reshape(obs_n.shape[:-1] + (self._n_agents, -1))
        n_paths = obs_n.shape[0]
        max_path_len = obs_n.shape[1]
        if self.state_include_actions:
            assert actions_n is not None
            actions_n = torch.Tensor(actions_n).unsqueeze(-1).type(torch.LongTensor)
            # actions_n.shape = (n_paths, max_path_len, n_agents, 1)
            # Convert actions_n to one-hot encoding
            actions_onehot = torch.zeros(actions_n.shape[:-1] + (self._action_dim,))
            # actions_onehot.shape = (n_paths, max_path_len, n_agents, action_dim)
            actions_onehot.scatter_(-1, actions_n, 1)
            # Shift and pad actions_onehot by one time step
            actions_onehot_shifted = actions_onehot[:, :-1, :, :]
            # Use zeros as _prev_actions in the first time step
            zero_pad = torch.zeros(n_paths, 1, self._n_agents, self._action_dim)
            # Concatenate zeros to the beginning of actions
            actions_onehot_shifted = torch.cat((zero_pad, actions_onehot_shifted), dim=1)
            # Combine actions into obs
            obs_n = torch.cat((obs_n, actions_onehot_shifted), dim=-1)

        inputs = self.encoder.forward(obs_n)
        # inputs.shape = (n_paths, max_path_len, n_agents, emb_dim)
        # Reshape to be compliant with the input shape requirement of LSTM
        inputs = inputs.transpose(0, 1)
        # inputs.shape = (max_path_len, n_paths, n_agents, emb_dim)
        inputs = inputs.reshape(max_path_len, n_paths * self._n_agents, self._embedding_dim)
        dists_n = self.categorical_lstm_output_layer.forward(inputs)[0]

        # Apply available actions mask
        avail_actions_n = avail_actions_n.reshape(
            avail_actions_n.shape[:-1] + (self._n_agents, -1))
        # Reshape back
        masked_probs = dists_n.probs.reshape(
            max_path_len, n_paths, self._n_agents, self._action_dim)
        masked_probs = masked_probs.transpose(0, 1)
        masked_probs = masked_probs * torch.Tensor(avail_actions_n)  # mask
        masked_probs = masked_probs / masked_probs.sum(dim=-1, keepdim=True)  # renormalize
        masked_dists_n = Categorical(probs=masked_probs)  # redefine distribution
        return masked_dists_n

    def step_forward(self, obs_n, avail_actions_n):
        """Single-step forward for stepping in envs."""
        # obs_n.shape = (n_envs, n_agents * obs_dim)
        obs_n = torch.Tensor(obs_n)
        obs_n = obs_n.reshape(obs_n.shape[:-1] + (self._n_agents, -1))
        # obs_n.shape = (n_envs, n_agents, obs_dim)
        n_envs = obs_n.shape[0]
        if self.state_include_actions:
            if self._prev_actions is None:
                self._prev_actions = torch.zeros(n_envs, self._n_agents, self._action_dim)
            obs_n = torch.cat((obs_n, self._prev_actions), dim=-1)
            # obs_n.shape = (n_envs, n_agents, (obs_dim + action_dim))

        inputs = self.encoder.forward(obs_n)
        # inputs.shape = (n_envs, n_agents, emb_dim)
        inputs = inputs.reshape(1, n_envs * self._n_agents, -1)
        # inputs.shape = (1, n_envs * n_agents, emb_dim)
        dists_n, next_h, next_c = self.categorical_lstm_output_layer.forward(
            inputs, self._prev_hiddens, self._prev_cells)
        self._prev_hiddens = next_h
        self._prev_cells = next_c

        # Apply available actions mask
        avail_actions_n = avail_actions_n.reshape(
            avail_actions_n.shape[:-1] + (self._n_agents, -1))
        masked_probs = dists_n.probs.squeeze(0).reshape(
            n_envs, self._n_agents, self._action_dim)
        masked_probs = masked_probs * torch.Tensor(avail_actions_n)  # mask
        masked_probs = masked_probs / masked_probs.sum(axis=-1, keepdims=True)  # renormalize
        masked_dists_n = Categorical(probs=masked_probs)  # redefine distribution
        return masked_dists_n

    def get_actions(self, obs_n, avail_actions_n, greedy=False):
        """Independent agent actions (not using an exponential joint action space).

        Args:
            obs_n: list of obs of all agents in ONE time step [o1, o2, ..., on]
                E.g. 3 agents: [o1, o2, o3]
        """
        with torch.no_grad():
            dists_n = self.step_forward(obs_n, avail_actions_n)
            if not greedy:
                actions_n = dists_n.sample().numpy()
            else:
                actions_n = np.argmax(dists_n.probs.numpy(), axis=-1)
            agent_infos_n = {}
            agent_infos_n['action_probs'] = [dists_n.probs[i].numpy()
                                             for i in range(len(actions_n))]
            if self.state_include_actions:
                # actions_onehot.shape = (n_envs, self._n_agents, self._action_dim)
                actions_onehot = torch.zeros(len(obs_n), self._n_agents, self._action_dim)
                actions_onehot.scatter_(
                    -1, torch.Tensor(actions_n).unsqueeze(-1).type(torch.LongTensor), 1)
                self._prev_actions = actions_onehot
            return actions_n, agent_infos_n

    def reset(self, dones):
        if all(dones):  # dones is synched
            self._prev_actions = None
            self._prev_hiddens = None
            self._prev_cells = None

    def entropy(self, observations, avail_actions, actions=None):
        dists_n = self.forward(observations, avail_actions, actions)
        entropy = dists_n.entropy()
        entropy = entropy.mean(axis=-1)  # Assuming independent actions
        return entropy

    def log_likelihood(self, observations, avail_actions, actions):
        if self.state_include_actions:
            dists_n = self.forward(observations, avail_actions, actions)
        else:
            dists_n = self.forward(observations, avail_actions)
        llhs = dists_n.log_prob(actions)
        # llhs.shape = (n_paths, max_path_length, n_agents)
        # For n agents the action probability can be treated as independent:
        # Pa = prod_i^n Pa_i
        # log(Pa) = sum_i^n log(Pa_i)
        llhs = llhs.sum(axis=-1)  # Assuming independent actions
        # llhs.shape = (n_paths, max_path_length)
        return llhs

    @property
    def recurrent(self):
        return True


dicg_ce_categorical_lstm_policy.py

Source: dicg_ce_categorical_lstm_policy.py (GitHub)


import akro
import torch
from torch import nn
import numpy as np
import copy
from torch.distributions import Categorical

from dicg.torch.modules import CategoricalLSTMModule, DICGBase


class DICGCECategoricalLSTMPolicy(DICGBase):

    def __init__(self,
                 env_spec,
                 n_agents,
                 encoder_hidden_sizes=(128, ),
                 embedding_dim=64,
                 attention_type='general',
                 n_gcn_layers=2,
                 residual=True,
                 gcn_bias=True,
                 lstm_hidden_size=64,
                 state_include_actions=False,
                 name='dicg_ce_categorical_mlp_policy'):

        assert isinstance(env_spec.action_space, akro.Discrete), (
            'Categorical policy only works with akro.Discrete action space.')

        super().__init__(
            env_spec=env_spec,
            n_agents=n_agents,
            encoder_hidden_sizes=encoder_hidden_sizes,
            embedding_dim=embedding_dim,
            attention_type=attention_type,
            n_gcn_layers=n_gcn_layers,
            gcn_bias=gcn_bias,
            state_include_actions=state_include_actions,
            name=name
        )

        self.residual = residual
        self.state_include_actions = state_include_actions
        self._prev_actions = None
        self._prev_hiddens = None
        self._prev_cells = None

        # Policy layer
        self.categorical_lstm_output_layer = \
            CategoricalLSTMModule(input_size=self._embedding_dim,
                                  output_size=self._action_dim,
                                  hidden_size=lstm_hidden_size)
        self.layers.append(self.categorical_lstm_output_layer)

    # Batch forward
    def forward(self, obs_n, avail_actions_n, actions_n=None):
        obs_n = torch.Tensor(obs_n)
        obs_n = obs_n.reshape(obs_n.shape[:-1] + (self._n_agents, -1))
        n_paths = obs_n.shape[0]
        max_path_len = obs_n.shape[1]
        if self.state_include_actions:
            assert actions_n is not None
            actions_n = torch.Tensor(actions_n).unsqueeze(-1).type(torch.LongTensor)
            # actions_n.shape = (n_paths, max_path_len, n_agents, 1)
            # Convert actions_n to one-hot encoding
            actions_onehot = torch.zeros(actions_n.shape[:-1] + (self._action_dim,))
            # actions_onehot.shape = (n_paths, max_path_len, n_agents, action_dim)
            actions_onehot.scatter_(-1, actions_n, 1)
            # Shift and pad actions_onehot by one time step
            actions_onehot_shifted = actions_onehot[:, :-1, :, :]
            # Use zeros as _prev_actions in the first time step
            zero_pad = torch.zeros(n_paths, 1, self._n_agents, self._action_dim)
            # Concatenate zeros to the beginning of actions
            actions_onehot_shifted = torch.cat((zero_pad, actions_onehot_shifted), dim=1)
            # Combine actions into obs
            obs_n = torch.cat((obs_n, actions_onehot_shifted), dim=-1)

        avail_actions_n = avail_actions_n.reshape(
            avail_actions_n.shape[:-1] + (self._n_agents, -1))

        embeddings_collection, attention_weights = super().forward(obs_n)
        if self.residual:
            inputs = embeddings_collection[0] + embeddings_collection[-1]
        else:
            inputs = embeddings_collection[-1]

        # inputs.shape = (n_paths, max_path_len, n_agents, emb_dim)
        inputs = inputs.transpose(0, 1)
        # inputs.shape = (max_path_len, n_paths, n_agents, emb_dim)
        inputs = inputs.reshape(
            max_path_len, n_paths * self._n_agents, self._embedding_dim)
        dists_n = self.categorical_lstm_output_layer.forward(inputs)[0]

        # Apply available actions mask
        masked_probs = dists_n.probs.reshape(
            max_path_len, n_paths, self._n_agents, self._action_dim)
        masked_probs = masked_probs.transpose(0, 1)
        masked_probs = masked_probs * torch.Tensor(avail_actions_n)  # mask
        masked_probs = masked_probs / masked_probs.sum(dim=-1, keepdim=True)  # renormalize
        masked_dists_n = Categorical(probs=masked_probs)  # redefine distribution
        return masked_dists_n, attention_weights

    def step_forward(self, obs_n, avail_actions_n):
        """Single-step forward for stepping in envs."""
        obs_n = torch.Tensor(obs_n)
        obs_n = obs_n.reshape(obs_n.shape[:-1] + (self._n_agents, -1))
        n_envs = obs_n.shape[0]
        avail_actions_n = avail_actions_n.reshape(
            avail_actions_n.shape[:-1] + (self._n_agents, -1))
        if self.state_include_actions:
            if self._prev_actions is None:
                self._prev_actions = torch.zeros(n_envs, self._n_agents, self._action_dim)
            obs_n = torch.cat((obs_n, self._prev_actions), dim=-1)

        embeddings_collection, attention_weights = super().forward(obs_n)
        if self.residual:
            inputs = embeddings_collection[0] + embeddings_collection[-1]
        else:
            inputs = embeddings_collection[-1]
        # inputs.shape = (n_envs, n_agents, emb_dim)
        inputs = inputs.reshape(
            1, n_envs * self._n_agents, self._embedding_dim)
        dists_n, next_h, next_c = self.categorical_lstm_output_layer.forward(
            inputs, self._prev_hiddens, self._prev_cells)
        self._prev_hiddens = next_h
        self._prev_cells = next_c

        # Apply available actions mask
        masked_probs = dists_n.probs.reshape(n_envs, self._n_agents, self._action_dim)
        masked_probs = masked_probs * torch.Tensor(avail_actions_n)  # mask
        masked_probs = masked_probs / masked_probs.sum(axis=-1, keepdims=True)  # renormalize
        masked_dists_n = Categorical(probs=masked_probs)  # redefine distribution
        return masked_dists_n, attention_weights

    def get_actions(self, obs_n, avail_actions_n, greedy=False):
        """Independent agent actions (not using an exponential joint action space).

        Args:
            obs_n: list of obs of all agents in ONE time step [o1, o2, ..., on]
                E.g. 3 agents: [o1, o2, o3]
        """
        with torch.no_grad():
            dists_n, attention_weights = self.step_forward(obs_n, avail_actions_n)
            if not greedy:
                actions_n = dists_n.sample().numpy()
            else:
                actions_n = np.argmax(dists_n.probs.numpy(), axis=-1)
            agent_infos_n = {}
            agent_infos_n['action_probs'] = [dists_n.probs[i].numpy()
                                             for i in range(len(actions_n))]
            agent_infos_n['attention_weights'] = [attention_weights.numpy()[i, :]
                                                  for i in range(len(actions_n))]
            if self.state_include_actions:
                # actions_onehot.shape = (n_envs, self._n_agents, self._action_dim)
                actions_onehot = torch.zeros(len(obs_n), self._n_agents, self._action_dim)
                actions_onehot.scatter_(
                    -1, torch.Tensor(actions_n).unsqueeze(-1).type(torch.LongTensor), 1)
                self._prev_actions = actions_onehot
            return actions_n, agent_infos_n

    def reset(self, dones):
        if all(dones):  # dones is synched
            self._prev_actions = None
            self._prev_hiddens = None
            self._prev_cells = None

    def entropy(self, observations, avail_actions, actions=None):
        dists_n, _ = self.forward(observations, avail_actions, actions)
        entropy = dists_n.entropy()
        entropy = entropy.mean(axis=-1)  # Assuming independent actions
        return entropy

    def log_likelihood(self, observations, avail_actions, actions):
        if self.state_include_actions:
            dists_n, _ = self.forward(observations, avail_actions, actions)
        else:
            dists_n, _ = self.forward(observations, avail_actions)
        llhs = dists_n.log_prob(actions)
        # llhs.shape = (n_paths, max_path_length, n_agents)
        # For n agents the action probability can be treated as independent:
        # Pa = prod_i^n Pa_i
        # log(Pa) = sum_i^n log(Pa_i)
        llhs = llhs.sum(axis=-1)  # Assuming independent actions
        # llhs.shape = (n_paths, max_path_length)
        return llhs

    @property
    def recurrent(self):
        return True


tree-massactions.test.js

Source: tree-massactions.test.js (GitHub)


...
            actions: [{
                type: 'delete'
            }]
        }];
        model.recursiveObserveActions(actions);
        expect(actions[0].visible).toBeDefined();
        expect(actions[0].visible()).toBeFalsy();
    });

    it('check when actions is absent', function () {
        var actions = [{
            type: 'delete'
        }];
        model.recursiveObserveActions(actions);
        expect(actions[0].visible).toBeUndefined();
    });

    it('check nested level actions', function () {
        var actions = [{
            type: 'availability',
            actions: [{
                type: 'delete',
                actions: [{
                    type: 'safely'
                }]
            }]
        }];
        model.recursiveObserveActions(actions);
        expect(actions[0].actions[0].visible).toBeDefined();
        expect(actions[0].actions[0].visible()).toBeFalsy();
    });

    it('check reference to parent object', function () {
        var actions = [{
            type: 'availability',
            actions: [{
                type: 'delete'
            }]
        }];
        model.recursiveObserveActions(actions);
        expect(actions[0].parent).toBe(actions);
    });
});

it('check getAction', function () {
    expect(model.getAction('availability')).toBe(model.actions()[0]);
    expect(model.getAction('availability.enable')).toBe(model.actions()[0].actions[0]);
    expect(model.getAction('absent')).toBeFalsy();
});

describe('check hideSubmenus', function () {
    it('with class actions', function () {
        model.actions()[0].visible(true);
        expect(model.actions()[0].visible()).toBeTruthy();
        model.hideSubmenus();
        expect(model.actions()[0].visible()).toBeFalsy();
...
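The behavior these specs pin down is that recursiveObserveActions walks a nested tree of mass-actions and attaches an observable visible flag (plus a parent back-reference) to every action that has sub-actions, leaving leaf actions untouched. A rough sketch of such a traversal, using knockout observables as the spec implies, and not Magento's actual implementation:

var ko = require('knockout');

// Hypothetical sketch of the traversal the specs above exercise.
function recursiveObserveActions(actions) {
    actions.forEach(function (action) {
        if (action.actions) {
            action.visible = ko.observable(false); // submenus start hidden
            action.parent = actions;               // back-reference checked by the spec
            recursiveObserveActions(action.actions);
        }
    });
    return actions;
}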


actions.js

Source: actions.js (GitHub)


import * as allUsersActions from './allUsers/actions';
import * as activeUserActions from './activeUser/actions';
import * as allProductsActions from './allProducts/actions';
import * as featuredProductsActions from './featuredProducts/actions';
import * as popularProductsActions from './popularProducts/actions';
import * as similarProductsActions from './similarProducts/actions';
import * as activeProductActions from './activeProduct/actions';
import * as allOrdersActions from './allOrders/actions';
import * as activeOrderActions from './activeOrder/actions';
import * as cartActions from './cart/actions';
import * as wishlistActions from './wishlist/actions';
import * as cartPreviewActions from './cartPreview/actions';
import * as checkoutSuccessActions from './checkoutSuccess/actions';

export {
    allUsersActions,
    activeUserActions,
    allProductsActions,
    featuredProductsActions,
    popularProductsActions,
    similarProductsActions,
    activeProductActions,
    allOrdersActions,
    activeOrderActions,
    cartActions,
    wishlistActions,
    cartPreviewActions,
    checkoutSuccessActions,
};
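This file is a plain barrel module: each feature folder's action creators are re-exported under one namespace so consumers need a single import path. Usage would look like the following, where store, addItem, and removeItem are hypothetical stand-ins for the app's real store and action creators:

import { cartActions, wishlistActions } from './actions';

// Hypothetical store and action creators, for illustration only.
store.dispatch(cartActions.addItem(product));
store.dispatch(wishlistActions.removeItem(product.id));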


Using AI Code Generation


var wptoolkit = require('wptoolkit');
var actions = wptoolkit.actions;


Using AI Code Generation


const wptools = require('wptools');

// Note: this assumes wptools exposes an actions() helper.
wptools.actions('Albert Einstein').then((data) => {
    console.log(data);
});


Using AI Code Generation


var wptoolkit = require('wptoolkit');
var actions = wptoolkit.actions;
var assert = require('assert');
var webdriver = require('selenium-webdriver');
var By = webdriver.By;
var until = webdriver.until;

var driver = new webdriver.Builder()
    .forBrowser('chrome')
    .build();

// (the driver.get() navigation step was stripped from the original snippet)
driver.findElement(By.name('q')).sendKeys('webdriver');
driver.findElement(By.name('btnG')).click();
driver.wait(until.titleIs('webdriver - Google Search'), 1000);
driver.quit();

#### actions.click(webElement, [callback])
actions.click(driver.findElement(By.id('submit')));

#### actions.clickAndWait(webElement, [callback])
actions.clickAndWait(driver.findElement(By.id('submit')));

#### actions.clickAndWaitForElement(webElement, elementToWaitFor, [callback])
actions.clickAndWaitForElement(
    driver.findElement(By.id('submit')),
    driver.findElement(By.id('success'))
);

#### actions.clickAndWaitForText(webElement, textToWaitFor, [callback])
actions.clickAndWaitForText(
    driver.findElement(By.id('submit'))
    // (the text argument was stripped from the original snippet)
);

#### actions.clickAndWaitForUrl(webElement, urlToWaitFor, [callback])


Using AI Code Generation


const wptools = require('wptools');
const fs = require('fs');
const path = require('path');
const { exec } = require('child_process');
const { createObjectCsvWriter } = require('csv-writer');
const { spawn } = require('child_process');

const csvWriter = createObjectCsvWriter({
    // (the output path option was stripped from the original snippet)
    header: [
        { id: 'title', title: 'Title' },
        { id: 'url', title: 'URL' },
        { id: 'text', title: 'Text' },
    ],
});

const data = [];
const writeData = (title, url, text) => {
    data.push({ title, url, text });
    csvWriter.writeRecords(data).then(() => console.log('The CSV file was written successfully'));
};

const getWikiData = (title) => {
    wptools
        .page(title)
        .get()
        .then((page) => {
            const wikiUrl = page.url();
            const wikiText = page.text();
            writeData(title, wikiUrl, wikiText);
        })
        .catch((err) => {
            console.log(err);
        });
};

const getWikiPage = (title) => {
    wptools
        .page(title)
        .get()
        .then((page) => {
            console.log(page.url());
            console.log(page.text());
        })
        .catch((err) => {
            console.log(err);
        });
};

const getWikiDataAsync = async (title) => {
    const page = await wptools.page(title).get();
    const wikiUrl = page.url();
    const wikiText = page.text();
    writeData(title, wikiUrl, wikiText);
};

const getWikiPageAsync = async (title) => {
    const page = await wptools.page(title).get();
    console.log(page.url());
    console.log(page.text());
};

const getWikiDataAsyncAwait = async (title) => {
    const page = await wptools.page(title).get();
    const wikiUrl = page.url();
    const wikiText = page.text();
    writeData(title, wikiUrl, wikiText);
};

const getWikiPageAsyncAwait = async (title) => {
    const page = await wptools.page(title).get();
    console.log(page.url());
    console.log(page.text());
};


Using AI Code Generation


const wptoolkit = require('wptoolkit');
const { Actions, Page, Browser } = wptoolkit;


Using AI Code Generation


const wptool = require('wptool');

wptool.actions('wp-admin');
wptool.actions('wp-admin', 'wp-login.php');
wptool.actions('wp-admin', 'wp-login.php', 'admin', 'admin');
wptool.actions('wp-admin', 'wp-login.php', 'admin', 'admin', 'login');
wptool.actions('wp-admin', 'wp-login.php', 'admin', 'admin', 'login', 'login');
wptool.actions('wp-admin', 'wp-login.php', 'admin', 'admin', 'login', 'login', 'login');
wptool.actions('wp-admin', 'wp-login.php', 'admin', 'admin', 'login', 'login', 'login', 'login');
wptool.actions('wp-admin', 'wp-login.php', 'admin', 'admin', 'login', 'login', 'login', 'login', 'login');


Using AI Code Generation


var wptoolkit = require('wptoolkit');

// The original calls wp.actions(); wp is assumed to be the wptoolkit instance.
wptoolkit.actions().then(function (data) {
    console.log(data);
}).catch(function (err) {
    console.log(err);
});


Using AI Code Generation


var Actions = require('wptoolkit').Actions;
var actions = new Actions();

actions.click({ id: 'id' });
actions.type({ id: 'id' }, 'some text');
actions.submit({ id: 'id' });

// select accepts a value, text, index, or label selector.
actions.select({ id: 'id' }, 'value');
actions.select({ id: 'id' }, 'text');
actions.select({ id: 'id' }, 'index');
actions.select({ id: 'id' }, 'label');


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub: from setting up the prerequisites and running your first automation test, through best practices, to advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, such as Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run wpt automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
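Pointing an existing selenium-webdriver script at a remote grid mostly means swapping the local builder for a hub URL with credentials. A minimal sketch, assuming a LambdaTest-style hub endpoint with placeholder credentials and illustrative capabilities (consult the provider's capability documentation for the exact fields):

const webdriver = require('selenium-webdriver');

// Placeholder credentials: substitute your own username and access key.
const USERNAME = 'your_username';
const ACCESS_KEY = 'your_access_key';
const GRID_URL = 'https://' + USERNAME + ':' + ACCESS_KEY + '@hub.lambdatest.com/wd/hub';

async function run() {
    const driver = await new webdriver.Builder()
        .usingServer(GRID_URL)
        .withCapabilities({
            browserName: 'chrome',
            // Provider-specific options vary; treat this block as illustrative.
            'LT:Options': { platformName: 'Windows 10', build: 'wpt-actions-demo' },
        })
        .build();
    try {
        await driver.get('https://example.com');
        console.log(await driver.getTitle());
    } finally {
        await driver.quit();
    }
}

run().catch(console.error);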

Try LambdaTest now!

Get 100 automation test minutes free!

