How to use get_num_tests method in Slash

Best Python code snippets using slash

compare_ppo_dqn.py

Source: compare_ppo_dqn.py (GitHub)

...
#######################################################################'''\
          .format(test_no))
    test_once()

# Visualisation {{{1 #
def get_num_tests(keys, measurement_name):
    # Runs are stored under keys of the form '<measurement_name><t>',
    # so the number of runs is the number of keys with that prefix.
    return len([None for k in keys if k.startswith(measurement_name)])

# Generalisation {{{2 #
def visualise_generalisation(f):
    import matplotlib.pyplot as plt
    # Generalisation loss
    fig, axs = plt.subplots(2, 2)
    for measurement_template, i in (
            ('35fixed-{}-new-', 0),
            ('35random-{}-cont.-', 1),):
        # DQN
        legend = 'loss'
        measurement_name = measurement_template.format('DQN')
        for t in range(get_num_tests(f.keys(), measurement_name)):
            axs[i, 0].plot(f[measurement_name + str(t)]['loss'],
                           '-', color=(0, 0, 1, 0.1),
                           label=legend)
            legend = '_'
        axs[i, 0].grid(True)
        axs[i, 0].set_title(['DQN baseline',
                             'DQN generalisation (random)'][i])
        axs[i, 0].legend()
        # PPO
        legend_prefix = ''
        measurement_name = measurement_template.format('PPO')
        for t in range(get_num_tests(f.keys(), measurement_name)):
            # The PPO loss log interleaves the clip, value-function and
            # entropy terms, hence the stride-3 slices below.
            ppo_loss = f[measurement_name + str(t)]['loss']
            axs[i, 1].plot(ppo_loss[0::3], '-', color=(0, 0, 1, 0.1),
                           label=legend_prefix + 'clip')
            axs[i, 1].plot(ppo_loss[1::3], '-', color=(0, 1, 0, 0.1),
                           label=legend_prefix + 'vf')
            axs[i, 1].plot(ppo_loss[2::3], '-', color=(1, 0, 0, 0.1),
                           label=legend_prefix + 'entropy')
            legend_prefix = '_'
        axs[i, 1].grid(True)
        axs[i, 1].set_title(['PPO baseline',
                             'PPO generalisation (random)'][i])
        axs[i, 1].legend()
    fig.tight_layout()
    plt.show()
    # Generalisation reward prediction error
    fig, axs = plt.subplots(2, 2)
    for measurement_template, i in (
            ('35fixed-{}-new-', 0),
            ('35random-{}-cont.-', 1),):
        # DQN fixed
        legend = 'reward prediction error'
        measurement_name = measurement_template.format('DQN')
        for t in range(get_num_tests(f.keys(), measurement_name)):
            g = f[measurement_name + str(t)]
            num_episodes = g['num_episodes'][0]
            rp_error = g['reward_prediction'][:num_episodes] - \
                g['episode_rewards'][:num_episodes]
            axs[i, 0].plot(rp_error, 'r.', label=legend)
            legend = '_'
        axs[i, 0].grid(True)
        axs[i, 0].set_title(['DQN baseline',
                             'DQN generalisation (random start)'][i])
        axs[i, 0].legend()
        # PPO fixed
        legend = 'reward prediction error'
        measurement_name = measurement_template.format('PPO')
        for t in range(get_num_tests(f.keys(), measurement_name)):
            g = f[measurement_name + str(t)]
            num_episodes = g['num_episodes'][0]
            rp_error = g['reward_prediction'][:num_episodes] - \
                g['episode_rewards'][:num_episodes]
            episode_ends = g['episode_ends'][:num_episodes]
            axs[i, 1].plot(episode_ends, rp_error, 'r.', label=legend)
            legend = '_'
        axs[i, 1].grid(True)
        axs[i, 1].set_title(['PPO baseline',
                             'PPO generalisation (random start)'][i])
    fig.tight_layout()
    plt.show()
# 2}}} #

def visualise_fixed(f):
    import matplotlib.pyplot as plt
    # FIXME determine this dynamically
    TEST_REWARD_INTERVAL = 100
    # Loss
    fig, axs = plt.subplots(len(STARTING_HEIGHTS), 2)
    for i, starting_height in enumerate(STARTING_HEIGHTS):
        # DQN
        legend = 'loss'
        measurement_name = '{}fixed-DQN-new-'.format(starting_height)
        for t in range(get_num_tests(f.keys(), measurement_name)):
            axs[i, 0].plot(f[measurement_name + str(t)]['loss'],
                           '-', color=(0, 0, 1, 0.1),
                           label=legend)
            legend = '_'
        axs[i, 0].set_ylim([-1, 10])
        axs[i, 0].grid(True)
        axs[i, 0].set_title('DQN started from ' + str(starting_height))
        axs[i, 0].legend()
        # PPO
        legend_prefix = ''
        measurement_name = '{}fixed-PPO-new-'.format(starting_height)
        for t in range(get_num_tests(f.keys(), measurement_name)):
            ppo_loss = f[measurement_name + str(t)]['loss']
            axs[i, 1].plot(ppo_loss[0::3], '-', color=(0, 0, 1, 0.1),
                           label=legend_prefix + 'clip')
            axs[i, 1].plot(ppo_loss[1::3], '-', color=(0, 1, 0, 0.1),
                           label=legend_prefix + 'vf')
            axs[i, 1].plot(ppo_loss[2::3], '-', color=(1, 0, 0, 0.1),
                           label=legend_prefix + 'entropy')
            legend_prefix = '_'
        axs[i, 1].set_ylim([-3, 20])
        axs[i, 1].grid(True)
        axs[i, 1].set_title('PPO started from ' + str(starting_height))
        axs[i, 1].legend()
    fig.tight_layout()
    plt.show()
    # Metrics
    fig, axs = plt.subplots(len(STARTING_HEIGHTS), 2)
    for i, starting_height in enumerate(STARTING_HEIGHTS):
        # DQN
        legend_prefix = ''
        measurement_name = '{}fixed-DQN-new-'.format(starting_height)
        for t in range(get_num_tests(f.keys(), measurement_name)):
            g = f[measurement_name + str(t)]
            num_episodes = g['num_episodes'][0]
            episode_ends = g['episode_ends'][:num_episodes]
            episode_rewards = g['episode_rewards'][:num_episodes]
            reward_prediction = g['reward_prediction'][:num_episodes]
            # Jitter x positions slightly so overlapping points stay visible.
            xx = episode_ends + \
                np.random.random(episode_ends.shape) * 0.5 - 0.25
            axs[i, 0].plot(xx, reward_prediction - episode_rewards, 'b.',
                           label=legend_prefix + 'reward prediction error')
            test_episodes = g['test_episode_rewards'][
                :num_episodes // TEST_REWARD_INTERVAL]
            xx = episode_ends[TEST_REWARD_INTERVAL - 1::TEST_REWARD_INTERVAL]
            xx = xx + np.random.random(xx.shape) * 0.5 - 0.25
            axs[i, 0].plot(xx, test_episodes,
                           'ro', label=legend_prefix + 'test rewards')
            legend_prefix = '_'
        axs[i, 0].grid(True)
        axs[i, 0].set_title('DQN started from ' + str(starting_height))
        axs[i, 0].legend()
        # PPO
        legend_prefix = ''
        measurement_name = '{}fixed-PPO-new-'.format(starting_height)
        for t in range(get_num_tests(f.keys(), measurement_name)):
            g = f[measurement_name + str(t)]
            num_episodes = g['num_episodes'][0]
            episode_ends = g['episode_ends'][:num_episodes]
            episode_rewards = g['episode_rewards'][:num_episodes]
            reward_prediction = g['reward_prediction'][:num_episodes]
            xx = episode_ends + \
                np.random.random(episode_ends.shape) * 0.5 - 0.25
            axs[i, 1].plot(xx, reward_prediction - episode_rewards, 'b.',
...
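In the snippet above, get_num_tests simply counts how many stored runs share a key prefix, so the plotting loops can iterate over every recorded test. Below is a minimal, self-contained sketch of the same pattern, assuming the measurements live in an HDF5 file opened with h5py; the file name measurements.h5 and the key prefix are hypothetical stand-ins mirroring the snippet.

import h5py

def get_num_tests(keys, measurement_name):
    # Each run t is stored under a key of the form '<prefix><t>',
    # so counting keys with the prefix gives the number of runs.
    return len([None for k in keys if k.startswith(measurement_name)])

# Hypothetical file layout mirroring the snippet above.
with h5py.File('measurements.h5', 'r') as f:
    prefix = '35fixed-DQN-new-'
    for t in range(get_num_tests(f.keys(), prefix)):
        loss = f[prefix + str(t)]['loss']
        print(t, len(loss))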

Class.py

Source: Class.py (GitHub)

...
    def get_n_correct(self):
        return self.n_correct

    def get_num_tests(self):
        return self.num_tests

    def get_prediction(self):
        return self.prediction

    def set_prediction(self, support_set, targets):
        # to be overridden by subclasses
        self.prediction = 0

    def update_score(self, support_set, targets, verbose):
        self.set_prediction(support_set, targets)
        if targets[self.get_prediction()] == 1:
            self.n_correct += 1
        self.num_tests += 1

        if verbose and self.get_num_tests() < 10:
            print('TEST {}'.format(self.get_num_tests()))
            print('{} GUESSED PAIR {}'.format(self.get_name(), self.get_prediction() + 1))

            if targets[self.get_prediction()] == 1:
                print("CORRECT")
            else:
                print("INCORRECT")
            print('\n')

    def calc_accuracy(self, N, K):
        acc = round((self.n_correct / self.num_tests) * 100, 2)
        print("{} Model achieved {}% accuracy on {} {}-way {}-shot tests".format(
            self.get_name(), acc, self.get_num_tests(), N, K))
        return acc


class FSL(Predictor):

    def __init__(self, model, name='Few Shot Learning', use_training_set=False, convert_2_rgb=False):
        super().__init__(name)
        self.model = model
        self.name = name
        self.probs = None
        self.use_training_set = use_training_set
        self.convert_2_rgb = convert_2_rgb
        self.fsl = True
        ...
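Here get_num_tests is an accessor for a counter that update_score increments on every evaluation, and calc_accuracy divides n_correct by it. The sketch below shows how those pieces fit together at runtime; since the base class's __init__ and get_name are outside the excerpt, the initialiser, the RandomGuess subclass, and the 5-way one-hot targets are hypothetical stand-ins.

import random

class Predictor:
    def __init__(self, name):
        # Assumed initialiser; the original is not shown in the excerpt.
        self.name = name
        self.n_correct = 0
        self.num_tests = 0
        self.prediction = 0

    def get_name(self):
        return self.name

    def get_num_tests(self):
        return self.num_tests

    def get_prediction(self):
        return self.prediction

    def set_prediction(self, support_set, targets):
        self.prediction = 0  # overridden by subclasses

    def update_score(self, support_set, targets):
        self.set_prediction(support_set, targets)
        if targets[self.get_prediction()] == 1:
            self.n_correct += 1
        self.num_tests += 1

class RandomGuess(Predictor):
    # Hypothetical subclass that picks a pair uniformly at random.
    def set_prediction(self, support_set, targets):
        self.prediction = random.randrange(len(targets))

p = RandomGuess('Random')
for _ in range(1000):
    targets = [0] * 5
    targets[random.randrange(5)] = 1   # one-hot target for a 5-way test
    p.update_score(None, targets)
print(p.get_num_tests())               # 1000
print(round(p.n_correct / p.get_num_tests() * 100, 2), '% accuracy')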

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Slash automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

