Best Python code snippet using fMBT_python
train.py
Source:train.py  
1"""2This file is used for training the DQN (DQfD)3"""4import neptune.new as neptune5import torch6import numpy as np7import os8from agents import *9from env import construct_task_env10# from utils import *11from core import *12from my_logging import *13nep_logger = GenericLogger(2000)14# FAST_DOWNWARD_PATH = "/fast_downward/"15"""16Hyperparameters used for the training17"""18# learning_rate = 1e-4 # learning rate reduced for training model after 5170 episodes19# max_episodes  = 100000020# pretrain_epochs = 1000021# t_max         = 60022# print_interval= 2023# target_update = 10 # episode(s)24# train_steps   = 1025def create_agent(test_case_id, *args, **kwargs):26    '''27    Method that will be called to create your agent during testing.28    You can, for example, initialize different class of agent depending on test case.29    '''30    return DQfDAgent(test_case_id=test_case_id)31def train(agent, env, weights_path=None, tag=None, disable_il=False):32    agent_init = {'fast_downward_path': FAST_DOWNWARD_PATH, 'agent_speed_range': (-3,-1), 'gamma' : 1}33    agent.initialize(**agent_init)34    num_actions = len(env.actions)35    if disable_il:36        agent.init_train()37    else:38        # agent.init_train(expert_demo_path=os.path.join("agent", "expert_dem_combined_easy_10_20.pt"))39        agent.init_train(expert_demo_path=os.path.join("agent", "expert_dem_combined.pt"))40    rewards = []41    losses = {42        'J_E': [],43        'J_DQ': [],44        'Total': []45    }46    optimizer = torch.optim.Adam(agent.model.parameters(), lr=learning_rate, weight_decay=1e-4)47    GenericLogger.initialize_writer()48    GenericLogger.add_params({49        "weight_decay": 1e-4,50        "learning_rate": learning_rate,51        "target_update": target_update,52        "train_steps": train_steps,53        "epsilon_decay": epsilon_decay,54        "gamma": gamma,55        "max_epsilon": max_epsilon,56        "min_epsilon": min_epsilon,57        "batch_size": batch_size,58       
 "buffer_limit": buffer_limit,59        "min_buffer": min_buffer,60    })61    if opt.tag is not None:62        GenericLogger.add_tag(opt.tag)63    agent.model.train()64    epoch = 065    if weights_path is None and not disable_il:66        print("Beginning Expert pretraining phase...")67        # Phase 1: Expert initialization phase68        for epoch in range(pretrain_epochs):69            loss, J_E, J_DQ = agent.optimize(optimizer, expert_phase=True)70            if epoch % (print_interval * 10) == 0 and epoch > 0:71                print("[Epoch {}]\tavg loss: : {:.6f}\tavg J_E loss: {:.6f}\tavg J_DQ loss: {:.6f}".format(epoch, loss.item(), J_E.item(), J_DQ.item()))72                # print("[Epoch {}]\tavg loss: : {:.6f}".format(epoch, np.mean(losses[-print_interval*5:])))73            # Update target network every once in a while74            if epoch % target_update == 0:75                agent.update_tgt_train()76            # if epoch % 2 == 0 and epoch > 0:77            #     break78    elif not weights_path is None:79        print("Loading preexisting weights....")80        agent.model.load_state_dict(torch.load(weights_path))81        agent.update_tgt_train()82    else:83        print("Imitation learning is disabled.")84    # torch.save(agent.target.state_dict(), "phase1.pt")85    manual = False86    # Phase 2: Exploration with some sampling of expert data87    print("Beginning exploration and training phase...")88    episode_lens = []89    prefix = f"last_model_{INPUT_SHAPE[1]}_{INPUT_SHAPE[2]}_"90    for episode in range(max_episodes):91        epsilon = compute_epsilon(episode)92        state = env.reset()93        episode_rewards = 0.094        experiences = []95        # hidden_state, cell_state = agent.model.reset_hidden_states(1)96        # Try the epsiode97        for t in range(t_max):98            # action, hidden_state, cell_state = agent.act(state, hidden_state, cell_state, epsilon=epsilon, env=env, manual=manual)99            action = 
agent.act(state, epsilon=epsilon, env=env, manual=manual)100            next_state, reward, done, info = env.step(action)101            experiences.append(Transition(state, [action], [reward], next_state, [done]))102            episode_rewards += reward103            if done:104                if t > 50:105                    print(f"Weird value t: {t}")106                break107            state = next_state108            if len(experiences) > 50:109                print(f"Found anomaly {len(experiences)}")110        rewards.append(episode_rewards)111        # Record down all the stuff related to episodes112        episode_lens.append(len(experiences))113        # Store all episodes into the replay buffer using the agent's store method114        if len(experiences) > NUM_LOOKBACK_TIMESTEPS + 2:115            agent.record_episode_train(experiences)116        if agent.enough_memory_train():117            manual = False118            for i in range(train_steps):119                epoch += 1120                loss, J_E, J_DQ = agent.optimize(optimizer)121                losses['Total'].append(loss.item())122                losses['J_DQ'].append(J_DQ.item())123                losses['J_E'].append(J_E.item())124        if episode % print_interval == 0 and episode > 0:125            GenericLogger.add_scalar('Mean episode length', np.mean(episode_lens[-print_interval:]))126            # GenericLogger.add_scalar('Min episode length', np.min(episode_lens[-print_interval:]))127            # GenericLogger.add_scalar('Max episode length', np.sum(episode_lens[-print_interval:]))128            GenericLogger.add_scalar('Mean episode reward', np.mean(rewards[-print_interval:]))129            print("[Episode {}]\tavg rewards : {:.3f},\tavg loss: : {:.6f},\tavg J_E loss {:.6f},\tavg J_DQ loss {:.6f}, \130                    \tbuffer size : {},\t epsilon: {}".format(131                            episode, np.mean(rewards[-print_interval:]), 
np.mean(losses['Total'][-print_interval*10:]),132                            np.mean(losses['J_E'][-print_interval*10:]), np.mean(losses['J_DQ'][-print_interval*10:]), 133                            len(agent.memory), epsilon * 100))134            135            print("TOTAL: [Episode {}]\tavg rewards : {:.3f},\tavg loss: : {:.6f},\tavg J_E loss {:.6f},\tavg J_DQ loss {:.6f}, \136                    \tbuffer size : {},\t epsilon: {}".format(137                            episode, np.mean(rewards[-print_interval*10:]), np.mean(losses['Total'][-print_interval*10:]),138                            np.mean(losses['J_E'][-print_interval*10:]), np.mean(losses['J_DQ'][-print_interval*10:]), 139                            len(agent.memory), epsilon * 100))140            agent.memory.distribution()141        if episode % (target_update*2000) == 0:142            save_filename = prefix + str(episode)143            torch.save(agent.target.state_dict(), f"{save_filename}.pt")144        # Update target network every once in a while145        if episode % target_update == 0:146            agent.update_tgt_train()147            # print(">>>>>>>>>>> Saving target network to disc")148if __name__ == "__main__":149    import sys150    import time151    import argparse152    from env import construct_task_env153    parser = argparse.ArgumentParser()154    parser.add_argument('--weights_path', default=None, type=str, help="Path to weights file")155    parser.add_argument('--tag', default=None, type=str, help="Tag for neptune ai logging")156    parser.add_argument('--disable-il', default=False, action="store_true", help="Disable imitation learning")157    opt = parser.parse_args()158    test_env = construct_task_env()159    agent = create_agent(0)160    train(agent, test_env, weights_path=opt.weights_path, tag=opt.tag, disable_il=opt.disable_il)...LogHandler.py
Source:LogHandler.py  
#####################################################################################################
#####################################################################################################
## Created on Sep 01, 2013
## @author: Gerasimos Kassaras
## E-mail: g.kassaras@gmail.com
## Nickname: Lamehacker Free Industries
##
## Comment: This class is used as a centralized logger for the scanner
#######################################################################################################
#######################################################################################################
import logging
from bs4 import UnicodeDammit

# Module-level logger shared by all loggingHandler instances: writes
# timestamped records to CapCake_logger.log at DEBUG level.
genericLoggerName = 'CapCake_logger'
genericLogger = logging.getLogger(genericLoggerName)
genericLoggerHandler = logging.FileHandler('CapCake_logger.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
genericLoggerHandler.setFormatter(formatter)
genericLogger.addHandler(genericLoggerHandler)
genericLogger.setLevel(logging.DEBUG)


class loggingHandler:  # logs info,warning,error,critical,debug events.
    def __init__(self):
        '''
        Description: This class is used to manage the logging information from the scanner.
        Status: Finished.
        Usage: This is used to initialize with the proper logging level.
        '''
        self.logInfo("--- Package: LoggingManager - Module: LoggingHandler Class: loggingHandler Initiated ---")

    # -----------------------------------------------------------------------
    def logInfo(self, msg):
        genericLogger.info(msg)

    # -----------------------------------------------------------------------
    def logWarning(self, msg):
        # FIX: Logger.warn() has been deprecated since Python 3.3 in favour
        # of Logger.warning(); same signature and behavior.
        genericLogger.warning(msg, exc_info=True)

    # -----------------------------------------------------------------------
    def logError(self, msg):
        genericLogger.error(msg, exc_info=True)

    # -----------------------------------------------------------------------
    def logCritical(self, msg):
        genericLogger.critical(msg, exc_info=True)

    # -----------------------------------------------------------------------
    def logDebug(self, msg):
        # NOTE(review): the original body was truncated in this view;
        # implemented to mirror logInfo — confirm against the full source.
        genericLogger.debug(msg)
Source:my_logging.py  
# NOTE: neptune is imported lazily inside NepLogger.initialize_writer so that
# this module (and the default no-op NepLoggerEmpty) works without neptune
# installed; previously `import neptune.new as neptune` ran at import time.

default_update_interval = 0  # Not used


class NepLoggerEmpty():
    ''' NepLogger that does nothing (every operation is a silent no-op). '''

    def __init__(self, update_interval=default_update_interval):
        pass

    def __str__(self):
        return repr(self)

    def initialize_writer(self):
        pass

    def get_writer(self):
        pass

    def add_scalar(self, tag, item):
        pass

    def add_params(self, params):
        pass

    def add_tag(self, tag):
        pass


class NepLogger():
    ''' Logger that logs to Neptune.ai '''

    def __init__(self, update_interval=default_update_interval):
        self.logger = None  # set by initialize_writer()
        self.update_interval = update_interval

    def __str__(self):
        return repr(self)

    def initialize_writer(self):
        # Imported here so neptune is only required when Neptune logging is
        # actually enabled (this class is selected).
        import neptune.new as neptune
        self.logger = neptune.init(project='tenvinc/cs4246-project',
                                   source_files=['agent/*.py', 'requirements.txt'])

    def get_writer(self):
        return self.logger

    def add_scalar(self, tag, item):
        self.logger[tag].log(item)

    def add_params(self, params):
        self.logger["parameters"] = params

    def add_tag(self, tag):
        self.logger["sys/tags"].add([tag])


class GenericLogger():
    ''' Generic logger singleton: the first construction installs a shared
    NepLoggerEmpty; classmethods delegate to that shared instance. '''

    instance = None

    def __init__(self, update_interval=default_update_interval):
        if not GenericLogger.instance:
            # Swap NepLoggerEmpty for NepLogger here to enable Neptune logging.
            GenericLogger.instance = NepLoggerEmpty(update_interval)
            print(GenericLogger.instance)

    def __getattr__(self, name):
        # Fall through to the shared instance for any attribute access.
        return getattr(self.instance, name)

    @classmethod
    def initialize_writer(cls):
        return cls.instance.initialize_writer()

    @classmethod
    def get_writer(cls):
        return cls.instance.get_writer()

    @classmethod
    def add_scalar(cls, tag, item):
        assert cls.instance is not None
        cls.instance.add_scalar(tag, item)

    @classmethod
    def add_params(cls, params):
        assert cls.instance is not None
        cls.instance.add_params(params)

    @classmethod
    def add_tag(cls, tag):
        assert cls.instance is not None
        # NOTE(review): the original body was truncated in this view;
        # completed by analogy with add_scalar/add_params — confirm against
        # the full source.
        cls.instance.add_tag(tag)
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automated testing FREE!
