Best Python code snippet using lisa_python
Experiments.py
Source:Experiments.py  
...
        for e in range(self.max_batch_episodes):
            # Make a gradient update step
            self.learner.train(batch)
            for _ in range(10):
                partial_result = self.test_in_env()
                self.episode_returns.append(partial_result)
            if self.plot_frequency is not None and (e + 1) % self.plot_frequency == 0:
                self.plot_training(update=True)
                if self.print_when_plot:
                    print('Batch %u, epi-return %.4g +- %.3g' %
                          (len(self.episode_returns), np.mean(self.episode_returns[-10:]),
                           np.std(self.episode_returns[-10:])))

    def test_in_env(self):
        rewards = []
        for _ in range(10):
        # for _ in range(self.plot_frequency):
            state = self.runner.env.reset()
            done = False
            score = 0
            while not done:
                action = self.controller.choose(state, increase_counter=False).detach().item()
                # action = 1
                new_state, reward, done = self.runner.env.step(action)
                score += reward
                state = new_state
            rewards.append(score)
        # Mean return over all evaluation episodes
        return np.mean(rewards)


class BatchHeuristicActorCriticExperiment(Experiment):
    def __init__(self, params, model, learner=None, **kwargs):
        super().__init__(params, model, **kwargs)
        self.max_episodes = params.get('max_episodes', int(1E6))
        self.max_batch_episodes = params.get('max_batch_episodes', int(1E6))
        self.max_steps = params.get('max_steps', int(1E9))
        self.grad_repeats = params.get('grad_repeats', 1)
        self.batch_size = params.get('batch_size', 1e5)
        self.mini_batch_size = params.get('mini_batch_size', 200)
        self.controller = ACController(model, num_actions=params.get('num_actions'), params=params)
        self.controller = EpsilonGreedyController(controller=self.controller, params=params)
        self.runner = Heuristic_runner(self.controller, params=params)
        self.learner = BatchReinforceLearner(model, params=params) if learner is None else learner
        self.learner.set_controller(self.controller)
        self.opposd = params.get('opposd', False)
        self.opposd_iterations = params.get('opposd_iterations', 50)

    def get_transition_batch(self):
        transition_buffer = TransitionBatch(self.batch_size, self.runner.transition_format(), self.mini_batch_size)
        batch = self.runner.run(self.batch_size, transition_buffer)
        return batch

    def close(self):
        """ Overrides Experiment.close() """
        self.runner.close()

    def run(self, batch=None):
        """ Overrides Experiment.run() """
        # Plot past results if available
        if self.plot_frequency is not None and len(self.episode_losses) > 2:
            self.plot_training(update=True)
        # Run the experiment
        if not batch:
            batch = self.get_transition_batch()
        for e in range(self.max_batch_episodes):
            # Make a gradient update step
            self.learner.train(batch["buffer"])
            for _ in range(10):
                partial_result = self.test_in_env()
                self.episode_returns.append(partial_result)
            if self.plot_frequency is not None and (e + 1) % self.plot_frequency == 0:
                # self.plot_training(update=True)
                if self.print_when_plot:
                    print('Batch %u, epi-return %.4g +- %.3g' %
                          (len(self.episode_returns), np.mean(self.episode_returns[-10:]),
                           np.std(self.episode_returns[-10:])))

    def test_in_env(self):
        rewards = []
        for _ in range(10):
        # for _ in range(self.plot_frequency):
            state = self.runner.env.reset()
            done = False
            score = 0
            while not done:
                action = self.controller.choose(state, increase_counter=False).detach().item()
                # action = 1
                new_state, reward, done = self.runner.env.step(action)
                score += reward
                state = new_state
            rewards.append(score)
        # Mean return over all evaluation episodes
        return np.mean(rewards)


class ActorCriticExperimentRunning(Experiment):
    def __init__(self, params, model, learner=None, **kwargs):
        super().__init__(params, model, **kwargs)
        self.max_episodes = params.get('max_episodes', int(1E6))
        self.max_batch_episodes = params.get('max_batch_episodes', int(1E6))
        self.max_steps = params.get('max_steps', int(1E9))
        self.grad_repeats = params.get('grad_repeats', 1)
        self.batch_size = params.get('batch_size', 1024)
        self.controller = ACController(model, num_actions=params.get('num_actions'), params=params)
        self.controller = EpsilonGreedyController(controller=self.controller, params=params)
        # self.controller = Experiments_controller(controller=self.controller, params=params)
        self.runner = Experiments_runner(self.controller, params=params)
        self.learner = ReinforceLearner(model, params=params) if learner is None else learner
        self.learner.set_controller(self.controller)
        self.opposd = params.get('opposd', False)

    def close(self):
        """ Overrides Experiment.close() """
        self.runner.close()

    def run(self):
        """ Overrides Experiment.run() """
        # Run the experiment
        transition_buffer = TransitionBatch(self.batch_size, self.runner.transition_format(), self.batch_size)
        env_steps = 0 if len(self.env_steps) == 0 else self.env_steps[-1]
        interacted_episodes = 0
        for e in range(self.max_batch_episodes):
            # Run the policy for batch_size steps
            batch = self.runner.run(self.batch_size, transition_buffer)
            if self.opposd:
                for _ in range(50):
                    batch_w = self.runner.run(self.batch_size, transition_buffer)
                    self.learner.update_policy_distribution(batch_w['buffer'])
            # Make a gradient update step
            loss = self.learner.train(batch['buffer'])
            self.episode_losses.append(loss)
            # Quit if maximal number of environment steps is reached
            if env_steps >= self.max_steps:
                print('Steps limit reached')
                break
            self.episode_returns = np.append(self.episode_returns, np.mean(self.test_in_env()))
            self.plot_training(update=True)
            print('Batch %d %.4g +- %.3g' % (e, np.mean(self.episode_returns[-5:]),
                                             np.std(self.episode_returns[-5:])))
        if self.max_batch_episodes == 1:
            self.episode_returns = self.test_in_env()

    def plot_training(self, update=False):
        """ Plots logged training results. Use "update=True" if the plot is continuously updated
            or use "update=False" if this is the final call (otherwise there will be double plotting). """
        # Smooth curves
        # window = max(int(len(self.episode_returns) / 50), 10)
        window = max(int(len(self.episode_returns) / 20), 2)
        returns = np.convolve(self.episode_returns, np.ones(window) / window, 'valid')
        # Determine x-axis based on samples or episodes
        x_returns = [i + window for i in range(len(returns))]
        # Create plot
        colors = ['b', 'g', 'r']
        fig = plt.gcf()
        fig.set_size_inches(16, 4)
        plt.clf()
        pl.plot(x_returns, returns, colors[0])
        pl.xlabel('environment steps' if self.plot_train_samples else 'batch trainings')
        pl.ylabel('episode return')
        # Dynamic plot update
        display.clear_output(wait=True)
        if update:
            display.display(pl.gcf())

    def test_in_env(self):
        rewards = []
        for _ in range(50):
        # for _ in range(self.plot_frequency):
            state = self.runner.env.reset()
            done = False
            score = 0
            while not done:
                action = self.controller.choose(state, increase_counter=False).detach().item()
                # action = 1
                new_state, reward, done = self.runner.env.step(action)
                score += reward
                state = new_state
            rewards.append(score)
        return rewards


class ActorCriticExperimentHeuristic(Experiment):
    def __init__(self, params, model, learner=None, **kwargs):
        super().__init__(params, model, **kwargs)
        self.max_episodes = params.get('max_episodes', int(1E6))
        self.max_batch_episodes = params.get('max_batch_episodes', int(1E6))
        self.max_steps = params.get('max_steps', int(1E9))
        self.grad_repeats = params.get('grad_repeats', 1)
        self.batch_size = params.get('batch_size', 1024)
        self.controller = ACController(model, num_actions=params.get('num_actions'), params=params)
        self.controller = EpsilonGreedyController(controller=self.controller, params=params)
        # self.controller = Experiments_controller(controller=self.controller, params=params)
        self.runner = Heuristic_runner(self.controller, params=params)
        self.learner = ReinforceLearner(model, params=params) if learner is None else learner
        self.learner.set_controller(self.controller)

    def run(self):
        """ Overrides Experiment.run() """
        # Plot past results if available
        if self.plot_frequency is not None and len(self.episode_losses) > 2:
            self.plot_training(update=True)
        # Run the experiment
        transition_buffer = TransitionBatch(self.batch_size, self.runner.transition_format(), self.batch_size)
        env_steps = 0 if len(self.env_steps) == 0 else self.env_steps[-1]
        interacted_episodes = 0
        for e in range(self.max_batch_episodes):
            # Run the policy for batch_size steps
            batch = self.runner.run(self.batch_size, transition_buffer)
            batch_episodes = 0
            loss = self.learner.train(batch['buffer'])
            self.episode_losses.append(loss)
            interacted_episodes += batch_episodes
            if interacted_episodes >= self.max_episodes:
                print('Environment interaction limit reached')
                break
            # Show intermediate results
            self.episode_returns = np.append(self.episode_returns, np.mean(self.test_in_env()))
            self.plot_training(update=True)
            print('Batch %d %.4g +- %.3g' % (e, np.mean(self.episode_returns[-5:]),
                                             np.std(self.episode_returns[-5:])))
        if self.max_batch_episodes == 1:
            self.episode_returns = self.test_in_env()

    def test_in_env(self):
        rewards = []
        for _ in range(50):
        # for _ in range(self.plot_frequency):
            state = self.runner.env.reset()
            done = False
            score = 0
            while not done:
                action = self.controller.choose(state, increase_counter=False).detach().item()
                # action = 1
                new_state, reward, done = self.runner.env.step(action)
                score += reward
                state = new_state
            rewards.append(score)
        return rewards
...
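All four experiment classes above follow the same driving pattern: build a params dictionary, pass in the policy/value model, call run(), and call close() when finished. The sketch below illustrates that pattern; the build_model() helper and most params keys are assumptions for illustration, since the Experiment base class, the runners, and the controllers are defined elsewhere in the project.

# Minimal driver sketch for the experiment classes above (illustrative only).
# build_model() and most params keys are assumptions; only 'num_actions',
# 'batch_size', 'max_batch_episodes' and 'opposd' appear in this snippet.
params = {
    'num_actions': 4,            # size of the discrete action space
    'batch_size': 1024,          # transitions collected per training batch
    'max_batch_episodes': 100,   # number of batch/training iterations
    'opposd': False,             # enable the off-policy distribution-correction path
}
model = build_model(params)      # hypothetical helper that builds the actor-critic network

experiment = ActorCriticExperimentRunning(params, model)
try:
    experiment.run()             # collect a batch, train the learner, evaluate, plot
finally:
    experiment.close()           # shut down the runner and its environment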
afl.py
Source:afl.py
import shutil
import subprocess
import sys
import time
import traceback
from pathlib import Path

from FTB.ProgramConfiguration import ProgramConfiguration
from FTB.Running.AutoRunner import AutoRunner

from .utils import (
    HAVE_FFPUPPET,
    apply_transform,
    setup_firefox,
    warn_local,
    write_stats_file,
)


def command_file_to_list(cmd_file):
    """
    Open and parse custom command line file

    @type cmd_file: String
    @param cmd_file: Command line file containing list of commands

    @rtype: Tuple
    @return: Test index in list and the command as a list of strings
    """
    cmdline = []
    idx = 0
    test_idx = None
    with open(cmd_file) as cmd_fp:
        for line in cmd_fp:
            if "@@" in line:
                test_idx = idx
            cmdline.append(line.rstrip())
            idx += 1
    return test_idx, cmdline


def scan_crashes(
    base_dir,
    collector,
    cmdline_path=None,
    env_path=None,
    test_path=None,
    firefox=None,
    firefox_prefs=None,
    firefox_extensions=None,
    firefox_testpath=None,
    transform=None,
):
    """
    Scan the base directory for crash tests and submit them to FuzzManager.

    @type base_dir: String
    @param base_dir: AFL base directory

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.

    @type env_path: String
    @param env_path: Optional file containing environment variables.

    @type test_path: String
    @param test_path: Optional filename where to copy the test before
                      attempting to reproduce a crash.

    @type transform: String
    @param transform: Optional path to script for applying post-crash
                      transformations.

    @rtype: int
    @return: Non-zero return code on failure
    """
    crash_dir = Path(base_dir) / "crashes"
    crash_files = []
    for crash_path in crash_dir.iterdir():
        # Ignore all files that aren't crash results
        if not crash_path.name.startswith("id:"):
            continue
        # Ignore our own status files
        if crash_path.suffix in {".submitted", ".failed"}:
            continue
        # Ignore files we already processed
        if (crash_dir / f"{crash_path.name}.submitted").exists() or (
            crash_dir / f"{crash_path.name}.failed"
        ).exists():
            continue
        crash_files.append(crash_path)
    if crash_files:
        # First try to read necessary information for reproducing crashes
        base_env = {}
        test_in_env = None
        if env_path:
            with open(env_path) as env_file:
                for line in env_file:
                    (name, val) = line.rstrip("\n").split("=", 1)
                    base_env[name] = val
                    if "@@" in val:
                        test_in_env = name
        if not cmdline_path:
            cmdline_path = Path(base_dir) / "cmdline"
        test_idx, cmdline = command_file_to_list(cmdline_path)
        if test_idx is not None:
            orig_test_arg = cmdline[test_idx]
        configuration = ProgramConfiguration.fromBinary(cmdline[0])
        if not configuration:
            raise Exception(
                "Creating program configuration from binary failed. "
                "Check your binary configuration file."
            )
        if firefox:
            (ffp, ff_cmd, ff_env) = setup_firefox(
                cmdline[0], firefox_prefs, firefox_extensions, firefox_testpath
            )
            cmdline = ff_cmd
            base_env.update(ff_env)
        for crash_file in crash_files:
            stdin = None
            env = None
            if base_env:
                env = dict(base_env)
            submission = crash_file
            if transform:
                try:
                    submission = Path(apply_transform(transform, crash_file))
                except Exception as exc:  # pylint: disable=broad-except
                    print(exc.args[1], file=sys.stderr)
            if test_idx is not None:
                cmdline[test_idx] = orig_test_arg.replace("@@", str(crash_file))
            elif test_in_env is not None:
                env[test_in_env] = env[test_in_env].replace("@@", str(crash_file))
            elif test_path is not None:
                shutil.copy(str(crash_file), test_path)
            else:
                stdin = crash_file.read_text()
            print(f"Processing crash file {crash_file}", file=sys.stderr)
            runner = AutoRunner.fromBinaryArgs(
                cmdline[0], cmdline[1:], env=env, stdin=stdin
            )
            if runner.run():
                crash_info = runner.getCrashInfo(configuration)
                collector.submit(crash_info, submission)
                (submission.parent / f"{submission.name}.submitted").touch()
                print("Success: Submitted crash to server.", file=sys.stderr)
            else:
                (submission.parent / f"{submission.name}.failed").touch()
                print(
                    "Error: Failed to reproduce the given crash, cannot submit.",
                    file=sys.stderr,
                )
        if firefox:
            ffp.clean_up()


def write_aggregated_stats_afl(base_dirs, outfile, cmdline_path=None):
    """
    Generate aggregated statistics from the given base directories
    and write them to the specified output file.

    @type base_dirs: list
    @param base_dirs: List of AFL base directories

    @type outfile: str
    @param outfile: Output file for aggregated statistics

    @type cmdline_path: String
    @param cmdline_path: Optional command line file to use instead of the
                         one found inside the base directory.
    """
    # Which fields to add
    wanted_fields_total = [
        "execs_done",
        "execs_per_sec",
        "pending_favs",
        "pending_total",
        "variable_paths",
        "unique_crashes",
        "unique_hangs",
    ]
    # Which fields to aggregate by mean
    wanted_fields_mean = ["exec_timeout"]
    # Which fields should be displayed per fuzzer instance
    wanted_fields_all = ["cycles_done", "bitmap_cvg"]
    # Which fields should be aggregated by max
    wanted_fields_max = ["last_path"]
    # Generate total list of fields to write
    fields = []
    fields.extend(wanted_fields_total)
    fields.extend(wanted_fields_mean)
    fields.extend(wanted_fields_all)
    fields.extend(wanted_fields_max)
    # Warnings to include
    warnings = []
    aggregated_stats = {}
    for field in wanted_fields_total:
        aggregated_stats[field] = 0
    for field in wanted_fields_mean:
        aggregated_stats[field] = (0, 0)
    for field in wanted_fields_all:
        aggregated_stats[field] = []

    def convert_num(num):
        if "." in num:
            return float(num)
        return int(num)

    for base_dir in base_dirs:
        stats_path = Path(base_dir) / "fuzzer_stats"
        if not cmdline_path:
            cmdline_path = Path(base_dir) / "cmdline"
        if stats_path.exists():
            stats = stats_path.read_text()
            for line in stats.splitlines():
                (field_name, field_val) = line.split(":", 1)
                field_name = field_name.strip()
                field_val = field_val.strip()
                if field_name in wanted_fields_total:
                    aggregated_stats[field_name] += convert_num(field_val)
                elif field_name in wanted_fields_mean:
                    (val, cnt) = aggregated_stats[field_name]
                    aggregated_stats[field_name] = (
                        val + convert_num(field_val),
                        cnt + 1,
                    )
                elif field_name in wanted_fields_all:
                    aggregated_stats[field_name].append(field_val)
                elif field_name in wanted_fields_max:
                    num_val = convert_num(field_val)
                    if (field_name not in aggregated_stats) or aggregated_stats[
                        field_name
                    ] < num_val:
                        aggregated_stats[field_name] = num_val
    # If we don't have any data here, then the fuzzers haven't written any
    # statistics yet
    if not aggregated_stats:
        return
    # Mean conversion
    for field_name in wanted_fields_mean:
        (val, cnt) = aggregated_stats[field_name]
        if cnt:
            aggregated_stats[field_name] = float(val) / float(cnt)
        else:
            aggregated_stats[field_name] = val
    # Verify fuzzmanagerconf exists and can be parsed
    _, cmdline = command_file_to_list(cmdline_path)
    target_binary = cmdline[0] if cmdline else None
    if target_binary is not None:
        if not Path(f"{target_binary}.fuzzmanagerconf").is_file():
            warnings.append(f"WARNING: Missing {target_binary}.fuzzmanagerconf\n")
        elif ProgramConfiguration.fromBinary(target_binary) is None:
            warnings.append(f"WARNING: Invalid {target_binary}.fuzzmanagerconf\n")
    # Look for unreported crashes
    failed_reports = 0
    for base_dir in base_dirs:
        crashes_dir = Path(base_dir) / "crashes"
        if not crashes_dir.is_dir():
            continue
        for crash_file in crashes_dir.iterdir():
            if crash_file.suffix == ".failed":
                failed_reports += 1
    if failed_reports:
        warnings.append(f"WARNING: Unreported crashes detected ({failed_reports})\n")
    # Write out data
    write_stats_file(outfile, fields, aggregated_stats, warnings)


def aflfuzz_main(opts, collector, s3m):
    assert not opts.cmd or opts.firefox
    if opts.firefox or opts.firefox_start_afl:
        assert HAVE_FFPUPPET
        assert not opts.custom_cmdline_file
        assert opts.firefox_prefs and opts.firefox_testpath
    if opts.firefox_start_afl:
        assert opts.aflbindir
        (ffp, cmd, env) = setup_firefox(
            opts.firefox_start_afl,
            opts.firefox_prefs,
            opts.firefox_extensions,
            opts.firefox_testpath,
        )
        afl_cmd = [str(Path(opts.aflbindir) / "afl-fuzz")]
        opts.rargs.remove("--")
        afl_cmd.extend(opts.rargs)
        afl_cmd.extend(cmd)
        try:
            subprocess.run(afl_cmd, env=env)
        except Exception:  # pylint: disable=broad-except
            traceback.print_exc()
        ffp.clean_up()
        return 0
    afl_out_dirs = []
    if opts.afloutdir:
        if not (Path(opts.afloutdir) / "crashes").exists():
            # The specified directory doesn't have a "crashes" sub directory.
            # Either the wrong directory was specified, or this is an AFL
            # multi-process synchronization directory. Try to figure this out here.
            for sync_dir in Path(opts.afloutdir).iterdir():
                if (sync_dir / "crashes").exists():
                    afl_out_dirs.append(str(sync_dir))
            if not afl_out_dirs:
                print(
                    f"Error: Directory {opts.afloutdir} does not appear to be a "
                    "valid AFL output/sync directory",
                    file=sys.stderr,
                )
                return 2
        else:
            afl_out_dirs.append(opts.afloutdir)
    # Upload and FuzzManager modes require specifying the AFL directory
    assert not (opts.s3_queue_upload or opts.fuzzmanager) or opts.afloutdir
    if opts.fuzzmanager or opts.s3_queue_upload or opts.aflstats:
        last_queue_upload = 0
        # If we reach this point, we know that AFL will be running on this machine,
        # so do the local warning check
        warn_local(opts)
        while True:
            if opts.fuzzmanager:
                for afl_out_dir in afl_out_dirs:
                    scan_crashes(
                        afl_out_dir,
                        collector,
                        opts.custom_cmdline_file,
                        opts.env_file,
                        opts.test_file,
                    )
            # Only upload queue files every 20 minutes
            if opts.s3_queue_upload and last_queue_upload < int(time.time()) - 1200:
                for afl_out_dir in afl_out_dirs:
                    s3m.upload_afl_queue_dir(afl_out_dir, new_cov_only=True)
                last_queue_upload = int(time.time())
            if opts.stats or opts.aflstats:
                write_aggregated_stats_afl(
                    afl_out_dirs,
                    opts.aflstats,
                    cmdline_path=opts.custom_cmdline_file,
                )
            time.sleep(10)
...
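scan_crashes() and write_aggregated_stats_afl() are normally driven by the polling loop in aflfuzz_main(), but they can also be called on their own. A minimal sketch follows, assuming FuzzManager's Collector is configured locally (e.g. via .fuzzmanagerconf) and an AFL output directory already exists; the paths are placeholders, not part of the snippet.

# Hypothetical standalone use of the helpers above; all paths are placeholders.
from Collector.Collector import Collector  # FuzzManager's crash collector (assumed configured locally)

afl_out_dir = "/path/to/afl-out"           # AFL output dir containing "crashes", "fuzzer_stats" and "cmdline"

# The "cmdline" file is what command_file_to_list() parses: one argument per
# line, with "@@" marking where a test file gets substituted, e.g.
#   /path/to/target-binary
#   --some-option
#   @@

# Submit any new "id:*" crash files under <afl_out_dir>/crashes to FuzzManager
collector = Collector()
scan_crashes(afl_out_dir, collector)

# Aggregate fuzzer_stats from one or more AFL instances into a single stats file
write_aggregated_stats_afl([afl_out_dir], "/path/to/afl-stats.txt")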