Best Python code snippet using localstack_python
evaluation_test.py
Source:evaluation_test.py  
# NOTE(review): this block was recovered from a scrape that fused the original
# file's line numbers into the text; formatting has been reconstructed.


def _get_forecasts(dates, sites):
  """Builds a random predictions dataframe over the `dates` x `sites` grid.

  NOTE(review): the opening of this function was cut off in the scraped
  source; the name, signature and the `pd.DataFrame({` opener are
  reconstructed from the call sites below — confirm against the original file.
  """
  return pd.DataFrame({
      constants.DATE: np.repeat(dates, len(sites)),
      constants.SITE_ID: np.tile(sites, len(dates)),
      constants.PREDICTION: np.random.rand(len(dates) * len(sites)) * 10,
  })


def _get_entry(last_observation_date="2020-05-07",
               dataset_index_key="dset_index_1", cadence=1):
  """Builds a forecast index entry; only comparability fields are settable."""
  return forecast_indexing.build_entry(
      forecast_id="",
      file_location="",
      dataset_name=_TEST_DATASET,
      creation_timestamp="",
      dataset_index_key=dataset_index_key,
      dataset_location="",
      last_observation_date=last_observation_date,
      cadence=cadence,
      extra_info={})


def _get_dataset(eval_dates, sites, target="new_confirmed", cadence=1):
  """Builds a random Dataset whose evaluation span covers `eval_dates`."""
  # The single training date is the day immediately before the first
  # evaluation date.
  training_datetime = (
      datetime.datetime.strptime(eval_dates[0], "%Y-%m-%d") -
      datetime.timedelta(days=1))
  return dataset_factory.Dataset(
      training_targets=np.random.randint(0, 10, (1, len(sites), 1)),
      training_features=[],
      evaluation_targets=np.random.randint(0, 10,
                                           (len(eval_dates), len(sites), 1)),
      sum_past_targets=np.random.randint(0, 10, (len(sites), 1)),
      target_names=[target],
      feature_names=[],
      training_dates=[
          datetime.datetime.strftime(training_datetime, "%Y-%m-%d")
      ],
      evaluation_dates=eval_dates,
      sites=np.array(sites),
      dataset_index_key="test_dset_index_1",
      cadence=cadence)


class EvaluationTest(absltest.TestCase):

  def setUp(self):
    super().setUp()
    self._default_dates = ["2020-05-07", "2020-05-08", "2020-05-09"]
    self._default_sites = ["site_1", "site_2"]

  def test_comparable_forecasts_subset_sites(self):
    """Checks predictions are restricted to sites common to all forecasts.

    (Docstring fix: the original claimed "validation fails", but the code
    under test subsets to the shared sites instead of raising.)
    """
    forecasts_1 = _get_forecasts(self._default_dates, self._default_sites)
    forecasts_2 = _get_forecasts(self._default_dates, ["site_1", "site_3"])
    all_forecast_arrays = evaluation._convert_forecasts_to_arrays(
        [forecasts_1, forecasts_2])
    _, sites_to_eval, sites_to_drop, updated_data_arrays = (
        evaluation._get_forecast_spec_and_comparable_predictions(
            all_forecast_arrays))
    np.testing.assert_array_equal(sites_to_eval, ["site_1"])
    np.testing.assert_array_equal(sites_to_drop, ["site_2", "site_3"])
    np.testing.assert_array_equal(
        updated_data_arrays, np.array(
            [arr.data_array for arr in all_forecast_arrays])[:, :, 0:1, :])
    np.testing.assert_array_equal(updated_data_arrays[0].shape,
                                  updated_data_arrays[1].shape)

  def test_incomparable_forecasts_subset_dates(self):
    """Checks predictions are restricted to dates common to all forecasts.

    (Docstring fix: the original claimed "validation fails", but the code
    under test subsets to the shared dates instead of raising.)
    """
    forecasts_1 = _get_forecasts(self._default_dates, self._default_sites)
    forecasts_2 = _get_forecasts(["2020-05-06", "2020-05-07"],
                                 self._default_sites)
    all_forecast_arrays = evaluation._convert_forecasts_to_arrays(
        [forecasts_1, forecasts_2])
    dates_to_eval, _, _, updated_data_arrays = (
        evaluation._get_forecast_spec_and_comparable_predictions(
            all_forecast_arrays))
    np.testing.assert_array_equal(dates_to_eval, ["2020-05-07"])
    np.testing.assert_array_equal(
        updated_data_arrays[0], all_forecast_arrays[0].data_array[0:1])
    np.testing.assert_array_equal(
        updated_data_arrays[1], all_forecast_arrays[1].data_array[1:2])
    np.testing.assert_array_equal(updated_data_arrays[0].shape,
                                  updated_data_arrays[1].shape)

  def test_valid_different_forecast_horizons(self):
    """Checks that validation passes for forecasts with different horizons."""
    forecasts_1 = _get_forecasts(self._default_dates, self._default_sites)
    forecasts_2 = _get_forecasts(["2020-05-07", "2020-05-08"],
                                 self._default_sites)
    all_forecast_arrays = evaluation._convert_forecasts_to_arrays(
        [forecasts_1, forecasts_2])
    # Should not raise.
    evaluation._get_forecast_spec_and_comparable_predictions(
        all_forecast_arrays)

  def test_badly_formatted_forecasts(self):
    """Checks that forecasts with unexpected format fail evaluation."""
    forecasts = _get_forecasts(self._default_dates, self._default_sites)
    forecasts["extra_column"] = ""
    all_forecast_arrays = evaluation._convert_forecasts_to_arrays([forecasts])
    with self.assertRaisesRegex(AssertionError,
                                "Unexpected columns in forecasts:*"):
      evaluation._get_forecast_spec_and_comparable_predictions(
          all_forecast_arrays)

  def test_incomparable_last_observation_dates(self):
    """Checks that validation fails for different last_observation_dates."""
    entry_1 = _get_entry(last_observation_date="2020-05-06")
    entry_2 = _get_entry(last_observation_date="2020-05-07")
    with self.assertRaisesRegex(
        ValueError,
        "Models can only be compared if they have the same "
        "last_observation_date. *"):
      evaluation._get_last_observation_date_and_validate_comparable(
          [entry_1, entry_2])

  def test_incomparable_forecast_cadences(self):
    """Checks that validation fails for forecasts with different cadences."""
    entry_1 = _get_entry(cadence=1)
    entry_2 = _get_entry(cadence=7)
    with self.assertRaisesRegex(
        ValueError,
        "Models can only be compared if they have the same forecast cadence *"):
      evaluation._get_last_observation_date_and_validate_comparable(
          [entry_1, entry_2])

  def test_incomparable_forecast_sources(self):
    """Checks validation fails for forecasts trained on different datasets."""
    entry_1 = _get_entry(dataset_index_key="dset_index_1")
    entry_2 = _get_entry(dataset_index_key="dset_index_2")
    with self.assertRaisesRegex(
        ValueError,
        "Models can only be compared if they were trained using the same "
        "dataset.*"):
      evaluation._get_last_observation_date_and_validate_comparable(
          [entry_1, entry_2])

  def test_incomparable_eval_dset_missing_sites(self):
    """Checks that validation fails when the dataset is missing sites."""
    dataset = _get_dataset(self._default_dates, ["site_1", "site_3"])
    with self.assertRaisesRegex(
        ValueError, "Not all of the sites in the forecasts are present in the "
        "evaluation dataset*"):
      evaluation._validate_eval_dataset_comparable(dataset, self._default_dates,
                                                   self._default_sites)
Source:test_vdocs.py  
...43            file_2.close()44            self.assertNotEqual(md5_1, md5_2)45        shutil.rmtree(f_dir1)46        shutil.rmtree(f_dir2)47    def test_get_entry(self):48        """Objective: Test if the entries generated by the '_get_entry()'different.49        Input: Return value from GlastopfHoneypot._get_entry()50        Expected Result: Two runs of GlastopfHoneypot._get_entry() generate different results51        Notes:"""52        user_id1 = random.randint(1000, 1500)  # Realistic user ID53        pwd_entry1, shd_entry1, grp_entry1 = vdocs._get_entry(user_id1)54        user_id2 = random.randint(1000, 1500)55        pwd_entry2, shd_entry2, grp_entry2 = vdocs._get_entry(user_id2)56        self.assertNotEqual(pwd_entry1, pwd_entry2)57        self.assertNotEqual(shd_entry1, shd_entry2)58        self.assertNotEqual(grp_entry1, grp_entry2)59if __name__ == '__main__':...config.py
Source:config.py  
CONFIG_DST_MAPPING_FIELD_STROKE_COUNT = 'strokeCount'


class ConfigWrapper:
    """Typed accessor over the add-on's configuration dict.

    Reads each known key from the config returned by Anki's add-on manager,
    falling back to a hard-coded default when the config (or the key) is
    missing.
    """

    def __init__(self) -> None:
        # getConfig() is declared Optional — it returns None when no config
        # exists yet, so every access below must tolerate a None source.
        config: Optional[Dict[str, Any]] = mw.addonManager.getConfig(__name__)
        self.noteTypes = self._get_entry(config, CONFIG_NOTE_TYPES, ['Japanese::Kanji'])
        self.field_kanji = self._get_entry(config, CONFIG_FIELD_SRC, 'kanji')
        self.skipTags = self._get_entry(config, CONFIG_SKIP_TAGS, ['kanji_skip'])
        self.addTags = self._get_entry(config, CONFIG_ADD_TAGS, ['kanji_auto_filled'])
        self.targetDeck = self._get_entry(config, CONFIG_TARGET_DECK, 'Japanese::Kanji')
        self.targetNoteType = self._get_entry(config, CONFIG_TARGET_NOTE_TYPE, 'Japanese::Kanji')
        self.targetTag = self._get_entry(config, CONFIG_TARGET_TAG, 'auto_created')
        # BUG FIX: the original called config.get(...) directly, which raises
        # AttributeError when getConfig() returns None; guard it like every
        # other access in this constructor.
        dst_mapping = config.get(CONFIG_DST_MAPPING) if config else None
        self.field_kun_yomi = self._get_entry(dst_mapping, CONFIG_DST_MAPPING_FIELD_KUNYOMI, 'kun_yomi')
        self.field_on_yomi = self._get_entry(dst_mapping, CONFIG_DST_MAPPING_FIELD_ONYOMI, 'on_yomi')
        self.field_english = self._get_entry(dst_mapping, CONFIG_DST_MAPPING_FIELD_ENGLISH, 'meanings')
        self.field_jlpt = self._get_entry(dst_mapping, CONFIG_DST_MAPPING_FIELD_JLPT, 'jlpt')
        self.field_grade = self._get_entry(dst_mapping, CONFIG_DST_MAPPING_FIELD_GRADE, 'grade')
        self.field_stroke_count = self._get_entry(dst_mapping, CONFIG_DST_MAPPING_FIELD_STROKE_COUNT, 'stroke_count')

    @staticmethod
    def _get_entry(src: Optional[Dict[str, Any]], field_name: str, default: Union[str, List[str]]) -> Union[str, List[str]]:
        """Returns src[field_name], or `default` if src is falsy or lacks the key."""
        if not src:
            return default
        return src.get(field_name, default)
# TODO probably replace with ConfigWrapper singleton
It covers everything from setting up the prerequisites to run your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, TestNG, etc.
You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.
Get 100 minutes of automation test minutes FREE!!
