How to use the get_latest_run method in stestr

Best Python code snippet using stestr_python
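
Both snippets below come from MLflow's LightGBM and XGBoost autologging test suites. In each file, get_latest_run is a small helper that fetches the most recently created run from the tracking store so the tests can assert what autologging recorded. A minimal sketch of the pattern, assuming a local tracking store where "0" is the default experiment id:

import mlflow

def get_latest_run():
    # The tests rely on index 0 being the most recent run in the experiment.
    client = mlflow.tracking.MlflowClient()
    return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)

# Usage: train a model with autologging enabled, then inspect the logged data.
# run = get_latest_run()
# print(run.data.params, run.data.metrics)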

test_lightgbm_autolog.py

Source: test_lightgbm_autolog.py (GitHub)


# (top of file truncated in the source snippet; imports restored from usage)
import os
import json

import pytest
import numpy as np
import pandas as pd
import lightgbm as lgb
from sklearn import datasets

import matplotlib as mpl
import mlflow
import mlflow.lightgbm

mpl.use("Agg")

def get_latest_run():
    client = mlflow.tracking.MlflowClient()
    return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)

@pytest.fixture(scope="session")
def bst_params():
    return {
        "objective": "multiclass",
        "num_class": 3,
    }

@pytest.fixture(scope="session")
def train_set():
    iris = datasets.load_iris()
    X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
    y = iris.target
    # set free_raw_data False to use raw data later.
    return lgb.Dataset(X, y, free_raw_data=False)

@pytest.mark.large
def test_lgb_autolog_ends_auto_created_run(bst_params, train_set):
    mlflow.lightgbm.autolog()
    lgb.train(bst_params, train_set, num_boost_round=1)
    assert mlflow.active_run() is None

@pytest.mark.large
def test_lgb_autolog_persists_manually_created_run(bst_params, train_set):
    mlflow.lightgbm.autolog()
    with mlflow.start_run() as run:
        lgb.train(bst_params, train_set, num_boost_round=1)
        assert mlflow.active_run()
        assert mlflow.active_run().info.run_id == run.info.run_id

@pytest.mark.large
def test_lgb_autolog_logs_default_params(bst_params, train_set):
    mlflow.lightgbm.autolog()
    lgb.train(bst_params, train_set)
    run = get_latest_run()
    params = run.data.params
    expected_params = {
        "num_boost_round": 100,
        "feature_name": "auto",
        "categorical_feature": "auto",
        "verbose_eval": True,
        "keep_training_booster": False,
    }
    expected_params.update(bst_params)
    for key, val in expected_params.items():
        assert key in params
        assert params[key] == str(val)
    unlogged_params = [
        "params",
        "train_set",
        "valid_sets",
        "valid_names",
        "fobj",
        "feval",
        "init_model",
        "evals_result",
        "learning_rates",
        "callbacks",
    ]
    for param in unlogged_params:
        assert param not in params

@pytest.mark.large
def test_lgb_autolog_logs_specified_params(bst_params, train_set):
    mlflow.lightgbm.autolog()
    expected_params = {
        "num_boost_round": 10,
        "early_stopping_rounds": 5,
        "verbose_eval": False,
    }
    lgb.train(bst_params, train_set, valid_sets=[train_set], **expected_params)
    run = get_latest_run()
    params = run.data.params
    expected_params.update(bst_params)
    for key, val in expected_params.items():
        assert key in params
        assert params[key] == str(val)
    unlogged_params = [
        "params",
        "train_set",
        "valid_sets",
        "valid_names",
        "fobj",
        "feval",
        "init_model",
        "evals_result",
        "learning_rates",
        "callbacks",
    ]
    for param in unlogged_params:
        assert param not in params

@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_validation_data(bst_params, train_set):
    mlflow.lightgbm.autolog()
    evals_result = {}
    lgb.train(
        bst_params,
        train_set,
        num_boost_round=10,
        valid_sets=[train_set],
        valid_names=["train"],
        evals_result=evals_result,
    )
    run = get_latest_run()
    data = run.data
    client = mlflow.tracking.MlflowClient()
    metric_key = "train-multi_logloss"
    metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
    assert metric_key in data.metrics
    assert len(metric_history) == 10
    assert metric_history == evals_result["train"]["multi_logloss"]

@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_multi_validation_data(bst_params, train_set):
    mlflow.lightgbm.autolog()
    evals_result = {}
    # If we use [train_set, train_set] here, LightGBM ignores the first dataset.
    # To avoid that, create a new Dataset object.
    valid_sets = [train_set, lgb.Dataset(train_set.data)]
    valid_names = ["train", "valid"]
    lgb.train(
        bst_params,
        train_set,
        num_boost_round=10,
        valid_sets=valid_sets,
        valid_names=valid_names,
        evals_result=evals_result,
    )
    run = get_latest_run()
    data = run.data
    client = mlflow.tracking.MlflowClient()
    for valid_name in valid_names:
        metric_key = "{}-multi_logloss".format(valid_name)
        metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
        assert metric_key in data.metrics
        assert len(metric_history) == 10
        assert metric_history == evals_result[valid_name]["multi_logloss"]

@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_multi_metrics(bst_params, train_set):
    mlflow.lightgbm.autolog()
    evals_result = {}
    params = {"metric": ["multi_error", "multi_logloss"]}
    params.update(bst_params)
    valid_sets = [train_set]
    valid_names = ["train"]
    lgb.train(
        params,
        train_set,
        num_boost_round=10,
        valid_sets=valid_sets,
        valid_names=valid_names,
        evals_result=evals_result,
    )
    run = get_latest_run()
    data = run.data
    client = mlflow.tracking.MlflowClient()
    for metric_name in params["metric"]:
        metric_key = "{}-{}".format(valid_names[0], metric_name)
        metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
        assert metric_key in data.metrics
        assert len(metric_history) == 10
        assert metric_history == evals_result["train"][metric_name]

@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_multi_validation_data_and_metrics(bst_params, train_set):
    mlflow.lightgbm.autolog()
    evals_result = {}
    params = {"metric": ["multi_error", "multi_logloss"]}
    params.update(bst_params)
    valid_sets = [train_set, lgb.Dataset(train_set.data)]
    valid_names = ["train", "valid"]
    lgb.train(
        params,
        train_set,
        num_boost_round=10,
        valid_sets=valid_sets,
        valid_names=valid_names,
        evals_result=evals_result,
    )
    run = get_latest_run()
    data = run.data
    client = mlflow.tracking.MlflowClient()
    for valid_name in valid_names:
        for metric_name in params["metric"]:
            metric_key = "{}-{}".format(valid_name, metric_name)
            metric_history = [
                x.value for x in client.get_metric_history(run.info.run_id, metric_key)
            ]
            assert metric_key in data.metrics
            assert len(metric_history) == 10
            assert metric_history == evals_result[valid_name][metric_name]

@pytest.mark.large
def test_lgb_autolog_logs_metrics_with_early_stopping(bst_params, train_set):
    mlflow.lightgbm.autolog()
    evals_result = {}
    params = {"metric": ["multi_error", "multi_logloss"]}
    params.update(bst_params)
    valid_sets = [train_set, lgb.Dataset(train_set.data)]
    valid_names = ["train", "valid"]
    model = lgb.train(
        params,
        train_set,
        num_boost_round=10,
        early_stopping_rounds=5,
        valid_sets=valid_sets,
        valid_names=valid_names,
        evals_result=evals_result,
    )
    run = get_latest_run()
    data = run.data
    client = mlflow.tracking.MlflowClient()
    assert "best_iteration" in data.metrics
    assert int(data.metrics["best_iteration"]) == model.best_iteration
    assert "stopped_iteration" in data.metrics
    assert int(data.metrics["stopped_iteration"]) == len(evals_result["train"]["multi_logloss"])
    for valid_name in valid_names:
        for metric_name in params["metric"]:
            metric_key = "{}-{}".format(valid_name, metric_name)
            metric_history = [
                x.value for x in client.get_metric_history(run.info.run_id, metric_key)
            ]
            assert metric_key in data.metrics
            best_metrics = evals_result[valid_name][metric_name][model.best_iteration - 1]
            assert metric_history == evals_result[valid_name][metric_name] + [best_metrics]

@pytest.mark.large
def test_lgb_autolog_logs_feature_importance(bst_params, train_set):
    mlflow.lightgbm.autolog()
    model = lgb.train(bst_params, train_set, num_boost_round=10)
    run = get_latest_run()
    run_id = run.info.run_id
    artifacts_dir = run.info.artifact_uri.replace("file://", "")
    client = mlflow.tracking.MlflowClient()
    artifacts = [x.path for x in client.list_artifacts(run_id)]
    for imp_type in ["split", "gain"]:
        plot_name = "feature_importance_{}.png".format(imp_type)
        assert plot_name in artifacts
        json_name = "feature_importance_{}.json".format(imp_type)
        assert json_name in artifacts
        json_path = os.path.join(artifacts_dir, json_name)
        with open(json_path, "r") as f:
            loaded_imp = json.load(f)
        features = model.feature_name()
        importance = model.feature_importance(importance_type=imp_type)
        imp = {ft: imp for ft, imp in zip(features, importance.tolist())}
        assert loaded_imp == imp

@pytest.mark.large
def test_no_figure_is_opened_after_logging(bst_params, train_set):
    mlflow.lightgbm.autolog()
    lgb.train(bst_params, train_set, num_boost_round=10)
    assert mpl.pyplot.get_fignums() == []

@pytest.mark.large
def test_lgb_autolog_loads_model_from_artifact(bst_params, train_set):
    mlflow.lightgbm.autolog()
    model = lgb.train(bst_params, train_set, num_boost_round=10)
    run = get_latest_run()
    run_id = run.info.run_id
    loaded_model = mlflow.lightgbm.load_model("runs:/{}/model".format(run_id))
    np.testing.assert_array_almost_equal(
        model.predict(train_set.data), loaded_model.predict(train_set.data)...
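
MlflowClient.list_run_infos is deprecated in newer MLflow releases in favor of search_runs, so on a current MLflow the same helper can be written as the following sketch (again assuming the default experiment id "0"):

import mlflow

def get_latest_run():
    client = mlflow.tracking.MlflowClient()
    # Order by start time, newest first, and keep only the most recent run.
    runs = client.search_runs(
        experiment_ids=["0"],
        order_by=["attributes.start_time DESC"],
        max_results=1,
    )
    return runs[0]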


test_xgboost_autolog.py

Source: test_xgboost_autolog.py (GitHub)


# (top of file truncated in the source snippet; imports restored from usage)
import os
import json

import pytest
import pandas as pd
import xgboost as xgb
from sklearn import datasets

import matplotlib as mpl
import mlflow
import mlflow.xgboost

mpl.use("Agg")

def get_latest_run():
    client = mlflow.tracking.MlflowClient()
    return client.get_run(client.list_run_infos(experiment_id="0")[0].run_id)

@pytest.fixture(scope="session")
def bst_params():
    return {
        "objective": "multi:softprob",
        "num_class": 3,
    }

@pytest.fixture(scope="session")
def dtrain():
    iris = datasets.load_iris()
    X = pd.DataFrame(iris.data[:, :2], columns=iris.feature_names[:2])
    y = iris.target
    return xgb.DMatrix(X, y)

@pytest.mark.large
def test_xgb_autolog_ends_auto_created_run(bst_params, dtrain):
    mlflow.xgboost.autolog()
    xgb.train(bst_params, dtrain)
    assert mlflow.active_run() is None

@pytest.mark.large
def test_xgb_autolog_persists_manually_created_run(bst_params, dtrain):
    mlflow.xgboost.autolog()
    with mlflow.start_run() as run:
        xgb.train(bst_params, dtrain)
        assert mlflow.active_run()
        assert mlflow.active_run().info.run_id == run.info.run_id

@pytest.mark.large
def test_xgb_autolog_logs_default_params(bst_params, dtrain):
    mlflow.xgboost.autolog()
    xgb.train(bst_params, dtrain)
    run = get_latest_run()
    params = run.data.params
    expected_params = {
        "num_boost_round": 10,
        "maximize": False,
        "early_stopping_rounds": None,
        "verbose_eval": True,
    }
    expected_params.update(bst_params)
    for key, val in expected_params.items():
        assert key in params
        assert params[key] == str(val)
    unlogged_params = [
        "dtrain",
        "evals",
        "obj",
        "feval",
        "evals_result",
        "xgb_model",
        "callbacks",
        "learning_rates",
    ]
    for param in unlogged_params:
        assert param not in params

@pytest.mark.large
def test_xgb_autolog_logs_specified_params(bst_params, dtrain):
    mlflow.xgboost.autolog()
    expected_params = {
        "num_boost_round": 20,
        "early_stopping_rounds": 5,
        "verbose_eval": False,
    }
    xgb.train(bst_params, dtrain, evals=[(dtrain, "train")], **expected_params)
    run = get_latest_run()
    params = run.data.params
    expected_params.update(bst_params)
    for key, val in expected_params.items():
        assert key in params
        assert params[key] == str(val)
    unlogged_params = [
        "dtrain",
        "evals",
        "obj",
        "feval",
        "evals_result",
        "xgb_model",
        "callbacks",
        "learning_rates",
    ]
    for param in unlogged_params:
        assert param not in params

@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_validation_data(bst_params, dtrain):
    mlflow.xgboost.autolog()
    evals_result = {}
    xgb.train(
        bst_params, dtrain, num_boost_round=20, evals=[(dtrain, "train")], evals_result=evals_result
    )
    run = get_latest_run()
    data = run.data
    metric_key = "train-merror"
    client = mlflow.tracking.MlflowClient()
    metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
    assert metric_key in data.metrics
    assert len(metric_history) == 20
    assert metric_history == evals_result["train"]["merror"]

@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_multi_validation_data(bst_params, dtrain):
    mlflow.xgboost.autolog()
    evals_result = {}
    evals = [(dtrain, "train"), (dtrain, "valid")]
    xgb.train(bst_params, dtrain, num_boost_round=20, evals=evals, evals_result=evals_result)
    run = get_latest_run()
    data = run.data
    client = mlflow.tracking.MlflowClient()
    for eval_name in [e[1] for e in evals]:
        metric_key = "{}-merror".format(eval_name)
        metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
        assert metric_key in data.metrics
        assert len(metric_history) == 20
        assert metric_history == evals_result[eval_name]["merror"]

@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_multi_metrics(bst_params, dtrain):
    mlflow.xgboost.autolog()
    evals_result = {}
    params = {"eval_metric": ["merror", "mlogloss"]}
    params.update(bst_params)
    xgb.train(
        params, dtrain, num_boost_round=20, evals=[(dtrain, "train")], evals_result=evals_result
    )
    run = get_latest_run()
    data = run.data
    client = mlflow.tracking.MlflowClient()
    for metric_name in params["eval_metric"]:
        metric_key = "train-{}".format(metric_name)
        metric_history = [x.value for x in client.get_metric_history(run.info.run_id, metric_key)]
        assert metric_key in data.metrics
        assert len(metric_history) == 20
        assert metric_history == evals_result["train"][metric_name]

@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_multi_validation_data_and_metrics(bst_params, dtrain):
    mlflow.xgboost.autolog()
    evals_result = {}
    params = {"eval_metric": ["merror", "mlogloss"]}
    params.update(bst_params)
    evals = [(dtrain, "train"), (dtrain, "valid")]
    xgb.train(params, dtrain, num_boost_round=20, evals=evals, evals_result=evals_result)
    run = get_latest_run()
    data = run.data
    client = mlflow.tracking.MlflowClient()
    for eval_name in [e[1] for e in evals]:
        for metric_name in params["eval_metric"]:
            metric_key = "{}-{}".format(eval_name, metric_name)
            metric_history = [
                x.value for x in client.get_metric_history(run.info.run_id, metric_key)
            ]
            assert metric_key in data.metrics
            assert len(metric_history) == 20
            assert metric_history == evals_result[eval_name][metric_name]

@pytest.mark.large
def test_xgb_autolog_logs_metrics_with_early_stopping(bst_params, dtrain):
    mlflow.xgboost.autolog()
    evals_result = {}
    params = {"eval_metric": ["merror", "mlogloss"]}
    params.update(bst_params)
    evals = [(dtrain, "train"), (dtrain, "valid")]
    model = xgb.train(
        params,
        dtrain,
        num_boost_round=20,
        early_stopping_rounds=5,
        evals=evals,
        evals_result=evals_result,
    )
    run = get_latest_run()
    data = run.data
    assert "best_iteration" in data.metrics
    assert int(data.metrics["best_iteration"]) == model.best_iteration
    assert "stopped_iteration" in data.metrics
    assert int(data.metrics["stopped_iteration"]) == len(evals_result["train"]["merror"]) - 1
    client = mlflow.tracking.MlflowClient()
    for eval_name in [e[1] for e in evals]:
        for metric_name in params["eval_metric"]:
            metric_key = "{}-{}".format(eval_name, metric_name)
            metric_history = [
                x.value for x in client.get_metric_history(run.info.run_id, metric_key)
            ]
            assert metric_key in data.metrics
            assert len(metric_history) == 20 + 1
            best_metrics = evals_result[eval_name][metric_name][model.best_iteration]
            assert metric_history == evals_result[eval_name][metric_name] + [best_metrics]

@pytest.mark.large
def test_xgb_autolog_logs_feature_importance(bst_params, dtrain):
    mlflow.xgboost.autolog()
    model = xgb.train(bst_params, dtrain)
    run = get_latest_run()
    run_id = run.info.run_id
    artifacts_dir = run.info.artifact_uri.replace("file://", "")
    client = mlflow.tracking.MlflowClient()
    artifacts = [x.path for x in client.list_artifacts(run_id)]
    importance_type = "weight"
    plot_name = "feature_importance_{}.png".format(importance_type)
    assert plot_name in artifacts
    json_name = "feature_importance_{}.json".format(importance_type)
    assert json_name in artifacts
    json_path = os.path.join(artifacts_dir, json_name)
    with open(json_path, "r") as f:
        loaded_imp = json.load(f)
    assert loaded_imp == model.get_score(importance_type=importance_type)

@pytest.mark.large
def test_xgb_autolog_logs_specified_feature_importance(bst_params, dtrain):
    importance_types = ["weight", "total_gain"]
    mlflow.xgboost.autolog(importance_types)
    model = xgb.train(bst_params, dtrain)
    run = get_latest_run()
    run_id = run.info.run_id
    artifacts_dir = run.info.artifact_uri.replace("file://", "")
    client = mlflow.tracking.MlflowClient()
    artifacts = [x.path for x in client.list_artifacts(run_id)]
    for imp_type in importance_types:
        plot_name = "feature_importance_{}.png".format(imp_type)
        assert plot_name in artifacts
        json_name = "feature_importance_{}.json".format(imp_type)
        assert json_name in artifacts
        json_path = os.path.join(artifacts_dir, json_name)
        with open(json_path, "r") as f:
            loaded_imp = json.load(f)
        assert loaded_imp == model.get_score(importance_type=imp_type)

@pytest.mark.large
def test_no_figure_is_opened_after_logging(bst_params, dtrain):
    mlflow.xgboost.autolog()
    xgb.train(bst_params, dtrain)
    assert mpl.pyplot.get_fignums() == []

@pytest.mark.large
def test_xgb_autolog_loads_model_from_artifact(bst_params, dtrain):
    mlflow.xgboost.autolog()
    model = xgb.train(bst_params, dtrain)
    run = get_latest_run()
    run_id = run.info.run_id
    loaded_model = mlflow.xgboost.load_model("runs:/{}/model".format(run_id))...
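
With autologging enabled, newer MLflow releases (1.25 and later) also expose mlflow.last_active_run(), which returns the run the last training call wrote to without querying the tracking store by experiment id. A short sketch under that assumption:

import mlflow
import mlflow.xgboost
import xgboost as xgb
from sklearn import datasets

mlflow.xgboost.autolog()
iris = datasets.load_iris()
dtrain = xgb.DMatrix(iris.data, iris.target)
xgb.train({"objective": "multi:softprob", "num_class": 3}, dtrain, num_boost_round=5)

# last_active_run() returns the autologged run that just finished.
run = mlflow.last_active_run()
print(run.info.run_id, run.data.params.get("num_boost_round"))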


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run stestr automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 automation testing minutes FREE!

Next-Gen App & Browser Testing Cloud
