How to use the rule_wrapper method in Hypothesis

Best Python code snippet using hypothesis

run_exp_stub.py

Source:run_exp_stub.py Github

copy

Full Screen

"""Run stub for the noisy-propensity-scores SCAR experiment.

Generates a SCAR-biased (Selected Completely At Random) observed dataset from
a ground-truth knowledge base, evaluates AMIE rules on it, and writes per-rule
metrics (CWA/PCA confidences, true confidences, and IPW-based PU metrics for a
range of noisy label frequencies) to disk.
"""
import os
import random
from typing import List, Optional

import pandas as pd
from pylo.language.lp import (Clause as PyloClause, global_context as pylo_global_context)
from tqdm import tqdm

from artificial_bias_experiments.experiment_utils import print_or_log
from kbc_pul.amie.amie_output_rule_extraction import get_amie_rules_from_rule_tsv_file
from artificial_bias_experiments.evaluation.ground_truth_utils import TrueEntitySetsTuple, \
    get_true_entity_sets_as_string_sets
from artificial_bias_experiments.noisy_prop_scores.available_prop_scores_to_pu_metrics_controller import \
    RuleWrapperNoisyPropScoresToPuMetricsMap
from artificial_bias_experiments.noisy_prop_scores.scar.experiment_info import \
    NoisyPropScoresSCARExperimentInfo
from artificial_bias_experiments.noisy_prop_scores.scar.noisy_prop_scores_scar_file_naming import \
    NoisyPropScoresSCARFileNamer
from kbc_pul.data_structures.pandas_kb import PandasKnowledgeBaseWrapper
from kbc_pul.data_structures.rule_wrapper import RuleWrapper, filter_rules_predicting
from kbc_pul.experiments_utils.load_df_ground_truth import get_df_ground_truth
from kbc_pul.observed_data_generation.abstract_triple_selection import ObservedTargetRelationInfo
from kbc_pul.observed_data_generation.sar_two_subject_groups.sar_two_subject_groups_prop_scores import \
    PropScoresTwoSARGroups
from kbc_pul.observed_data_generation.scar.scar_propensity_score_controller import \
    SCARPropensityScoreController
from kbc_pul.observed_data_generation.scar.scar_triple_selection import SCARTripleSelector
from kbc_pul.rule_metrics.prediction_cache_rule_metrics.rule_cwa_and_pca_confidences_from_cached_predictions import \
    set_rule_wrapper_cwa_and_pca_confidence_calculated_from_cache
from kbc_pul.rule_metrics.prediction_cache_rule_metrics.rule_ipw_and_ipw_pca_confidences_from_cached_predictions import \
    calculate_rule_ipw_and_ipw_pca_confidences_from_df_cached_predictions
from kbc_pul.rule_metrics.prediction_cache_rule_metrics.rule_true_confidence_on_observed_data_from_cached_predictions import \
    calculate_true_confidence_metrics_from_df_cached_predictions


def write_observed_target_relation_to_csv(
        mask_observed_rows: pd.Series,
        experiment_dir: str,
        experiment_info: NoisyPropScoresSCARExperimentInfo,
        random_trial_index: int
) -> None:
    """Persist the boolean observation mask of one random trial as a TSV of 0/1 values.

    :param mask_observed_rows: boolean Series marking which ground-truth rows were observed.
    :param experiment_dir: directory the mask file is written into.
    :param experiment_info: experiment settings (target relation, true label frequency).
    :param random_trial_index: index of the random trial, used in the file name.
    """
    # NOTE(review): there is no separator between the relation name and "c<freq>",
    # so the file is named e.g. "mask_<relation>c0.5_trial0.csv" — confirm intended.
    filename_mask_observed_rows: str = os.path.join(
        experiment_dir,
        f"mask_{experiment_info.target_relation}"
        f"c{experiment_info.true_label_frequency}_trial{random_trial_index}.csv"
    )
    # Store as 0/1 integers, one value per line (no header, no index).
    mask_observed_rows.astype(int).to_csv(
        filename_mask_observed_rows,
        sep="\t",
        index=False,
        header=False
    )


def run_single_experiment_setting_of_experiment_noisy_prop_scores_scar(
        filename_ground_truth_dataset: str,
        separator_ground_truth_dataset: str,
        amie_rule_tsv_filename: str,
        experiment_info: NoisyPropScoresSCARExperimentInfo,
        random_seed: int,
        n_random_trials: int,
        verbose: bool = False,
) -> None:
    """Run one experiment setting over ``n_random_trials`` random trials.

    For each trial: sample an observed target relation via SCAR selection,
    evaluate every AMIE rule predicting the target relation on the biased KB,
    and write per-rule metrics (JSON) plus PU metrics for each available
    (noisy) label frequency (TSV) under the experiment directory.

    :param filename_ground_truth_dataset: path of the full ground-truth triple file.
    :param separator_ground_truth_dataset: column separator of that file.
    :param amie_rule_tsv_filename: TSV file with AMIE rules to evaluate.
    :param experiment_info: SCAR experiment settings.
    :param random_seed: seed for the trial-level RNG.
    :param n_random_trials: number of random trials to run.
    :param verbose: if True, print progress information.
    """
    experiment_dir: str = NoisyPropScoresSCARFileNamer.get_dir_experiment_specific(
        experiment_info=experiment_info
    )
    # Fix: the original printed experiment_dir unconditionally AND again under
    # `if verbose:`; keep only the guarded print.
    if verbose:
        print(experiment_dir)
    rng = random.Random(random_seed)
    df_ground_truth: pd.DataFrame = get_df_ground_truth(
        filename_ground_truth_dataset, separator_ground_truth_dataset
    )
    print_or_log(message="Generating SCAR dataset", logger=None, verbose=verbose)
    pandas_kb_wrapper = PandasKnowledgeBaseWrapper.create_from_full_data(df_full_data=df_ground_truth)
    df_ground_truth_target_relation: pd.DataFrame = pandas_kb_wrapper.get_relation(
        experiment_info.target_relation
    )
    if verbose:
        print("ground truth:")  # fix: was an f-string with no placeholders
        print(f"\t{df_ground_truth.shape[0]} literals")
        print(f"\t{df_ground_truth_target_relation.shape[0]} {experiment_info.target_relation} (target) literals")
    dir_rule_wrappers: str = os.path.join(
        experiment_dir,
        'rule_wrappers'
    )
    # exist_ok avoids the check-then-create race of the original exists()/makedirs() pair.
    os.makedirs(dir_rule_wrappers, exist_ok=True)
    # SCAR: every true triple is observed with the same constant label frequency.
    triple_selector: SCARTripleSelector = SCARTripleSelector(
        constant_label_frequency=experiment_info.true_label_frequency,
        verbose=False
    )
    #############################################################################################
    for random_trial_index in range(n_random_trials):
        observed_target_relation_info: ObservedTargetRelationInfo = triple_selector.select_observed_target_relation(
            df_ground_truth_target_relation=df_ground_truth_target_relation,
            rng=rng
        )
        write_observed_target_relation_to_csv(
            mask_observed_rows=observed_target_relation_info.mask,
            experiment_dir=experiment_dir,
            experiment_info=experiment_info,
            random_trial_index=random_trial_index
        )
        # Set the observed (biased) target relation in the KB wrapper.
        pandas_kb_wrapper.replace_predicate(
            relation=experiment_info.target_relation,
            new_df_for_relation=observed_target_relation_info.df
        )
        print_or_log(message="Finished generating biased dataset (non-PCA)", logger=None, verbose=verbose)
        print_or_log(message="Evaluating rules on biased dataset (non-PCA)", logger=None, verbose=verbose)
        true_ent_sets_tuple: TrueEntitySetsTuple = get_true_entity_sets_as_string_sets(
            df_ground_truth_target_relation
        )
        #############################################################################################
        if verbose:
            print("Start evaluation phase")
            print(f"True Label frequency: {experiment_info.true_label_frequency}")
        # Fresh metric-free clones each trial so per-trial metrics do not leak across trials.
        rule_wrapper_list: List[RuleWrapper] = [
            rule_wrapper.clone_with_metrics_unset()
            for rule_wrapper in filter_rules_predicting(
                get_amie_rules_from_rule_tsv_file(amie_rule_tsv_filename),
                head_functor_set={
                    experiment_info.target_relation
                }
            )
        ]
        print(f"Start evaluation (random trial index {random_trial_index}"
              f" ({random_trial_index + 1} / {n_random_trials}))")
        rule_wrapper: RuleWrapper
        for rule_wrapper in tqdm(rule_wrapper_list, disable=not verbose):
            rule: PyloClause = rule_wrapper.rule
            if verbose:
                print(f"Rule: {rule}")
            # Cache all predictions of the rule once; every metric below reuses it.
            o_df_cached_predictions: Optional[pd.DataFrame] = pandas_kb_wrapper.calculate_prediction_cache_for_rule(
                rule=rule_wrapper.rule
            )
            if o_df_cached_predictions is None or (len(o_df_cached_predictions) == 0):
                if verbose:
                    print(f"ZERO PREDICTIONS for rule {rule}")
            else:
                # STD, PCA confidences from the prediction cache.
                set_rule_wrapper_cwa_and_pca_confidence_calculated_from_cache(rule_wrapper, o_df_cached_predictions)
                # TRUE CONF & CONF* against the (unbiased) ground truth.
                calculate_true_confidence_metrics_from_df_cached_predictions(
                    rule_wrapper=rule_wrapper,
                    df_cached_predictions=o_df_cached_predictions,
                    df_ground_truth_target_relation=df_ground_truth_target_relation,
                    true_entity_sets=true_ent_sets_tuple
                )
                filename_rule_wrapper: str = os.path.join(
                    dir_rule_wrappers,
                    f"{str(rule_wrapper.rule)}_trial{random_trial_index}.json.gz"
                )
                rule_wrapper.to_json_file(filename_rule_wrapper)
                ##################################################
                dir_pu_metrics_of_rule_wrappers: str = os.path.join(
                    dir_rule_wrappers,
                    "pu_metrics"
                )
                os.makedirs(dir_pu_metrics_of_rule_wrappers, exist_ok=True)
                filename_pu_metrics_of_rule_wrapper: str = os.path.join(
                    dir_pu_metrics_of_rule_wrappers,
                    f"{str(rule_wrapper.rule)}_trial{random_trial_index}.tsv.gz"
                )
                # Under SCAR both subject groups share the same true propensity score.
                noisy_prop_score_tuple_to_pu_metrics_map_controller = RuleWrapperNoisyPropScoresToPuMetricsMap(
                    rule_str=str(rule_wrapper.rule),
                    random_trial_index=random_trial_index,
                    true_prop_scores=PropScoresTwoSARGroups(
                        in_filter=experiment_info.true_label_frequency,
                        other=experiment_info.true_label_frequency
                    )
                )
                for noisy_label_frequency in experiment_info.available_label_frequency_list:
                    # Reset the PU metrics so each noisy label frequency starts clean.
                    rule_wrapper.o_relative_pu_confidence_unbiased = None
                    rule_wrapper.o_relative_pu_confidence_pca_subject_to_object = None
                    rule_wrapper.o_relative_pu_confidence_pca_object_to_subject = None
                    rule_wrapper.o_c_weighted_std_conf = None
                    available_label_frequency_scar_propensity_score_controller = SCARPropensityScoreController(
                        constant_label_frequency=noisy_label_frequency
                    )
                    calculate_rule_ipw_and_ipw_pca_confidences_from_df_cached_predictions(
                        rule_wrapper=rule_wrapper,
                        df_cached_predictions=o_df_cached_predictions,
                        pylo_context=pylo_global_context,
                        propensity_score_controller=available_label_frequency_scar_propensity_score_controller,
                        verbose=verbose
                    )
                    rule_wrapper.set_inverse_c_weighted_std_confidence(
                        label_frequency=noisy_label_frequency
                    )
                    noisy_prop_score_tuple_to_pu_metrics_map_controller.add_pu_metrics_for_available_prop_scores(
                        noisy_prop_scores=PropScoresTwoSARGroups(
                            in_filter=noisy_label_frequency,
                            other=noisy_label_frequency
                        ),
                        o_relative_pu_confidence_unbiased=rule_wrapper.o_relative_pu_confidence_unbiased,
                        o_relative_pu_confidence_pca_subject_to_object=rule_wrapper.o_relative_pu_confidence_pca_subject_to_object,
                        o_relative_pu_confidence_pca_object_to_subject=rule_wrapper.o_relative_pu_confidence_pca_object_to_subject,
                        o_inverse_c_weighted_std_confidence=rule_wrapper.o_c_weighted_std_conf
                    )
                noisy_prop_score_tuple_to_pu_metrics_map_controller.to_tsv(
                    filename_noisy_prop_scores_to_pu_metrics_map=filename_pu_metrics_of_rule_wrapper
                )
                # NOTE(review): the web snippet is truncated here ("..."); any code
                # after this point in the original file is not visible.

Full Screen

Full Screen

rule.py

Source:rule.py Github

copy

Full Screen

from flask import jsonify, request
from wrappers import rule_wrapper
from validators import rule_validator
from utils.nft_errors import abort, NFTError, NFTValidationError


def rules():
    '''
    GET:
        List all rules in the system.
    POST:
        Create a new rule in the system; returns 201 with the created rule,
        400 on validation failure, 500 on internal error.
    '''
    if request.method == 'POST':
        try:
            # Validate the incoming JSON payload before creating the rule.
            rule_json = rule_validator.validate_new_rule(request.get_json())
            rule = rule_wrapper.create_rule(rule_json)
            response = jsonify(rule=rule)
            response.status_code = 201  # Created
            return response
        except NFTValidationError as e:
            return abort(400, e)  # client error: bad payload
        except NFTError as e:
            return abort(500, e)  # server-side failure
    else:
        # Any non-POST method (GET) lists all rules.
        return jsonify(rules=rule_wrapper.list_all_rules())


def rule(rule_id):
    '''
    GET:
        Get a rule by its id.
    DELETE:
        Delete the rule with the specified id; returns 204 on success,
        400 on validation failure, 500 on internal error.
    '''
    if request.method == 'DELETE':
        try:
            # NOTE(review): the validator's return value is immediately
            # overwritten by delete_rule's — presumably only the validation
            # side effect (raising) matters; confirm.
            rule = rule_validator.validate_rule_delete(rule_id)
            rule = rule_wrapper.delete_rule(rule_id)
            response = jsonify({})
            response.status_code = 204  # No Content
            return response
        except NFTValidationError as e:
            return abort(400, e)
        except NFTError as e:
            return abort(500, e)
    else:
        # NOTE(review): web snippet truncated here; the GET branch body is not
        # visible in the source being reviewed.
        ...

Full Screen

Full Screen

rule_validator.py

Source:rule_validator.py Github

copy

Full Screen

from utils.nft_errors import NFTValidationError, abort
from wrappers import rule_wrapper


def validate_new_rule(rule_json):
    """Extract and validate the 'rule' field from a rule-creation JSON payload.

    Raises NFTValidationError if the field is missing; otherwise returns the
    rule value.
    """
    # pop() with a default avoids a KeyError when the field is absent.
    rule = rule_json.pop('rule', None)
    validation_error = NFTValidationError('rule')
    # JSON errors
    if not rule:
        validation_error.add_error('rule', 'No "rule" field in rule json')
        raise validation_error
    # Return
    if validation_error.has_errors():
        raise validation_error
    else:
        return rule


def validate_rule_delete(rule_id):
    """Validate that a rule with the given id exists before deletion."""
    # NOTE(review): this branch is a no-op — a missing rule_id is silently
    # accepted; it looks like a validation error should be raised here. Confirm.
    if not rule_id:
        pass
    rule = rule_wrapper.get_rule(rule_id)
    # NOTE(review): also a no-op — an unknown rule_id is silently accepted.
    if not rule:
        pass
    # NOTE(review): web snippet truncated here ("..."); the remainder of this
    # function is not visible in the source being reviewed.
    ...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You could also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run hypothesis automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful