How to use the get_labels method in autotest

Best Python code snippet using autotest_python

test_labels.py

Source:test_labels.py Github

copy

Full Screen

"""Label-handling tests for flair data points, reconstructed from a
scrape-mangled snippet of tests/test_labels.py.

NOTE(review): the snippet's opening lines (1-7) are truncated, so the
upstream imports (Sentence, Span, Relation, Label from flair, presumably)
are not visible here and are not invented — confirm against the upstream
repository.
"""
from typing import List


def _test_token_labels_fragment(sentence):
    # NOTE(review): the original test's `def` line and setup (truncated lines
    # 1-7, which apparently build sentence = Sentence("I love Berlin") and add
    # a "verb" POS label, judging by the assertions below) are not shown in the
    # snippet; the visible tail is wrapped here so it stays syntactically valid.
    sentence[1].add_label("sentiment", "positive")
    sentence[2].add_label("pos", "proper noun")
    sentence[0].add_label("pos", "pronoun")
    # check if there are three POS labels with correct text and values
    labels: List[Label] = sentence.get_labels("pos")
    assert 3 == len(labels)
    assert "I" == labels[0].data_point.text
    assert "pronoun" == labels[0].value
    assert "love" == labels[1].data_point.text
    assert "verb" == labels[1].value
    assert "Berlin" == labels[2].data_point.text
    assert "proper noun" == labels[2].value
    # check if there is one SENTIMENT label with correct text and values
    labels: List[Label] = sentence.get_labels("sentiment")
    assert 1 == len(labels)
    assert "love" == labels[0].data_point.text
    assert "positive" == labels[0].value
    # check if all tokens are correctly labeled
    assert 3 == len(sentence)
    assert "I" == sentence[0].text
    assert "love" == sentence[1].text
    assert "Berlin" == sentence[2].text
    assert 1 == len(sentence[0].get_labels("pos"))
    assert 1 == len(sentence[1].get_labels("pos"))
    assert 2 == len(sentence[1].labels)
    assert 1 == len(sentence[2].get_labels("pos"))
    assert "verb" == sentence[1].get_label("pos").value
    assert "positive" == sentence[1].get_label("sentiment").value
    # remove the pos label from the last word
    sentence[2].remove_labels("pos")
    # there should be 2 POS labels left
    labels: List[Label] = sentence.get_labels("pos")
    assert 2 == len(labels)
    assert 1 == len(sentence[0].get_labels("pos"))
    assert 1 == len(sentence[1].get_labels("pos"))
    assert 2 == len(sentence[1].labels)
    assert 0 == len(sentence[2].get_labels("pos"))
    # now remove all pos tags
    sentence.remove_labels("pos")
    print(sentence[0].get_labels("pos"))
    assert 0 == len(sentence.get_labels("pos"))
    assert 1 == len(sentence.get_labels("sentiment"))
    assert 1 == len(sentence.labels)
    assert 0 == len(sentence[0].get_labels("pos"))
    assert 0 == len(sentence[1].get_labels("pos"))
    assert 0 == len(sentence[2].get_labels("pos"))


def test_span_tags():
    # set 3 labels for 2 spans (HU is tagged twice)
    sentence = Sentence("Humboldt Universität zu Berlin is located in Berlin .")
    sentence[0:4].add_label("ner", "Organization")
    sentence[0:4].add_label("ner", "University")
    sentence[7:8].add_label("ner", "City")
    # check if there are three labels with correct text and values
    labels: List[Label] = sentence.get_labels("ner")
    assert 3 == len(labels)
    assert "Humboldt Universität zu Berlin" == labels[0].data_point.text
    assert "Organization" == labels[0].value
    assert "Humboldt Universität zu Berlin" == labels[1].data_point.text
    assert "University" == labels[1].value
    assert "Berlin" == labels[2].data_point.text
    assert "City" == labels[2].value
    # check if there are two spans with correct text and values
    spans: List[Span] = sentence.get_spans("ner")
    assert 2 == len(spans)
    assert "Humboldt Universität zu Berlin" == spans[0].text
    assert 2 == len(spans[0].get_labels("ner"))
    assert "Berlin" == spans[1].text
    assert "City" == spans[1].get_label("ner").value
    # now delete the NER tags of "Humboldt-Universität zu Berlin"
    sentence[0:4].remove_labels("ner")
    # should be only one NER label left
    labels: List[Label] = sentence.get_labels("ner")
    assert 1 == len(labels)
    assert "Berlin" == labels[0].data_point.text
    assert "City" == labels[0].value
    # and only one NER span
    spans: List[Span] = sentence.get_spans("ner")
    assert 1 == len(spans)
    assert "Berlin" == spans[0].text
    assert "City" == spans[0].get_label("ner").value


def test_different_span_tags():
    # set 3 labels for 2 spans (HU is tagged twice with different tags)
    sentence = Sentence("Humboldt Universität zu Berlin is located in Berlin .")
    sentence[0:4].add_label("ner", "Organization")
    sentence[0:4].add_label("orgtype", "University")
    sentence[7:8].add_label("ner", "City")
    # check if there are two NER labels with correct text and values
    labels: List[Label] = sentence.get_labels("ner")
    assert 2 == len(labels)
    assert "Humboldt Universität zu Berlin" == labels[0].data_point.text
    assert "Organization" == labels[0].value
    assert "Berlin" == labels[1].data_point.text
    assert "City" == labels[1].value
    # check if there are two spans with correct text and values
    spans: List[Span] = sentence.get_spans("ner")
    assert 2 == len(spans)
    assert "Humboldt Universität zu Berlin" == spans[0].text
    assert "Organization" == spans[0].get_label("ner").value
    assert "University" == spans[0].get_label("orgtype").value
    assert 1 == len(spans[0].get_labels("ner"))
    assert "Berlin" == spans[1].text
    assert "City" == spans[1].get_label("ner").value
    # now delete the NER tags of "Humboldt-Universität zu Berlin"
    sentence[0:4].remove_labels("ner")
    # should be only one NER label left
    labels: List[Label] = sentence.get_labels("ner")
    assert 1 == len(labels)
    assert "Berlin" == labels[0].data_point.text
    assert "City" == labels[0].value
    # and only one NER span
    spans: List[Span] = sentence.get_spans("ner")
    assert 1 == len(spans)
    assert "Berlin" == spans[0].text
    assert "City" == spans[0].get_label("ner").value
    # but there is also one orgtype span and label
    labels: List[Label] = sentence.get_labels("orgtype")
    assert 1 == len(labels)
    assert "Humboldt Universität zu Berlin" == labels[0].data_point.text
    assert "University" == labels[0].value
    # and only one orgtype span
    spans: List[Span] = sentence.get_spans("orgtype")
    assert 1 == len(spans)
    assert "Humboldt Universität zu Berlin" == spans[0].text
    assert "University" == spans[0].get_label("orgtype").value
    # let's add the NER tag back
    sentence[0:4].add_label("ner", "Organization")
    # check if there are two NER labels with correct text and values
    labels: List[Label] = sentence.get_labels("ner")
    print(labels)
    assert 2 == len(labels)
    assert "Humboldt Universität zu Berlin" == labels[0].data_point.text
    assert "Organization" == labels[0].value
    assert "Berlin" == labels[1].data_point.text
    assert "City" == labels[1].value
    # check if there are two spans with correct text and values
    spans: List[Span] = sentence.get_spans("ner")
    assert 2 == len(spans)
    assert "Humboldt Universität zu Berlin" == spans[0].text
    assert "Organization" == spans[0].get_label("ner").value
    assert "University" == spans[0].get_label("orgtype").value
    assert 1 == len(spans[0].get_labels("ner"))
    assert "Berlin" == spans[1].text
    assert "City" == spans[1].get_label("ner").value
    # now remove all NER tags
    sentence.remove_labels("ner")
    assert 0 == len(sentence.get_labels("ner"))
    assert 0 == len(sentence.get_spans("ner"))
    assert 1 == len(sentence.get_spans("orgtype"))
    assert 1 == len(sentence.get_labels("orgtype"))
    assert 1 == len(sentence.labels)
    assert 0 == len(sentence[0:4].get_labels("ner"))
    assert 1 == len(sentence[0:4].get_labels("orgtype"))


def test_relation_tags():
    # set 3 labels for 2 relations (HU pair is tagged twice with different tags)
    sentence = Sentence("Humboldt Universität zu Berlin is located in Berlin .")
    # create two relation labels
    Relation(sentence[0:4], sentence[7:8]).add_label("rel", "located in")
    Relation(sentence[0:2], sentence[3:4]).add_label("rel", "university of")
    Relation(sentence[0:2], sentence[3:4]).add_label("syntactic", "apposition")
    # there should be two relation labels
    labels: List[Label] = sentence.get_labels("rel")
    assert 2 == len(labels)
    assert "located in" == labels[0].value
    assert "university of" == labels[1].value
    # there should be one syntactic label
    labels: List[Label] = sentence.get_labels("syntactic")
    assert 1 == len(labels)
    # there should be two relations, one with two and one with one label
    relations: List[Relation] = sentence.get_relations("rel")
    assert 2 == len(relations)
    assert 1 == len(relations[0].labels)
    assert 2 == len(relations[1].labels)


def test_sentence_labels():
    # example sentence
    sentence = Sentence("I love Berlin")
    sentence.add_label("sentiment", "positive")
    sentence.add_label("topic", "travelling")
    assert 2 == len(sentence.labels)
    assert 1 == len(sentence.get_labels("sentiment"))
    assert 1 == len(sentence.get_labels("topic"))
    # add another topic label
    sentence.add_label("topic", "travelling")
    assert 3 == len(sentence.labels)
    assert 1 == len(sentence.get_labels("sentiment"))
    assert 2 == len(sentence.get_labels("topic"))
    sentence.remove_labels("topic")
    assert 1 == len(sentence.labels)
    assert 1 == len(sentence.get_labels("sentiment"))
    assert 0 == len(sentence.get_labels("topic"))


def test_mixed_labels():
    # example sentence
    sentence = Sentence("I love New York")
    # has sentiment value
    sentence.add_label("sentiment", "positive")
    # has 4 part of speech tags
    sentence[1].add_label("pos", "verb")
    sentence[2].add_label("pos", "proper noun")
    sentence[3].add_label("pos", "proper noun")
    sentence[0].add_label("pos", "pronoun")
    # has 1 NER tag
    sentence[2:4].add_label("ner", "City")
    # should be in total 6 labels
    assert 6 == len(sentence.labels)
    assert 4 == len(sentence.get_labels("pos"))
    assert 1 == len(sentence.get_labels("sentiment"))
    assert 1 == len(sentence.get_labels("ner"))


def test_data_point_equality():
    # example sentence
    sentence = Sentence("George Washington went to Washington .")
    # add two NER labels
    sentence[0:2].add_label("span_ner", "PER")
    sentence[0:2].add_label("span_other", "Politician")
    sentence[4].add_label("ner", "LOC")
    sentence[4].add_label("other", "Village")
    # get the four labels
    ner_label = sentence.get_label("ner")
    other_label = sentence.get_label("other")
    span_ner_label = sentence.get_label("span_ner")
    span_other_label = sentence.get_label("span_other")
    # check that only two of the respective data points are equal...
    # NOTE(review): the snippet is truncated here — the remaining assertions of
    # this test are not shown in the source and are not invented.

Full Screen

Full Screen

pspeech_features.py

Source:pspeech_features.py Github

copy

Full Screen

# pspeech_features.py — reconstructed from a scrape-mangled snippet.
# NOTE(review): the snippet's original lines 1-40 are truncated; they presumably
# hold the mfcc/logfbank imports and `import numpy as np` (np is used below) —
# confirm against the upstream file.
import os

import numpy as np
import scipy.io.wavfile as wav


def get_labels(vector, label, label2):
    """Build one feature-column name per entry of ``vector``.

    Names are 1-indexed: get_labels([a, b], 'mfcc_', 'means')
    -> ['mfcc_1_means', 'mfcc_2_means'].  Only len(vector) is used; the
    values themselves are ignored.
    """
    return [label + str(i + 1) + '_' + label2 for i in range(len(vector))]


def pspeech_featurize(file):
    """Extract summary statistics of MFCC, log-filterbank and SSC features
    from an audio file, together with matching column labels.

    Converts .mp3 input to .wav via ffmpeg first, since wav.read only
    handles WAV data.
    """
    # Third-party import deferred so the module (and get_labels) stays usable
    # without python_speech_features installed.
    from python_speech_features import mfcc, logfbank, ssc

    # convert if .mp3 to .wav or wav.read will fail
    convert = False  # NOTE(review): set but never read in the visible snippet
    if file[-4:] == '.mp3':
        convert = True
        os.system('ffmpeg -i %s %s' % (file, file[0:-4] + '.wav'))
        file = file[0:-4] + '.wav'
    (rate, sig) = wav.read(file)
    mfcc_feat = mfcc(sig, rate)
    fbank_feat = logfbank(sig, rate)
    ssc_feat = ssc(sig, rate)
    # Per-coefficient statistics.  The label lists only need a vector of the
    # right length, so each group reuses its first stats vector for labeling.
    one_ = np.mean(mfcc_feat, axis=0)
    one = get_labels(one_, 'mfcc_', 'means')
    two_ = np.std(mfcc_feat, axis=0)
    two = get_labels(one_, 'mfcc_', 'stds')
    three_ = np.amax(mfcc_feat, axis=0)
    three = get_labels(one_, 'mfcc_', 'max')
    four_ = np.amin(mfcc_feat, axis=0)
    four = get_labels(one_, 'mfcc_', 'min')
    five_ = np.median(mfcc_feat, axis=0)
    five = get_labels(one_, 'mfcc_', 'medians')
    # BUG FIX: the original computed np.mean for every fbank and ssc statistic
    # even though the generated labels say stds/max/min/medians (the mfcc group
    # above shows the intended pattern) — use the matching reductions.
    six_ = np.mean(fbank_feat, axis=0)
    six = get_labels(six_, 'fbank_', 'means')
    seven_ = np.std(fbank_feat, axis=0)
    seven = get_labels(six_, 'fbank_', 'stds')
    eight_ = np.amax(fbank_feat, axis=0)
    eight = get_labels(six_, 'fbank_', 'max')
    nine_ = np.amin(fbank_feat, axis=0)
    nine = get_labels(six_, 'fbank_', 'min')
    ten_ = np.median(fbank_feat, axis=0)
    ten = get_labels(six_, 'fbank_', 'medians')
    eleven_ = np.mean(ssc_feat, axis=0)
    eleven = get_labels(eleven_, 'spectral_centroid_', 'means')
    twelve_ = np.std(ssc_feat, axis=0)
    twelve = get_labels(eleven_, 'spectral_centroid_', 'stds')
    thirteen_ = np.amax(ssc_feat, axis=0)
    thirteen = get_labels(eleven_, 'spectral_centroid_', 'max')
    fourteen_ = np.amin(ssc_feat, axis=0)
    fourteen = get_labels(eleven_, 'spectral_centroid_', 'min')
    fifteen_ = np.median(ssc_feat, axis=0)
    fifteen = get_labels(eleven_, 'spectral_centroid_', 'medians')
    labels = (one + two + three + four + five + six + seven + eight + nine
              + ten + eleven + twelve + thirteen + fourteen + fifteen)
    # concatenate all statistic vectors into one flat feature vector
    features = np.append(one_, two_)
    for vec in (three_, four_, five_, six_, seven_, eight_, nine_, ten_,
                eleven_, twelve_, thirteen_, fourteen_):
        features = np.append(features, vec)
    # NOTE(review): the source snippet is truncated here — the original
    # presumably also appends fifteen_ and returns (features, labels), but
    # that code is not shown, so it is not invented.

Full Screen

Full Screen

parse_predictions.py

Source:parse_predictions.py Github

copy

Full Screen

# parse_predictions.py — reconstructed from a scrape-mangled snippet.
# NOTE(review): the snippet's original lines 1-15 are truncated; they presumably
# hold `import argparse`, `import codecs`, `import os`, `import numpy as np`
# (all used below) — confirm against the upstream file.
import argparse
import codecs
import os

import numpy as np


def _parse_predictions_fragment(input_path, output_path, task):
    # NOTE(review): the enclosing function's def line and opening lines are
    # truncated in the snippet; the parameter list here is reconstructed from
    # the names the visible body uses.  `run_classifier` is referenced but its
    # import sits in the truncated part.
    import run_regression  # project-local; presumably the STSB branch of a conditional import
    predicted_labels = []
    # map each GLUE task name to its processor's label set
    if task == "MRPC":
        # ids = MrpcProcessor().get_test_examples(os.environ['GLUE_DIR'] + "/MRPC")
        labels = run_classifier.MrpcProcessor().get_labels()
    if task == "RTE":
        labels = run_classifier.RTEProcessor().get_labels()
    if task == "QNLI":
        labels = run_classifier.QNLIProcessor().get_labels()
    if task == "QNLIV2":
        labels = run_classifier.QNLIProcessor().get_labels()
    if task == "MNLI":
        labels = run_classifier.MnliProcessor().get_labels()
    if task == "SST2":
        labels = run_classifier.SST2Processor().get_labels()
    if task == "CoLA":
        labels = run_classifier.ColaProcessor().get_labels()
    if task == "QQP":
        labels = run_classifier.QQPProcessor().get_labels()
    if task == "diagnostic":
        labels = run_classifier.DiagnosticProcessor().get_labels()
    # NOTE(review): any other task value (including STSB's `labels`-free path
    # combined with a non-STSB branch below) leaves `labels` unbound.
    with codecs.open(input_path, "r", "utf8") as f_in:
        for line in f_in.readlines():
            predictions = np.array(line.split("\t"), dtype=np.float32)
            if task != "STSB":
                # classification: pick the label with the highest score
                predicted_index = np.argmax(predictions)
                predicted_labels.append(labels[predicted_index])
            else:
                # regression (STS-B): the score itself is the prediction
                predicted_labels.append(predictions[0])
        # (redundant close removed: the `with` block already closes f_in)
    with codecs.open(output_path, "w", "utf8") as f_out:
        f_out.write("index\tprediction\n")
        for i, prediction in enumerate(predicted_labels):
            f_out.write(str(i) + "\t" + str(prediction) + "\n")


def write_fake_predictions(output_path, task="MRPC"):
    """Write a placeholder GLUE submission file: one prediction per test
    example, always the task's first label (or 2.5 for the STS-B regression
    task).

    :param output_path: path of the TSV file to write ("index\\tprediction" rows)
    :param task: GLUE task name (MNLI, QQP, WNLI, CoLA, STSB or diagnostic)

    Example (from the original author's environment):
        write_fake_predictions(".../fakes/STS-B.tsv", task="STSB")
    """
    # project-local modules, imported lazily as in the original
    if task != "STSB":
        import run_classifier
    else:
        import run_regression
    if task == "MNLI":
        test_examples = run_classifier.MnliProcessor().get_test_examples(os.environ['GLUE_DIR'] + "/" + task, False)
        labels = run_classifier.MnliProcessor().get_labels()
    elif task == "QQP":
        test_examples = run_classifier.QQPProcessor().get_test_examples(os.environ['GLUE_DIR'] + "/" + task)
        labels = run_classifier.QQPProcessor().get_labels()
    elif task == "WNLI":
        test_examples = run_classifier.WNLIProcessor().get_test_examples(os.environ['GLUE_DIR'] + "/" + task)
        labels = run_classifier.WNLIProcessor().get_labels()
    elif task == "CoLA":
        test_examples = run_classifier.ColaProcessor().get_test_examples(os.environ['GLUE_DIR'] + "/" + task)
        labels = run_classifier.ColaProcessor().get_labels()
    elif task == "STSB":
        test_examples = run_regression.STSBProcessor().get_test_examples(os.environ['GLUE_DIR'] + "/" + task)
    elif task == "diagnostic":
        test_examples = run_classifier.DiagnosticProcessor().get_test_examples(os.environ['GLUE_DIR'] + "/" + task)
        labels = run_classifier.DiagnosticProcessor().get_labels()
    else:
        # BUG FIX: unsupported tasks (including the default "MRPC"!) previously
        # fell through and crashed later with a NameError on test_examples.
        raise ValueError("unsupported task for fake predictions: %s" % task)
    with codecs.open(output_path, "w", "utf8") as f_out:
        f_out.write("index\tprediction\n")
        if task != "STSB":
            for i, data in enumerate(test_examples):
                # always emit the first label as the fake prediction
                f_out.write(str(i) + "\t" + str(labels[0]) + "\n")
        else:
            for i, data in enumerate(test_examples):
                f_out.write(str(i) + "\t" + str(2.5) + "\n")
        # (redundant f_out.close() removed: the `with` block closes the file)


def main():
    parser = argparse.ArgumentParser(description="Running prediction parser")
    # NOTE(review): this help text looks copy-pasted from another argument —
    # it describes an input path, not a task; kept verbatim from the source.
    parser.add_argument("--task", type=str, default=None,
                        help="Input path in case train and dev are in a single file", required=True)
    parser.add_argument(
        "--input_path", type=str,
        default="/work/anlausch/replant/bert/predictions/wn_binary_32_5e-05_3.0/test_results.tsv",
    )  # NOTE(review): the snippet ends mid-call here; the remaining keyword
    # arguments of this add_argument call and the rest of main() are truncated
    # in the source and are not invented.

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hubs compile a list of step-by-step guides to help you become proficient with different test automation frameworks, i.e. Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful