How to use the test_keywords method in pyshould

Best Python code snippet using pyshould_python

real_fake.py

Source:real_fake.py Github

copy

Full Screen

"""Real/fake tweet classification.

Trains an RBF-kernel SVC on one-hot-encoded keyword features concatenated
with a bag-of-words model of the tweet text, then predicts labels for the
test set and assembles an (id, prediction) submission array.

NOTE(review): the original source is truncated right after building the
submission array -- the final save-to-CSV call is not visible here.
"""
import re

import numpy as np
import pandas as pd
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
from sklearn.compose import ColumnTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
from sklearn.svm import SVC

# ==================================== TRAINING ====================================
train = pd.read_csv('data/train.csv')

# Column 1 = keyword, column 3 = tweet text, last column = target label.
train_keywords = train.iloc[:, 1].values
train_text = train.iloc[:, 3].values
train_targets = train.iloc[:, -1].values

# SimpleImputer expects a 2-D array, so reshape to a single column.
train_keywords = train_keywords.reshape(len(train_keywords), 1)

# Replace missing (NaN) keywords with an explicit 'no_keyword' category.
imputer = SimpleImputer(missing_values=np.nan, strategy='constant', fill_value='no_keyword')
train_keywords = imputer.fit_transform(train_keywords)

# One-hot encode the keyword column; drop='first' avoids the dummy-variable trap.
encoder = OneHotEncoder(dtype=int, drop='first')
ct = ColumnTransformer([('encoding', encoder, [0])], remainder='passthrough')
train_keywords = ct.fit_transform(train_keywords)

# Build the stopword set ONCE -- the original rebuilt set(all_stopwords)
# for every word of every tweet inside the comprehension.
all_stopwords = stopwords.words('english')
all_stopwords.remove('not')  # 'not' carries signal for disaster tweets; keep it.
_stopword_set = set(all_stopwords)
ps = PorterStemmer()


def _clean(text):
    """Strip non-letters, lowercase, drop stopwords, and stem each word."""
    words = re.sub('[^a-zA-Z]', ' ', text).lower().split()
    return ' '.join(ps.stem(word) for word in words if word not in _stopword_set)


# Clean the tweets into a stemmed, stopword-free corpus (the original
# duplicated this loop verbatim for train and test).
train_corpus = [_clean(t) for t in train_text]

# Bag-of-words model over the cleaned training corpus, densified for concatenation.
cv = CountVectorizer()
train_corpus = cv.fit_transform(train_corpus).toarray()

# Densify the sparse one-hot keyword matrix and join it with the text features.
train_keywords = train_keywords.toarray()
x_train = np.concatenate((train_keywords, train_corpus), axis=1)

# Create and train the classifier.
classifier = SVC(kernel='rbf', random_state=0)
classifier.fit(x_train, train_targets)

# ==================================== TESTING ====================================
test = pd.read_csv('data/test.csv')

# Column 0 = id, column 1 = keyword, last column = tweet text.
test_keywords = test.iloc[:, 1].values
test_text = test.iloc[:, -1].values
ids = test.iloc[:, 0].values

# Apply the SAME fitted transformers to the test data -- transform only,
# never fit, so the test features line up with the training features.
test_keywords = test_keywords.reshape(len(test_keywords), 1)
test_keywords = imputer.transform(test_keywords)
test_keywords = ct.transform(test_keywords)

test_corpus = [_clean(t) for t in test_text]
test_corpus = cv.transform(test_corpus).toarray()
test_keywords = test_keywords.toarray()

# Concatenate keyword features with bag-of-words features (the original
# comment mislabelled this as "test_corpus to test_corpus").
x_test = np.concatenate((test_keywords, test_corpus), axis=1)

# Predict with the trained classifier.
predictions = classifier.predict(x_test)

# Pair each id with its prediction, one row per tweet.
ids = ids.reshape(len(ids), 1)
predictions = predictions.reshape(len(predictions), 1)
submission = np.concatenate((ids, predictions), axis=1)
# Saving submission to a .csv file... (truncated in the original source)

Full Screen

Full Screen

test_gen.py

Source:test_gen.py Github

copy

Full Screen

"""Generate JUnit-style test classes from Java source files.

Usage: test_gen.py <input_path> <output_path> <output_package>

Reads every file in <input_path>, treats words NOT matching any pattern in
special_keywords.txt as attribute names, and fills the placeholders in
test_model.txt with a setter call and a not-null assertion per attribute,
writing one <Class>Test.java per input file into <output_path>.
"""
import ast
import os
import re
import sys
from os import listdir
from os.path import isfile, join


def check_keywords(word, keyword_list):
    """Return True if *word* matches any regex pattern in *keyword_list*."""
    return any(re.search(keyword, word) for keyword in keyword_list)


def read_dictionary(data):
    """Parse *data* (the text of field_values.json) into a dict.

    ast.literal_eval only accepts Python literals, so this is safe on
    untrusted input (unlike eval).
    """
    d = ast.literal_eval(data)
    print("attributes dictionary : ", type(d))
    print(d)
    return d


def generate_test(content, keyword_list, test_model, test_keywords,
                  classToBeTested, output_package, attributes_dictionary):
    """Fill the test-model template for one class and return the result.

    Words in *content* that do NOT match any special-keyword pattern are
    collected as attributes. *test_keywords* is accepted for interface
    compatibility but is not used in this function.
    """
    words = content.split()
    print("file content: \n")
    print(words)
    print("attributes found: \n")
    attributes = []
    for word in words:
        if not check_keywords(word, keyword_list):
            print(word)
            attributes.append(word)

    setters = ""
    assertions = ""
    # Build a setter and an assertion per attribute (skip the class name itself).
    for attribute in attributes:
        if attribute != classToBeTested:
            # Look up a canned value keyed by the attribute with its trailing
            # character (presumably ';' from the Java source) stripped.
            value_to_set = attributes_dictionary.get(attribute[:-1])
            # Uppercase the first letter and drop the trailing character to
            # form the JavaBean accessor suffix (e.g. "name;" -> "Name").
            attribute = (attribute[0].upper() + attribute[1:])[:-1]
            if value_to_set is None:  # was `== None` in the original
                setters = setters + "out.set" + attribute + "();\n"
            else:
                setters = setters + "out.set" + attribute + "(" + value_to_set + ");\n"
            assertions = assertions + "assertNotNull(" + "out.get" + attribute + "());\n"

    test_model = test_model.replace("[classToBeTested]", classToBeTested)
    test_model = test_model.replace("[setters]", setters)
    test_model = test_model.replace("[assertions]", assertions)
    test_model = test_model.replace("[outputPackage]", output_package)

    print("generated test:\n")
    print(test_model)
    return test_model


# ---- Command-line interface: requires exactly three arguments ----
if len(sys.argv) > 4:
    print('You have specified too many arguments')
    sys.exit()
if len(sys.argv) < 4:
    print('You need to specify the path to be listed and the output package')
    sys.exit()

input_path = sys.argv[1]
output_path = sys.argv[2]
output_package = sys.argv[3]

# Context managers close the files even on error (the original used manual
# close() calls, and one path spelled ".\s..." relying on the invalid escape
# sequence "\s" -- "\\" is used consistently here; same runtime value).
with open(".\\special_keywords.txt", "r") as keys_file:
    special_keywords = keys_file.read().split()
with open(".\\test_model.txt", "r") as test_model_file:
    test_model = test_model_file.read()
with open(".\\test_keywords.txt", "r") as test_keywords_file:
    test_keywords = test_keywords_file.read()
with open(".\\field_values.json", "r") as field_values_file:
    field_values = field_values_file.read()

attributes_dictionary = read_dictionary(field_values)

print("skipping these patterns: \n")
print(special_keywords)

if not os.path.isdir(input_path):
    print('The path specified does not exist')
    sys.exit()

print('reading data from: \t' + input_path + '\n')
print('writing data into: \t' + output_path + '\n')

onlyfiles = [f for f in listdir(input_path) if isfile(join(input_path, f))]
print("files to be processed: ")
print(onlyfiles)
print("\n")

for element in onlyfiles:
    print(".... generating test for class: \t" + element + "....")
    classToBeTested = element.split(".")[0]
    # The original never closed the input handle `f`; both files are now
    # managed by `with`.
    with open(input_path + "\\" + element, "r") as f:
        content = f.read()
    with open(output_path + "\\" + classToBeTested + "Test.java", "w") as fwr:
        fwr.write(generate_test(content, special_keywords, test_model,
                                test_keywords, classToBeTested,
                                output_package, attributes_dictionary))

Full Screen

Full Screen

test_data_utils.py

Source:test_data_utils.py Github

copy

Full Screen

1"""2Suite of tests for library 'data' utilities submodule.3"""4# Package imports5from aylluiot.utils.data_utils import parse_inputs6def test_parse_inputs() -> None:7 """8 Suite of tests for parsing_inputs.9 """10 test_keywords = ['var_1', 'var_2']11 test_kwargs = {'var_1': 'abc', 'var_2': 123}12 test_args_one = ('abc', 123)13 test_args_two = (test_kwargs, )14 output_1_a, output_1_b = parse_inputs(keywords=test_keywords,15 _args=test_args_one)16 output_2_a, output_2_b = parse_inputs(keywords=test_keywords,17 _args=test_args_two)18 output_1_c, output_1_d = parse_inputs(keywords=test_keywords,19 _kwargs=test_kwargs)20 assert output_1_a == output_2_a21 assert output_1_a == output_1_c22 assert output_1_b == output_2_b...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run pyshould automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful