How to use the addCap method in fMBT

Best Python code snippets using fMBT_python

json_pandas.py

Source: json_pandas.py (GitHub)


1"""2json 불러와서 캡션 붙이는 것3"""4import json5import pandas as pd6path = './datasets/vqa/v2_OpenEnded_mscoco_train2014_questions.json'7with open(path) as question:8 question = json.load(question)9# question['questions'][0]10# question['questions'][1]11# question['questions'][2]12df = pd.DataFrame(question['questions'])13df14caption_path = './datasets/caption/vis_st_trainval.json'15with open(caption_path) as cap:16 cap = json.load(cap)17df_cap = pd.DataFrame(cap)18df_cap19df_addcap = pd.merge(df, df_cap, how='left', on='image_id')20del df_addcap['file_path']21########################################################################################################################22"""23pandas to json24"""25df_addcap.to_json('./datasets/caption/train_cap2.json', orient='table')26with open('./datasets/caption/train_cap2.json') as train_cap:27 train_cap = json.load(train_cap)28########################################################################################################################29########################################################################################################################30"""31answer + cap32"""33path = '/home/nextgen/Desktop/mcan-vqa/datasets/vqa/v2_mscoco_train2014_annotations.json'34path = './datasets/vqa/v2_mscoco_val2014_annotations.json'35with open(path) as answer:36 answer = json.load(answer)37answer['annotations'][0]38df_ans = pd.DataFrame(answer['annotations'])39df_ans[:0]40del df_ans['question_type']41del df_ans['answers']42del df_ans['answer_type']43del df_ans['image_id']44df_ans[df_ans['question_id']==458752000]45df_addcap2 = pd.merge(df_addcap, df_ans, how='left', on='question_id')46df_addcap2[:0]47df_addcap2['multiple_choice_answer']48# del df_addcap['file_path']49df_addcap2.to_json('./datasets/caption/val_qacap.json', orient='table')50with open('./datasets/caption/train_qacap.json') as train_qacap:51 train_qacap = json.load(train_qacap)52########################################################################################################################53"""val test도 마찬가지"""54path = './datasets/vqa/v2_OpenEnded_mscoco_val2014_questions.json'55with open(path) as question:56 question = json.load(question)57df = pd.DataFrame(question['questions'])58df59caption_path = './datasets/caption/vis_st_trainval.json'60with open(caption_path) as cap:61 cap = json.load(cap)62df_cap = pd.DataFrame(cap)63df_cap64df_addcap = pd.merge(df, df_cap, how='left', on='image_id')65df_addcap[:0]66del df_addcap['file_path']67df_addcap.to_json('./datasets/caption/val_cap.json', orient='table')68#test69path = './datasets/vqa/v2_OpenEnded_mscoco_test-dev2015_questions.json'70with open(path) as question:71 question = json.load(question)72df = pd.DataFrame(question['questions'])73df74df['image_id'] = df.image_id.astype(int)75caption_path = './datasets/caption/vis_st_test.json'76with open(caption_path) as cap:77 cap = json.load(cap)78df_cap = pd.DataFrame(cap)79df_cap80df_cap['image_id'] = df_cap.image_id.astype(int)81df_addcap = pd.merge(df, df_cap, how='left', on='image_id')82df_addcap[:0]83del df_addcap['file_path']84df_addcap.to_json('./datasets/caption/test_cap.json', orient='table')85########################################################################################################################86from core.data.ans_punct import prep_ans87import numpy as np88import en_vectors_web_lg, random, re, json89import json90from core.data.data_utils import ques_load91stat_ques_list = \92 json.load(open('./datasets/caption/train_cap.json', 'r'))['data'] + \93 
json.load(open('./datasets/caption/val_cap.json', 'r'))['data'] + \94 json.load(open('./datasets/caption/test_cap.json', 'r'))['data']95def tokenize(stat_ques_list, use_glove):96 token_to_ix = {97 'PAD': 0,98 'UNK': 1,99 }100 spacy_tool = None101 pretrained_emb = []102 if use_glove:103 spacy_tool = en_vectors_web_lg.load()104 pretrained_emb.append(spacy_tool('PAD').vector)105 pretrained_emb.append(spacy_tool('UNK').vector)106 for ques in stat_ques_list:107 words = re.sub(108 r"([.,'!?\"()*#:;])",109 '',110 ques['question'].lower()111 ).replace('-', ' ').replace('/', ' ').split()112 for word in words:113 if word not in token_to_ix:114 token_to_ix[word] = len(token_to_ix)115 if use_glove:116 pretrained_emb.append(spacy_tool(word).vector)117 for ques in stat_ques_list:118 words = re.sub(119 r"([.,'!?\"()*#:;])",120 '',121 ques['caption'].lower()122 ).replace('-', ' ').replace('/', ' ').split()123 for word in words:124 if word not in token_to_ix:125 token_to_ix[word] = len(token_to_ix)126 if use_glove:127 pretrained_emb.append(spacy_tool(word).vector)128 pretrained_emb = np.array(pretrained_emb)129 return token_to_ix, pretrained_emb130token_to_ix, pretrained_emb = tokenize(stat_ques_list, True)131#######################################################################################################################132# with open('./datasets/vqa/v2_mscoco_train2014_annotations.json') as answer:133# answer = json.load(answer)134#135# answer['annotations'][2]136"""137답을 이용하는거로 하면 train val 비교로해야 함138test셋은 답을 제공하지 않아서 test할 때 답을 이용하는 모델을 사용할 수 없음139"""140####141import cal_sim142import pandas as pd143with open('datasets/caption/train_cap.json') as train_cap:144 train_cap = json.load(train_cap)145with open('datasets/caption/val_cap.json') as val_cap:146 val_cap = json.load(val_cap)147with open('datasets/caption/test_cap.json') as test_cap:148 test_cap = json.load(test_cap)149df_train = pd.DataFrame(train_cap['data'])150df_val = pd.DataFrame(val_cap['data'])151df_test = pd.DataFrame(test_cap['data'])152df_train[:0]153# df_train['similarity'] = cal_sim.sent_sim((df_train['question'], dtype=int32), (df_train['caption'], dtype=int32))154df_train.iloc[0]['question']155def txt2vec(sentence):156 # s = sentence.split()157 tt = []158 new_i = re.sub(159 r"([.,'!?\"()*#:;])",160 '',161 sentence.lower()162 ).replace('-', ' ').replace('/', ' ').split()163 for i in new_i:164 num = token_to_ix[i]165 tt.append(pretrained_emb[num])166 return tt167stat_ques_list[0]168token_to_ix['what']169len(txt2vec(df_train.iloc[0]['question']))170df_train.iloc[0]['question']171df_train.iloc[0]['caption']172len(txt2vec(df_train.iloc[0]['caption']))173from numpy import dot174from numpy.linalg import norm175import numpy as np176def cos_sim(A, B):177 return dot(A, np.transpose(B)) / (norm(A) * norm(B))178def word_sim(w1,w2): #word simiarity179 s = 0.5 * (1+ cos_sim(w1,w2))180 return s181def sent_sim(ss1, ss2): #sentence simiarity182 s1 = txt2vec(ss1)183 s2 = txt2vec(ss2)184 t = []185 for i in s1[2:]: #question 0,1 are PAD, UNK186 tmp = []187 for j in s2[2:]: #caption188 tmp_sim = word_sim(i,j)189 tmp.append(tmp_sim)190 t.append(max(tmp))191 sentence_sim = sum(t) / len(s1[2:])192 return sentence_sim193t = sent_sim('yes', 'hello')194tmp = sent_sim(df_train.iloc[105]['question'], df_train.iloc[103]['caption'])195t1 = sent_sim('Is there a travel guide on the table?', 'A place of cake and coffee are on an outdoor table')196t2 = sent_sim('yes', 'A place of cake and coffee are on an outdoor table')197t3 = sent_sim('no', 
'no')198df_train.iloc[105]['question'] #유사도 좀 이상한 듯 너무 높게 나오는 것 같은느낌199df_train.iloc[103]['caption']200cos_sim(txt2vec('e'), txt2vec('z'))201new_i = re.sub(202 r"([.,'!?\"()*#:;])",203 '',204 df_train.iloc[102]['question'].lower()205 ).replace('-', ' ').replace('/', ' ').split()...
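The heart of this snippet is a left join that attaches a caption to every question row via image_id, which is then exported with orient='table'. Below is a minimal, self-contained sketch of that pattern on toy data; the column names follow the snippet above, but the values and the output file name are made up for illustration.

import pandas as pd

# Toy stand-ins for the VQA question list and the caption file used above.
df = pd.DataFrame({
    "question_id": [1, 2],
    "image_id": [10, 20],
    "question": ["What color is the cat?", "How many people are there?"],
})
df_cap = pd.DataFrame({
    "image_id": [10, 20],
    "caption": ["a cat on a sofa", "a group of people at a table"],
    "file_path": ["train2014/10.jpg", "train2014/20.jpg"],
})

df_addcap = pd.merge(df, df_cap, how="left", on="image_id")  # attach captions by image_id
del df_addcap["file_path"]                                   # drop the unused column
df_addcap.to_json("train_cap_demo.json", orient="table")     # same export format as the snippet

Every question that shares an image_id with a caption row gets that caption; questions without a matching image keep NaN in the caption column because of the left join.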


questiongen.py

Source: questiongen.py (GitHub)


from random import randint
from math import ceil, floor
from question_list import addition, subtraction, multiplication, division, mixed

class QuestionGenerator():
    def __init__(self, itemType, itemUnit, itemName, itemPluralName, itemCost, numberOfGuests, level, question_num):
        self.itemType = itemType
        self.itemName = itemName
        self.itemPlurName = itemPluralName
        self.itemUnit = int(itemUnit)
        self.itemCost = int(itemCost)
        self.guestsNum = int(numberOfGuests)
        self.level = int(level)
        self.question_num = int(question_num)
        if (self.level <= 3):
            self.multcap = 5
            self.addcap = 25
        elif (self.level <= 6):
            self.multcap = 7
            self.addcap = 50
        else:
            self.multcap = 9
            self.addcap = 50
        self.divcap = self.multcap**2
        self.subcap = self.addcap*2

    def generate(self):
        add_q = addition(self.itemCost, self.itemUnit, self.itemPlurName, self.addcap, self.itemName)
        sub_q = subtraction(self.itemCost, self.itemUnit, self.itemPlurName, self.subcap, self.itemName)
        mult_q = multiplication(self.itemCost, self.itemUnit, self.itemPlurName, self.multcap, self.itemName)
        div_q = division(self.itemCost, self.itemUnit, self.itemPlurName, self.multcap, self.itemName, self.guestsNum)
        mix_q = mixed(self.itemCost, self.itemUnit, self.itemPlurName, self.multcap, self.itemName, self.guestsNum)
        if(self.question_num == 1):
            return addition(self.itemCost, self.itemUnit, self.itemPlurName, self.addcap, self.itemName)
        elif(self.question_num == 2):
            return subtraction(self.itemCost, self.itemUnit, self.itemPlurName, self.subcap, self.itemName)
        elif(self.question_num == 3):
            return multiplication(self.itemCost, self.itemUnit, self.itemPlurName, self.multcap, self.itemName)
        elif(self.question_num == 4):
            return division(self.itemCost, self.itemUnit, self.itemPlurName, self.multcap, self.itemName, self.guestsNum)
        elif(self.question_num == 5):...
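In this snippet, addcap is simply an attribute that caps the operands of generated addition questions according to the difficulty level (25 for levels 1-3, 50 above that). A usage sketch follows; the question_list helpers imported at the top are not shown in the snippet, so this call only works under the assumption that they are available, and all argument values here are invented for illustration.

# Hypothetical usage; assumes the question_list module provides the imported helpers.
gen = QuestionGenerator(
    itemType="food", itemUnit=6, itemName="cupcake", itemPluralName="cupcakes",
    itemCost=3, numberOfGuests=12, level=4, question_num=1,
)
print(gen.addcap)      # 50, because level 4 falls in the "level <= 6" branch
print(gen.generate())  # question_num == 1 returns an addition question built with that cap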


addcap.py

Source: addcap.py (GitHub)


#!/usr/bin/python
import sys,os
usage="""
Add Cap Using tLEaP.
Firstly addh
Then add cap NME and ACE to the C- and N- terminal of each peptide
Usage $0 in.pdb out.pdb
"""
if len(sys.argv)<3:
    print usage
    sys.exit()
infile,outfile=sys.argv[1:3]
leap1in='''
source leaprc.ff14SB
rec = loadpdb %s
savepdb rec addcap_tmp.pdb
quit
'''
ofp=open("addcap_tleap1.in","w")
ofp.write(leap1in%infile)
ofp.close()
os.system("tleap -f addcap_tleap1.in")
firstresi=True
lastter=True
cachedlines=""
oldindex=0
ofp=open("addcap_tmp2.pdb","w")
for line in open("addcap_tmp.pdb","r"):
    if len(line)>30: index=int(line[22:26])
    if index!=oldindex:
        firstresi=lastter
        oldindex=index
    if firstresi:
        if " H1 " not in line and " H2 " not in line and " H3 " not in line:
            cachedlines=cachedlines+line
    if not firstresi:
        ofp.write(cachedlines)
        cachedlines=""
    if "OXT" in line:
        ofp.write("%s N NME%s"%(line[:12],line[20:]))
    elif "H3 " in line:
        ofp.write("%s C ACE%s"%(line[:12],line[20:]))
    elif not firstresi:
        ofp.write(line)
    else:
        pass
    lastter=("TER" in line)
ofp.close()
leap2in='''
source leaprc.ff14SB
rec = loadpdb addcap_tmp2.pdb
savepdb rec %s
quit
'''
ofp=open("addcap_leap2.in","w")
ofp.write(leap2in%outfile)
ofp.close()...
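As the usage string says, the script is driven from the command line with an input and an output PDB, and it relies on tleap from AmberTools being on the PATH. A small invocation sketch follows; the file names are placeholders, and a Python 2 interpreter is assumed because the script uses the Python 2 print statement.

import subprocess

# Hypothetical invocation of the script above. Side effects: it writes
# addcap_tmp.pdb, addcap_tmp2.pdb and the two tleap input files into the
# current directory, and it requires the AmberTools "tleap" executable.
subprocess.run(["python2", "addcap.py", "input.pdb", "output_capped.pdb"], check=True)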


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run fMBT automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
