How to use set_only method in autotest

Best Python code snippet using autotest_python

views.py

Source:views.py Github

copy

Full Screen

1import re2import ast3import requests4import json5from .utils import ExtractAndRecommend, GetRawResult, GetDetailResult6from rest_framework import viewsets7from rest_framework.decorators import api_view8from rest_framework.response import Response9from django.shortcuts import get_object_or_40410from django.forms.models import model_to_dict11from .models import Message, Uploadcorpus, Extractor, Recommend, Simplesearch, Detailsearch, Temp, Folder, Collection, Repository, Corpus, Filerepo, Pending, Project, Projectinfo, Personal, MessageSerializer, UploadcorpusSerializer, ExtractorSerializer, RecommendSerializer, SimplesearchSerializer, DetailsearchSerializer, TempSerializer, FolderSerializer, CollectionSerializer, RepositorySerializer, CorpusSerializer, FilerepoSerializer, PendingSerializer, ProjectSerializer, ProjectinfoSerializer, PersonalSerializer12@api_view(('POST', 'GET',))13def scrapy(request):14 if request.method == 'POST':15 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))16 raw_dict_key = list(raw_dict.keys())[0]17 scrapy_dict = ast.literal_eval(raw_dict_key)18 print(scrapy_dict)19 extractors = scrapy_dict['extractors']20 recommends = scrapy_dict['recommends']21 keywords = []22 from backdoor.main import cnki_main, cqvip_main, wf_main23 data = Projectinfo.objects.last()24 source = model_to_dict(data)['source'].split(',')25 project = model_to_dict(data)['project']26 extract = model_to_dict(data)['extract'].split(',')27 recommend = model_to_dict(data)['recommend'].split(',')28 keywords.extend(extract)29 keywords.extend(recommend)30 for k in keywords:31 if k == '':32 keywords.remove(k)33 print(keywords)34 if not Pending.objects.filter(project=project):35 pend = Pending(project=project, extract=extractors, recommend=recommends)36 pend.save()37 else:38 print('待办项目已存在!')39 # ****** 自此开始, 根据source字段启动相应爬虫 ****** #40 if len(source) == 1:41 if source[0] == '知网':42 for word in keywords:43 cnki_main(word)44 elif source[0] == '维普':45 for word in keywords:46 cqvip_main(word)47 else:48 for word in keywords:49 wf_main(word)50 elif len(source) == 2:51 if re.search('知网,维普', model_to_dict(data)['source']):52 for word in keywords:53 cnki_main(word)54 cqvip_main(word)55 elif re.search('维普,万方', model_to_dict(data)['source']):56 for word in keywords:57 cqvip_main(word)58 wf_main(word)59 else:60 for word in keywords:61 cnki_main(word)62 wf_main(word)63 else:64 for word in keywords:65 cnki_main(word)66 cqvip_main(word)67 wf_main(word)68 # ****** 截此为止, 数据已插入ES中 ****** #69 # ****** 自动发送邮件 ****** #70 from backdoor.gmail import SendMail71 SendMail().automatic_send_email(project, keywords)72 return Response('待办项目已完成!')73 if request.method == 'GET':74 return Response('No method!')75@api_view(('GET',))76def extract(request):77 latest = Message.objects.last()78 raw_dict = model_to_dict(latest)79 print(raw_dict)80 serializer_context = {81 'request': request,82 }83 base_router = 'http://127.0.0.1:8000/api/extract/'84 extractors = []85 extractors_group = []86 db = 'extractor'87 raw_temps = Temp.objects.filter(record_db=db)88 print(raw_temps)89 if raw_temps:90 temps = []91 for raw_temp in raw_temps: # 最新的extractor id92 raw_temp_dict = model_to_dict(raw_temp)93 temps.append(raw_temp_dict)94 temp_dict = temps[-1]95 print(temp_dict)96 pre_id = temp_dict['record_id']97 post_id = raw_dict['id'] # 更新id98 temp = Temp(record_id=post_id, record_db=db)99 temp.save()100 updates = Message.objects.filter(id__gt=pre_id)101 print(updates)102 for update in updates:103 update_dict = model_to_dict(update)104 ex = ExtractAndRecommend()105 extr_kws = ex.extract_kws(update_dict)106 for kws in extr_kws:107 extr = Extractor(originkws=kws)108 extr.save()109 # retrieve kws110 data = Extractor.objects.filter(originkws=kws)111 raw_d_dict = []112 for d in data:113 d_dict = model_to_dict(d)114 raw_d_dict.append(d_dict)115 set_only = []116 set_only.append(raw_d_dict[0])117 # drop reqeated118 for item in raw_d_dict:119 k = 0120 for iitem in set_only:121 if item['originkws'] != iitem['originkws']:122 k += 1123 else:124 break125 if k == len(set_only):126 set_only.append(item)127 for only in set_only:128 pkid = only['id']129 extractor = ExtractorSerializer(data=only, context=serializer_context)130 if extractor.is_valid():131 ordered_li = extractor.validated_data132 ordered_li['pk'] = pkid133 ordered_li['url'] = base_router + str(pkid) + '/'134 ordered_li = dict(ordered_li)135 extractors.append(ordered_li)136 if extractors is not None:137 extractors.sort(key=lambda x: (x['pk']), reverse=False)138 extractors_group.extend(extractors)139 extractors.clear()140 else:141 print('The extractors are empty!')142 try:143 if extractors_group is not None:144 print(extractors_group)145 return Response(extractors_group)146 except Exception as e:147 return Response('None extracted objects!')148 else:149 pre_id = raw_dict['id']150 print(pre_id)151 temp = Temp(record_id=pre_id, record_db=db)152 temp.save()153 updates = Message.objects.all()154 print(updates)155 for update in updates:156 update_dict = model_to_dict(update)157 ex = ExtractAndRecommend()158 extr_kws = ex.extract_kws(update_dict)159 for kws in extr_kws:160 extr = Extractor(originkws=kws)161 extr.save()162 # retrieve kws163 data = Extractor.objects.filter(originkws=kws)164 raw_d_dict = []165 for d in data:166 d_dict = model_to_dict(d)167 raw_d_dict.append(d_dict)168 set_only = []169 set_only.append(raw_d_dict[0])170 # drop reqeated171 for item in raw_d_dict:172 k = 0173 for iitem in set_only:174 if item['originkws'] != iitem['originkws']:175 k += 1176 else:177 break178 if k == len(set_only):179 set_only.append(item)180 for only in set_only:181 pkid = only['id']182 extractor = ExtractorSerializer(data=only, context=serializer_context)183 if extractor.is_valid():184 ordered_li = extractor.validated_data185 ordered_li['pk'] = pkid186 ordered_li['url'] = base_router + str(pkid) + '/'187 ordered_li = dict(ordered_li)188 extractors.append(ordered_li)189 if extractors is not None:190 extractors.sort(key=lambda x: (x['pk']), reverse=False)191 extractors_group.extend(extractors)192 extractors.clear()193 else:194 print('The extractors are empty!')195 try:196 if extractors_group is not None:197 print(extractors_group)198 return Response(extractors_group)199 except Exception as e:200 return Response('None extracted objects!')201@api_view(('GET',))202def recommend(request):203 latest = Message.objects.last()204 raw_dict = model_to_dict(latest)205 serializer_context = {206 'request': request,207 }208 base_router = 'http://127.0.0.1:8000/api/recommend/'209 recommends = []210 recommends_group = []211 db = 'recommend'212 raw_temps = Temp.objects.filter(record_db=db)213 if raw_temps:214 temps = []215 for raw_temp in raw_temps:216 raw_temp_dict = model_to_dict(raw_temp)217 temps.append(raw_temp_dict)218 temp_dict = temps[-1]219 pre_id = temp_dict['record_id']220 post_id = raw_dict['id']221 temp = Temp(record_id=post_id, record_db=db)222 temp.save()223 updates = Message.objects.filter(id__gt=pre_id)224 print(updates)225 for update in updates:226 update_dict = model_to_dict(update)227 ex = ExtractAndRecommend()228 recom_kws = ex.recommend_kws(update_dict)229 try:230 if recom_kws:231 for rkws in recom_kws:232 recom = Recommend(recommendkws=rkws)233 recom.save()234 # retrieve kws235 data = Recommend.objects.filter(recommendkws=rkws)236 raw_d_dict = []237 for d in data:238 d_dict = model_to_dict(d)239 raw_d_dict.append(d_dict)240 set_only = []241 set_only.append(raw_d_dict[0])242 # drop reqeated243 for item in raw_d_dict:244 k = 0245 for iitem in set_only:246 if item['recommendkws'] != iitem['recommendkws']:247 k += 1248 else:249 break250 if k == len(set_only):251 set_only.append(item)252 for only in set_only:253 pkid = only['id']254 recommend = RecommendSerializer(data=only, context=serializer_context)255 if recommend.is_valid():256 ordered_li = recommend.validated_data257 ordered_li['pk'] = pkid258 ordered_li['url'] = base_router + str(pkid) + '/'259 ordered_li = dict(ordered_li)260 recommends.append(ordered_li)261 if recommends:262 recommends.sort(key=lambda x: (x['pk']), reverse=False)263 recommends_group.extend(recommends)264 recommends.clear()265 else:266 print('The recommends are empty!')267 except Exception as e:268 return Response(['暂无推荐'])269 try:270 return Response(recommends_group)271 except Exception as e:272 return Response(['暂无推荐'])273 else:274 pre_id = raw_dict['id']275 print(pre_id)276 temp = Temp(record_id=pre_id, record_db=db)277 temp.save()278 updates = Message.objects.all()279 print(updates)280 for update in updates:281 update_dict = model_to_dict(update)282 ex = ExtractAndRecommend()283 recom_kws = ex.recommend_kws(update_dict)284 try:285 if recom_kws:286 for rkws in recom_kws:287 recom = Recommend(recommendkws=rkws)288 recom.save()289 # retrieve kws290 data = Recommend.objects.filter(recommendkws=rkws)291 raw_d_dict = []292 for d in data:293 d_dict = model_to_dict(d)294 raw_d_dict.append(d_dict)295 set_only = []296 set_only.append(raw_d_dict[0])297 # drop reqeated298 for item in raw_d_dict:299 k = 0300 for iitem in set_only:301 if item['recommendkws'] != iitem['recommendkws']:302 k += 1303 else:304 break305 if k == len(set_only):306 set_only.append(item)307 for only in set_only:308 pkid = only['id']309 recommend = RecommendSerializer(data=only, context=serializer_context)310 if recommend.is_valid():311 ordered_li = recommend.validated_data312 ordered_li['pk'] = pkid313 ordered_li['url'] = base_router + str(pkid) + '/'314 ordered_li = dict(ordered_li)315 recommends.append(ordered_li)316 if recommends is not None:317 recommends.sort(key=lambda x: (x['pk']), reverse=False)318 recommends_group.extend(recommends)319 recommends.clear()320 else:321 print('The recommends are empty!')322 except Exception as e:323 return Response(['暂无推荐'])324 try:325 return Response(recommends_group)326 except Exception as e:327 return Response(['暂无推荐'])328@api_view(('DELETE','GET',))329def deletextract(request):330 if request.method == 'DELETE':331 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))332 raw_dict_key = list(raw_dict.keys())[0]333 extract_dict = ast.literal_eval(raw_dict_key)334 delete_id = extract_dict['delid']335 print(delete_id)336 if not delete_id:337 return Response('failed')338 else:339 print('1')340 get_object_or_404(Extractor, pk=int(delete_id)).delete()341 return Response('success')342 if request.method == 'GET':343 return Response('No method!')344@api_view(('DELETE','GET',))345def deleterecommend(request):346 if request.method == 'DELETE':347 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))348 raw_dict_key = list(raw_dict.keys())[0]349 recommend_dict = ast.literal_eval(raw_dict_key)350 delete_id = recommend_dict['delid']351 print(delete_id)352 if not delete_id:353 return Response('failed')354 else:355 get_object_or_404(Recommend, pk=int(delete_id)).delete()356 return Response('success')357 if request.method == 'GET':358 return Response('No method!')359# 承接前台upload组件action路径360# 不作实际操作361@api_view(('POST', 'GET',))362def mockupload(request):363 if request.method == 'POST':364 raw_dict = dict(zip(request.FILES.keys(), request.FILES.values()))365 corpus_name = raw_dict['file']366 print(corpus_name)367 return Response('Saved')368 if request.method == 'GET':369 return Response('No method!')370# 处理前台解析后的词表数据371@api_view(('POST', 'GET',))372def parseupload(request):373 if request.method == 'POST':374 serializer_context = {375 'request': request,376 }377 base_router = 'http://127.0.0.1:8000/api/extract/'378 # 包含抽取词和推荐词的列表379 total = []380 extractors = []381 extractors_group = []382 recommends = []383 recommends_group = []384 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))385 print(raw_dict)386 raw_dict_key = list(raw_dict.keys())[0]387 upload_dict = ast.literal_eval(raw_dict_key)388 print(upload_dict)389 raw_upload_data = upload_dict['content']390 upload_name = upload_dict['name']391 print(upload_name)392 up = Uploadcorpus(corpus_name=upload_name, corpus_kws=raw_upload_data)393 up.save()394 upload_data = raw_upload_data.split('\n')395 print(upload_data)396 ex = ExtractAndRecommend()397 extract_data = upload_data398 recommend_data = ex.recommend_upload_kws(upload_data)399 # ********************************************************* #400 # 有推荐数据401 if recommend_data:402 # ********************************************************* #403 # 存储清洗抽取词/词表内词汇404 for kws in extract_data:405 extr = Extractor(originkws=kws)406 extr.save()407 # retrieve kws408 data = Extractor.objects.filter(originkws=kws)409 raw_d_dict = []410 for d in data:411 d_dict = model_to_dict(d)412 raw_d_dict.append(d_dict)413 set_only = []414 set_only.append(raw_d_dict[0])415 # drop reqeated416 for item in raw_d_dict:417 k = 0418 for iitem in set_only:419 if item['originkws'] != iitem['originkws']:420 k += 1421 else:422 break423 if k == len(set_only):424 set_only.append(item)425 for only in set_only:426 pkid = only['id']427 extractor = ExtractorSerializer(data=only, context=serializer_context)428 if extractor.is_valid():429 ordered_li = extractor.validated_data430 ordered_li['pk'] = pkid431 ordered_li['url'] = base_router + str(pkid) + '/'432 ordered_li = dict(ordered_li)433 extractors.append(ordered_li)434 extractors.sort(key=lambda x: (x['pk']), reverse=False)435 extractors_group.extend(extractors)436 extractors.clear()437 # ********************************************************* #438 # 存储清洗推荐词439 for rkws in recommend_data:440 recom = Recommend(recommendkws=rkws)441 recom.save()442 # retrieve kws443 data = Recommend.objects.filter(recommendkws=rkws)444 raw_d_dict = []445 for d in data:446 d_dict = model_to_dict(d)447 raw_d_dict.append(d_dict)448 set_only = []449 set_only.append(raw_d_dict[0])450 # drop reqeated451 for item in raw_d_dict:452 k = 0453 for iitem in set_only:454 if item['recommendkws'] != iitem['recommendkws']:455 k += 1456 else:457 break458 if k == len(set_only):459 set_only.append(item)460 for only in set_only:461 pkid = only['id']462 recommend = RecommendSerializer(data=only, context=serializer_context)463 if recommend.is_valid():464 ordered_li = recommend.validated_data465 ordered_li['pk'] = pkid466 ordered_li['url'] = base_router + str(pkid) + '/'467 ordered_li = dict(ordered_li)468 recommends.append(ordered_li)469 recommends.sort(key=lambda x: (x['pk']), reverse=False)470 recommends_group.extend(recommends)471 recommends.clear()472 try:473 if extractors_group and recommends_group:474 total.append(extractors_group)475 total.append(recommends_group)476 print(total)477 return Response(total)478 else:479 return Response('failed')480 except Exception as e:481 return Response('failed')482 # 无推荐词汇, 只返回抽取词即可(一般若无推荐词, 会随机推荐, 此处防止意外)483 else:484 print('The recommends are empty')485 # ********************************************************* #486 # 存储清洗抽取词/词表内词汇487 for kws in extract_data:488 extr = Extractor(originkws=kws)489 extr.save()490 # retrieve kws491 data = Extractor.objects.filter(originkws=kws)492 raw_d_dict = []493 for d in data:494 d_dict = model_to_dict(d)495 raw_d_dict.append(d_dict)496 set_only = []497 set_only.append(raw_d_dict[0])498 # drop reqeated499 for item in raw_d_dict:500 k = 0501 for iitem in set_only:502 if item['originkws'] != iitem['originkws']:503 k += 1504 else:505 break506 if k == len(set_only):507 set_only.append(item)508 for only in set_only:509 pkid = only['id']510 extractor = ExtractorSerializer(data=only, context=serializer_context)511 if extractor.is_valid():512 ordered_li = extractor.validated_data513 ordered_li['pk'] = pkid514 ordered_li['url'] = base_router + str(pkid) + '/'515 ordered_li = dict(ordered_li)516 extractors.append(ordered_li)517 extractors.sort(key=lambda x: (x['pk']), reverse=False)518 extractors_group.extend(extractors)519 extractors.clear()520 try:521 if extractors_group:522 total.append(extractors_group)523 print(total)524 return Response(total)525 else:526 return Response('failed')527 except Exception as e:528 return Response('failed')529 if request.method == 'GET':530 return Response('No method!')531@api_view(('POST', 'GET',))532def startspider(request):533 if request.method == 'POST':534 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))535 raw_dict_key = list(raw_dict.keys())[0]536 spider_dict = ast.literal_eval(raw_dict_key)537 print(spider_dict)538 print(spider_dict['recommends'])539 # spider_names = spider_dict['spiders']540 # 转义符处理541 drop_slash_extractors = re.sub('\\\\*', '', spider_dict['extractors'])542 drop_quotation_extractors = re.sub('^"|"$', '', drop_slash_extractors)543 spider_extractors = ast.literal_eval(drop_quotation_extractors.strip('][')) # tuple544 extractors, recommends, search_keywords = [], [], []545 if isinstance(spider_extractors, tuple):546 for extractors_dict in spider_extractors:547 extracted_words = extractors_dict['originkws']548 extractors.append(extracted_words)549 else:550 extracted_words = spider_extractors['originkws']551 extractors.append(extracted_words)552 search_keywords.extend(extractors)553 if spider_dict['recommends'] != '"[]"' and spider_dict['recommends'] != '[]':554 drop_slash_recommends = re.sub('\\\\*', '', spider_dict['recommends'])555 drop_quotation_recommends = re.sub('^"|"$', '', drop_slash_recommends)556 spider_recommends = ast.literal_eval(drop_quotation_recommends.strip(']['))557 if isinstance(spider_recommends, tuple):558 for recommends_dict in spider_recommends:559 recommend_words = recommends_dict['recommendkws']560 recommends.append(recommend_words)561 else:562 recommend_words = spider_recommends['recommendkws']563 recommends.append(recommend_words)564 search_keywords.extend(recommends)565 else:566 print('暂无推荐词')567 keywords = list(set(search_keywords))568 print(keywords)569 new_keywords = []570 # 匹配检索表达式1571 if len(keywords) < 2:572 new_keywords.append(keywords[0])573 else:574 for word in keywords[:-2]:575 new_word = word + ' OR '576 new_keywords.append(new_word)577 new_keywords.append(keywords[-1])578 query = ''.join(new_keywords)579 sum_count = 0580 sum_doc = []581 # 爬虫字段存储582 queries = [] # 结构复杂,前台暂无法解析 --- {'query': {'multi_match': {'query': '氨基酸', 'fields': ['abstract', 'kws', 'title', 'info', 'fund', 'source']}}}583 # title, author, source, info, date, kws, fund, abstract, cited, downed, download = [], [], [], [], [], [], [], [], [], [], []584 for word in keywords:585 raw_search_result = GetRawResult()586 # 匹配检索表达式2587 query_body = raw_search_result.get_raw_result(word)[0] # {}588 queries.append(query_body)589 word_count = raw_search_result.get_raw_result(word)[1]590 sum_count += word_count591 word_doc = raw_search_result.get_raw_result(word)[2] # [{}]592 sum_doc.extend(word_doc) # [{}]593 set_only = []594 set_only.append(sum_doc[0])595 # drop reqeated596 for item in sum_doc:597 k = 0598 for iitem in set_only:599 if item['title'] != iitem['title']:600 k += 1601 else:602 break603 if k == len(set_only):604 set_only.append(item) # [{no repeated}]605 # 过滤后的搜索结果数606 filter_count = len(set_only)607 for each_word_doc in set_only:608 title = each_word_doc['title']609 author = each_word_doc['author']610 if re.search(';', author):611 author = re.sub(';', '', author)612 source = each_word_doc['source']613 info = each_word_doc['info']614 date = each_word_doc['date']615 kws = each_word_doc['kws']616 if kws == 'nan':617 kws = '暂无'618 fund = each_word_doc['fund']619 if fund == 'nan':620 fund = '暂无'621 abstract = each_word_doc['abstract']622 cited = each_word_doc['cited'].rstrip('.0')623 if cited == 'nan':624 cited = '0'625 downed = each_word_doc['downed'].rstrip('.0')626 if downed == 'nan':627 downed = '0'628 download = each_word_doc['download']629 sim = Simplesearch(title=title, author=author, source=source, info=info, date=date, kws=kws, fund=fund,630 abstract=abstract, cited=cited, downed=downed, download=download)631 sim.save()632 data = {633 'keywords': keywords,634 'query': query,635 'raw_search_count': sum_count,636 'filter_search_count': filter_count637 }638 # 暂时指定idata,后续增加引擎639 # for name in spider_names:640 # for word in keywords:641 # execute_spider(name, word)642 # subprocess.check_output(['scrapy', 'crawl', name, '-a', 'keyword='+word])643 if filter_count > 50: # 阈值根据es数据量设置644 return Response(data)645 else:646 return Response('failed')647 if request.method == 'GET':648 return Response('No method!')649@api_view(('GET',))650def rawresult(request):651 if request.method == 'GET':652 latest = Simplesearch.objects.last()653 raw_dict = model_to_dict(latest)654 db = 'rawresult'655 raw_temps = Temp.objects.filter(record_db=db)656 if raw_temps:657 temps = []658 for raw_temp in raw_temps:659 raw_temp_dict = model_to_dict(raw_temp)660 temps.append(raw_temp_dict)661 temp_dict = temps[-1]662 pre_id = temp_dict['record_id']663 post_id = raw_dict['id']664 temp = Temp(record_id=post_id, record_db=db)665 temp.save()666 updates = Simplesearch.objects.filter(id__gt=pre_id)667 # updates = Simplesearch.objects.all()[:50] # for test668 rawresults = []669 # 自增序号刷新【前端改】670 # uid = 1671 for update in updates:672 update_dict = model_to_dict(update)673 # update_dict['id'] = str(uid)674 # uid += 1675 rawresults.append(update_dict)676 if rawresults is not None:677 print(rawresults)678 data = {679 'record_id': pre_id,680 'result': rawresults681 }682 return Response(data)683 else:684 return Response('No suitable data!')685 else:686 pre_id = raw_dict['id']687 print(pre_id)688 temp = Temp(record_id=pre_id, record_db=db)689 temp.save()690 updates = Simplesearch.objects.all()691 # updates = Simplesearch.objects.all()[:20] # for test692 rawresults = []693 # 自增序号刷新【前端改】694 # uid = 1695 for update in updates:696 update_dict = model_to_dict(update)697 # update_dict['id'] = str(uid)698 # uid += 1699 rawresults.append(update_dict)700 if rawresults is not None:701 print(rawresults)702 data = {703 'record_id': '0',704 'result': rawresults705 }706 return Response(data)707 else:708 return Response('No suitable data!')709@api_view(('POST', 'GET',))710def getrecordrawId(request):711 if request.method == 'POST':712 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))713 raw_dict_key = list(raw_dict.keys())[0]714 id_dict = ast.literal_eval(raw_dict_key)715 record_id = int(id_dict['record_id'].strip('"'))716 # 按前台存储id查找717 if record_id == '0':718 results = []719 updates = Simplesearch.objects.all()720 for update in updates:721 update_dict = model_to_dict(update)722 results.append(update_dict)723 if results:724 print(results)725 data = {726 'record_id': '0', # 首次检索727 'result': results728 }729 return Response(data)730 else:731 return Response('No suitable data!')732 else:733 pre_id = record_id734 results = []735 updates = Simplesearch.objects.filter(id__gt=pre_id)736 for update in updates:737 update_dict = model_to_dict(update)738 results.append(update_dict)739 if results is not None:740 print(results)741 data = {742 'record_id': pre_id,743 'result': results744 }745 return Response(data)746 else:747 return Response('No suitable data!')748 if request.method == 'GET':749 return Response('No method!')750@api_view(('POST', 'GET',))751def selectedrawresult(request):752 if request.method == 'POST':753 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))754 raw_dict_key = list(raw_dict.keys())[0]755 target_dict = ast.literal_eval(raw_dict_key)756 target_id = int(target_dict['target'].strip('"'))757 # 按对应id查找758 target_data = Simplesearch.objects.filter(id=target_id)759 for data in target_data:760 clean_data = model_to_dict(data)761 return Response(clean_data)762 if request.method == 'GET':763 return Response('No method!')764@api_view(('POST', 'GET',))765def getexpression(request):766 if request.method == 'POST':767 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))768 raw_dict_key = list(raw_dict.keys())[0]769 expression_dict = ast.literal_eval(raw_dict_key)770 print(expression_dict)771 subrepo_name = expression_dict['name']772 subrepo_intro = expression_dict['introduction']773 raw_expression_body = expression_dict['expression']774 expression_body = ast.literal_eval(raw_expression_body.strip(']['))775 print(expression_body)776 # 有无日期筛选777 expression_date = expression_body[-1]778 start_date = expression_date['startdate']779 end_date = expression_date['endate']780 # 无有效日期781 if start_date == 'null' or end_date == 'null':782 # 1.有且仅有一个表达式783 if len(expression_body) == 2:784 expression_context = expression_body[0]785 # 转换成ES中的字段786 if expression_context['type'] == '标题':787 expression_context['type'] = 'title'788 elif expression_context['type'] == '作者':789 expression_context['type'] = 'author'790 elif expression_context['type'] == '来源':791 expression_context['type'] = 'source'792 elif expression_context['type'] == '机构/单位':793 expression_context['type'] = 'info'794 elif expression_context['type'] == '基金':795 expression_context['type'] = 'fund'796 elif expression_context['type'] == '关键词':797 expression_context['type'] = 'kws'798 elif expression_context['type'] == '摘要':799 expression_context['type'] = 'abstract'800 # 1.1 有且仅有必填项 关键词+单字段801 if not expression_context.get('relation'):802 # 1.1.1 无正则803 if expression_context['regex'] == '否':804 expression_type = expression_context['type'].split()805 expression_info = expression_context['info']806 detail = GetDetailResult()807 results = detail.get_only_expression(expression_type, expression_info)808 print(results)809 sum_doc = results[2]810 set_only = []811 set_only.append(sum_doc[0])812 # drop reqeated813 for item in sum_doc:814 k = 0815 for iitem in set_only:816 if item['title'] != iitem['title']:817 k += 1818 else:819 break820 if k == len(set_only):821 set_only.append(item) # [{no repeated}]822 # 过滤后的搜索结果数823 filter_count = len(set_only)824 # 清洗字段825 for each_word_doc in set_only:826 title = each_word_doc['title']827 author = each_word_doc['author']828 if re.search(';', author):829 author = re.sub(';', '', author)830 source = each_word_doc['source']831 info = each_word_doc['info']832 date = each_word_doc['date']833 kws = each_word_doc['kws']834 if kws == 'nan':835 kws = '暂无'836 fund = each_word_doc['fund']837 if fund == 'nan':838 fund = '暂无'839 abstract = each_word_doc['abstract']840 cited = each_word_doc['cited'].rstrip('.0')841 if cited == 'nan':842 cited = '0'843 downed = each_word_doc['downed'].rstrip('.0')844 if downed == 'nan':845 downed = '0'846 download = each_word_doc['download']847 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,848 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,849 download=download, name=subrepo_name, introduction=subrepo_intro)850 det.save()851 data = {852 'query': results[0],853 'raw_count': results[1],854 'filter_search_count': filter_count855 }856 print(data)857 return Response(data)858 # 1.1.2 有正则859 else:860 expression_type = expression_context['type'].split()861 expression_info = expression_context['info']862 detail = GetDetailResult()863 results = detail.get_only_expression_with_regexp(expression_type, expression_info)864 sum_doc = results[2]865 set_only = []866 set_only.append(sum_doc[0])867 # drop reqeated868 for item in sum_doc:869 k = 0870 for iitem in set_only:871 if item['title'] != iitem['title']:872 k += 1873 else:874 break875 if k == len(set_only):876 set_only.append(item) # [{no repeated}]877 # 过滤后的搜索结果数878 filter_count = len(set_only)879 # 清洗字段880 for each_word_doc in set_only:881 title = each_word_doc['title']882 author = each_word_doc['author']883 if re.search(';', author):884 author = re.sub(';', '', author)885 source = each_word_doc['source']886 info = each_word_doc['info']887 date = each_word_doc['date']888 kws = each_word_doc['kws']889 if kws == 'nan':890 kws = '暂无'891 fund = each_word_doc['fund']892 if fund == 'nan':893 fund = '暂无'894 abstract = each_word_doc['abstract']895 cited = each_word_doc['cited'].rstrip('.0')896 if cited == 'nan':897 cited = '0'898 downed = each_word_doc['downed'].rstrip('.0')899 if downed == 'nan':900 downed = '0'901 download = each_word_doc['download']902 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,903 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,904 download=download, name=subrepo_name, introduction=subrepo_intro)905 det.save()906 data = {907 'query': results[0],908 'raw_count': results[1],909 'filter_search_count': filter_count910 }911 print(data)912 return Response(data)913 # 1.2 不止必填项 多关键字+单字段914 else:915 # 1.2.1 无正则916 if expression_context['regex'] == '否':917 expression_type = expression_context['type'].split()918 expression_info = expression_context['info']919 expression_relation = expression_context['relation']920 expression_otherinfo = expression_context['otherinfo']921 detail = GetDetailResult()922 if expression_relation == '并含':923 in_method = '1'924 include_fields = expression_type925 include_kws = expression_info926 exclude_fields = ['']927 exclude_kws = ''928 results = detail.get_only_relation_expression(include_fields, include_kws, exclude_fields,929 exclude_kws, in_method)930 sum_doc = results[2]931 set_only = []932 set_only.append(sum_doc[0])933 # drop reqeated934 for item in sum_doc:935 k = 0936 for iitem in set_only:937 if item['title'] != iitem['title']:938 k += 1939 else:940 break941 if k == len(set_only):942 set_only.append(item) # [{no repeated}]943 # 过滤后的搜索结果数944 filter_count = len(set_only)945 # 清洗字段946 for each_word_doc in set_only:947 title = each_word_doc['title']948 author = each_word_doc['author']949 if re.search(';', author):950 author = re.sub(';', '', author)951 source = each_word_doc['source']952 info = each_word_doc['info']953 date = each_word_doc['date']954 kws = each_word_doc['kws']955 if kws == 'nan':956 kws = '暂无'957 fund = each_word_doc['fund']958 if fund == 'nan':959 fund = '暂无'960 abstract = each_word_doc['abstract']961 cited = each_word_doc['cited'].rstrip('.0')962 if cited == 'nan':963 cited = '0'964 downed = each_word_doc['downed'].rstrip('.0')965 if downed == 'nan':966 downed = '0'967 download = each_word_doc['download']968 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,969 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,970 download=download, name=subrepo_name, introduction=subrepo_intro)971 det.save()972 data = {973 'query': results[0],974 'raw_count': results[1],975 'filter_search_count': filter_count976 }977 print(data)978 return Response(data)979 elif expression_relation == '或含':980 in_method = '2'981 include_fields = expression_type982 include_kws = expression_info983 exclude_fields = ['']984 exclude_kws = ''985 results = detail.get_only_relation_expression(include_fields, include_kws, exclude_fields,986 exclude_kws, in_method)987 sum_doc = results[2]988 set_only = []989 set_only.append(sum_doc[0])990 # drop reqeated991 for item in sum_doc:992 k = 0993 for iitem in set_only:994 if item['title'] != iitem['title']:995 k += 1996 else:997 break998 if k == len(set_only):999 set_only.append(item) # [{no repeated}]1000 # 过滤后的搜索结果数1001 filter_count = len(set_only)1002 # 清洗字段1003 for each_word_doc in set_only:1004 title = each_word_doc['title']1005 author = each_word_doc['author']1006 if re.search(';', author):1007 author = re.sub(';', '', author)1008 source = each_word_doc['source']1009 info = each_word_doc['info']1010 date = each_word_doc['date']1011 kws = each_word_doc['kws']1012 if kws == 'nan':1013 kws = '暂无'1014 fund = each_word_doc['fund']1015 if fund == 'nan':1016 fund = '暂无'1017 abstract = each_word_doc['abstract']1018 cited = each_word_doc['cited'].rstrip('.0')1019 if cited == 'nan':1020 cited = '0'1021 downed = each_word_doc['downed'].rstrip('.0')1022 if downed == 'nan':1023 downed = '0'1024 download = each_word_doc['download']1025 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1026 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1027 download=download, name=subrepo_name, introduction=subrepo_intro)1028 det.save()1029 data = {1030 'query': results[0],1031 'raw_count': results[1],1032 'filter_search_count': filter_count1033 }1034 print(data)1035 return Response(data)1036 elif expression_relation == '不含':1037 in_method = '2'1038 include_fields = expression_type1039 include_kws = expression_info1040 exclude_fields = expression_type1041 exclude_kws = expression_otherinfo1042 results = detail.get_only_relation_expression(include_fields, include_kws, exclude_fields,1043 exclude_kws, in_method)1044 sum_doc = results[2]1045 set_only = []1046 set_only.append(sum_doc[0])1047 # drop reqeated1048 for item in sum_doc:1049 k = 01050 for iitem in set_only:1051 if item['title'] != iitem['title']:1052 k += 11053 else:1054 break1055 if k == len(set_only):1056 set_only.append(item) # [{no repeated}]1057 # 过滤后的搜索结果数1058 filter_count = len(set_only)1059 # 清洗字段1060 for each_word_doc in set_only:1061 title = each_word_doc['title']1062 author = each_word_doc['author']1063 if re.search(';', author):1064 author = re.sub(';', '', author)1065 source = each_word_doc['source']1066 info = each_word_doc['info']1067 date = each_word_doc['date']1068 kws = each_word_doc['kws']1069 if kws == 'nan':1070 kws = '暂无'1071 fund = each_word_doc['fund']1072 if fund == 'nan':1073 fund = '暂无'1074 abstract = each_word_doc['abstract']1075 cited = each_word_doc['cited'].rstrip('.0')1076 if cited == 'nan':1077 cited = '0'1078 downed = each_word_doc['downed'].rstrip('.0')1079 if downed == 'nan':1080 downed = '0'1081 download = each_word_doc['download']1082 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1083 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1084 download=download, name=subrepo_name, introduction=subrepo_intro)1085 det.save()1086 data = {1087 'query': results[0],1088 'raw_count': results[1],1089 'filter_search_count': filter_count1090 }1091 print(data)1092 return Response(data)1093 # 1.2.2 有正则1094 else:1095 expression_type = expression_context['type'].split()1096 expression_info = expression_context['info']1097 expression_relation = expression_context['relation']1098 expression_otherinfo = expression_context['otherinfo']1099 detail = GetDetailResult()1100 if expression_relation == '并含':1101 in_method = '1'1102 include_fields = expression_type1103 include_kws = expression_info1104 exclude_fields = ['']1105 exclude_kws = ''1106 results = detail.get_only_relation_expression_with_regexp(include_fields, include_kws, exclude_fields,1107 exclude_kws, in_method)1108 sum_doc = results[2]1109 set_only = []1110 set_only.append(sum_doc[0])1111 # drop reqeated1112 for item in sum_doc:1113 k = 01114 for iitem in set_only:1115 if item['title'] != iitem['title']:1116 k += 11117 else:1118 break1119 if k == len(set_only):1120 set_only.append(item) # [{no repeated}]1121 # 过滤后的搜索结果数1122 filter_count = len(set_only)1123 # 清洗字段1124 for each_word_doc in set_only:1125 title = each_word_doc['title']1126 author = each_word_doc['author']1127 if re.search(';', author):1128 author = re.sub(';', '', author)1129 source = each_word_doc['source']1130 info = each_word_doc['info']1131 date = each_word_doc['date']1132 kws = each_word_doc['kws']1133 if kws == 'nan':1134 kws = '暂无'1135 fund = each_word_doc['fund']1136 if fund == 'nan':1137 fund = '暂无'1138 abstract = each_word_doc['abstract']1139 cited = each_word_doc['cited'].rstrip('.0')1140 if cited == 'nan':1141 cited = '0'1142 downed = each_word_doc['downed'].rstrip('.0')1143 if downed == 'nan':1144 downed = '0'1145 download = each_word_doc['download']1146 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1147 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1148 download=download, name=subrepo_name, introduction=subrepo_intro)1149 det.save()1150 data = {1151 'query': results[0],1152 'raw_count': results[1],1153 'filter_search_count': filter_count1154 }1155 print(data)1156 return Response(data)1157 elif expression_relation == '或含':1158 in_method = '2'1159 include_fields = expression_type1160 include_kws = expression_info1161 exclude_fields = ['']1162 exclude_kws = ''1163 results = detail.get_only_relation_expression_with_regexp(include_fields, include_kws, exclude_fields,1164 exclude_kws, in_method)1165 sum_doc = results[2]1166 set_only = []1167 set_only.append(sum_doc[0])1168 # drop reqeated1169 for item in sum_doc:1170 k = 01171 for iitem in set_only:1172 if item['title'] != iitem['title']:1173 k += 11174 else:1175 break1176 if k == len(set_only):1177 set_only.append(item) # [{no repeated}]1178 # 过滤后的搜索结果数1179 filter_count = len(set_only)1180 # 清洗字段1181 for each_word_doc in set_only:1182 title = each_word_doc['title']1183 author = each_word_doc['author']1184 if re.search(';', author):1185 author = re.sub(';', '', author)1186 source = each_word_doc['source']1187 info = each_word_doc['info']1188 date = each_word_doc['date']1189 kws = each_word_doc['kws']1190 if kws == 'nan':1191 kws = '暂无'1192 fund = each_word_doc['fund']1193 if fund == 'nan':1194 fund = '暂无'1195 abstract = each_word_doc['abstract']1196 cited = each_word_doc['cited'].rstrip('.0')1197 if cited == 'nan':1198 cited = '0'1199 downed = each_word_doc['downed'].rstrip('.0')1200 if downed == 'nan':1201 downed = '0'1202 download = each_word_doc['download']1203 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1204 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1205 download=download, name=subrepo_name, introduction=subrepo_intro)1206 det.save()1207 data = {1208 'query': results[0],1209 'raw_count': results[1],1210 'filter_search_count': filter_count1211 }1212 print(data)1213 return Response(data)1214 elif expression_relation == '不含':1215 in_method = '2'1216 include_fields = expression_type1217 include_kws = expression_info1218 exclude_fields = expression_type1219 exclude_kws = expression_otherinfo1220 results = detail.get_only_relation_expression_with_regexp(include_fields, include_kws, exclude_fields,1221 exclude_kws, in_method)1222 sum_doc = results[2]1223 set_only = []1224 set_only.append(sum_doc[0])1225 # drop reqeated1226 for item in sum_doc:1227 k = 01228 for iitem in set_only:1229 if item['title'] != iitem['title']:1230 k += 11231 else:1232 break1233 if k == len(set_only):1234 set_only.append(item) # [{no repeated}]1235 # 过滤后的搜索结果数1236 filter_count = len(set_only)1237 # 清洗字段1238 for each_word_doc in set_only:1239 title = each_word_doc['title']1240 author = each_word_doc['author']1241 if re.search(';', author):1242 author = re.sub(';', '', author)1243 source = each_word_doc['source']1244 info = each_word_doc['info']1245 date = each_word_doc['date']1246 kws = each_word_doc['kws']1247 if kws == 'nan':1248 kws = '暂无'1249 fund = each_word_doc['fund']1250 if fund == 'nan':1251 fund = '暂无'1252 abstract = each_word_doc['abstract']1253 cited = each_word_doc['cited'].rstrip('.0')1254 if cited == 'nan':1255 cited = '0'1256 downed = each_word_doc['downed'].rstrip('.0')1257 if downed == 'nan':1258 downed = '0'1259 download = each_word_doc['download']1260 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1261 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1262 download=download, name=subrepo_name, introduction=subrepo_intro)1263 det.save()1264 data = {1265 'query': results[0],1266 'raw_count': results[1],1267 'filter_search_count': filter_count1268 }1269 print(data)1270 return Response(data)1271 # 2.多个表达式1272 else:1273 new_expression_body = []1274 print(expression_body[:-1])1275 for expression_context in expression_body[:-1]:1276 # 转换成ES中的字段1277 if expression_context['type'] == '标题':1278 expression_context['type'] = 'title'1279 new_expression_body.append(expression_context)1280 elif expression_context['type'] == '作者':1281 expression_context['type'] = 'author'1282 new_expression_body.append(expression_context)1283 elif expression_context['type'] == '来源':1284 expression_context['type'] = 'source'1285 new_expression_body.append(expression_context)1286 elif expression_context['type'] == '机构/单位':1287 expression_context['type'] = 'info'1288 new_expression_body.append(expression_context)1289 elif expression_context['type'] == '基金':1290 expression_context['type'] = 'fund'1291 new_expression_body.append(expression_context)1292 elif expression_context['type'] == '关键词':1293 expression_context['type'] = 'kws'1294 new_expression_body.append(expression_context)1295 elif expression_context['type'] == '摘要':1296 expression_context['type'] = 'abstract'1297 new_expression_body.append(expression_context)1298 print('元数据:' + str(new_expression_body))1299 detail = GetDetailResult()1300 results = detail.get_multiple_expression(new_expression_body)1301 sum_doc = results[2]1302 set_only = []1303 set_only.append(sum_doc[0])1304 # drop reqeated1305 for item in sum_doc:1306 k = 01307 for iitem in set_only:1308 if item['title'] != iitem['title']:1309 k += 11310 else:1311 break1312 if k == len(set_only):1313 set_only.append(item) # [{no repeated}]1314 # 过滤后的搜索结果数1315 filter_count = len(set_only)1316 # 清洗字段1317 for each_word_doc in set_only:1318 title = each_word_doc['title']1319 author = each_word_doc['author']1320 if re.search(';', author):1321 author = re.sub(';', '', author)1322 source = each_word_doc['source']1323 info = each_word_doc['info']1324 date = each_word_doc['date']1325 kws = each_word_doc['kws']1326 if kws == 'nan':1327 kws = '暂无'1328 fund = each_word_doc['fund']1329 if fund == 'nan':1330 fund = '暂无'1331 abstract = each_word_doc['abstract']1332 cited = each_word_doc['cited'].rstrip('.0')1333 if cited == 'nan':1334 cited = '0'1335 downed = each_word_doc['downed'].rstrip('.0')1336 if downed == 'nan':1337 downed = '0'1338 download = each_word_doc['download']1339 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1340 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1341 download=download, name=subrepo_name, introduction=subrepo_intro)1342 det.save()1343 data = {1344 'query': results[0],1345 'raw_count': results[1],1346 'filter_search_count': filter_count1347 }1348 print(data)1349 return Response(data)1350 # 有日期筛选1351 else:1352 # 1.有且仅有一个表达式1353 if len(expression_body) == 2:1354 expression_context = expression_body[0]1355 # 转换成ES中的字段1356 if expression_context['type'] == '标题':1357 expression_context['type'] = 'title'1358 elif expression_context['type'] == '作者':1359 expression_context['type'] = 'author'1360 elif expression_context['type'] == '来源':1361 expression_context['type'] = 'source'1362 elif expression_context['type'] == '机构/单位':1363 expression_context['type'] = 'info'1364 elif expression_context['type'] == '基金':1365 expression_context['type'] = 'fund'1366 elif expression_context['type'] == '关键词':1367 expression_context['type'] = 'kws'1368 elif expression_context['type'] == '摘要':1369 expression_context['type'] = 'abstract'1370 # 1.1 有且仅有必填项 关键词+单字段1371 if not expression_context['relation']:1372 # 1.1.1 无正则1373 if expression_context['regex'] == '否':1374 expression_type = expression_context['type'].split()1375 expression_info = expression_context['info']1376 detail = GetDetailResult()1377 results = detail.get_only_expression_with_date(expression_type, expression_info, start_date, end_date)1378 sum_doc = results[2]1379 set_only = []1380 set_only.append(sum_doc[0])1381 # drop reqeated1382 for item in sum_doc:1383 k = 01384 for iitem in set_only:1385 if item['title'] != iitem['title']:1386 k += 11387 else:1388 break1389 if k == len(set_only):1390 set_only.append(item) # [{no repeated}]1391 # 过滤后的搜索结果数1392 filter_count = len(set_only)1393 # 清洗字段1394 for each_word_doc in set_only:1395 title = each_word_doc['title']1396 author = each_word_doc['author']1397 if re.search(';', author):1398 author = re.sub(';', '', author)1399 source = each_word_doc['source']1400 info = each_word_doc['info']1401 date = each_word_doc['date']1402 kws = each_word_doc['kws']1403 if kws == 'nan':1404 kws = '暂无'1405 fund = each_word_doc['fund']1406 if fund == 'nan':1407 fund = '暂无'1408 abstract = each_word_doc['abstract']1409 cited = each_word_doc['cited'].rstrip('.0')1410 if cited == 'nan':1411 cited = '0'1412 downed = each_word_doc['downed'].rstrip('.0')1413 if downed == 'nan':1414 downed = '0'1415 download = each_word_doc['download']1416 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1417 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1418 download=download, name=subrepo_name, introduction=subrepo_intro)1419 det.save()1420 data = {1421 'query': results[0],1422 'raw_count': results[1],1423 'filter_search_count': filter_count1424 }1425 print(data)1426 return Response(data)1427 # 1.1.2 有正则1428 else:1429 expression_type = expression_context['type'].split()1430 expression_info = expression_context['info']1431 detail = GetDetailResult()1432 results = detail.get_only_expression_with_regexp(expression_type, expression_info, start_date, end_date)1433 sum_doc = results[2]1434 set_only = []1435 set_only.append(sum_doc[0])1436 # drop reqeated1437 for item in sum_doc:1438 k = 01439 for iitem in set_only:1440 if item['title'] != iitem['title']:1441 k += 11442 else:1443 break1444 if k == len(set_only):1445 set_only.append(item) # [{no repeated}]1446 # 过滤后的搜索结果数1447 filter_count = len(set_only)1448 # 清洗字段1449 for each_word_doc in set_only:1450 title = each_word_doc['title']1451 author = each_word_doc['author']1452 if re.search(';', author):1453 author = re.sub(';', '', author)1454 source = each_word_doc['source']1455 info = each_word_doc['info']1456 date = each_word_doc['date']1457 kws = each_word_doc['kws']1458 if kws == 'nan':1459 kws = '暂无'1460 fund = each_word_doc['fund']1461 if fund == 'nan':1462 fund = '暂无'1463 abstract = each_word_doc['abstract']1464 cited = each_word_doc['cited'].rstrip('.0')1465 if cited == 'nan':1466 cited = '0'1467 downed = each_word_doc['downed'].rstrip('.0')1468 if downed == 'nan':1469 downed = '0'1470 download = each_word_doc['download']1471 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1472 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1473 download=download, name=subrepo_name, introduction=subrepo_intro)1474 det.save()1475 data = {1476 'query': results[0],1477 'raw_count': results[1],1478 'filter_search_count': filter_count1479 }1480 print(data)1481 return Response(data)1482 # 1.2 不止必填项 多关键字+单字段1483 else:1484 # 1.2.1 无正则1485 if expression_context['regex'] == '否':1486 expression_type = expression_context['type'].split()1487 expression_info = expression_context['info']1488 expression_relation = expression_context['relation']1489 expression_otherinfo = expression_context['otherinfo']1490 detail = GetDetailResult()1491 if expression_relation == '并含':1492 in_method = '1'1493 include_fields = expression_type1494 include_kws = expression_info1495 exclude_fields = ['']1496 exclude_kws = ''1497 results = detail.get_only_relation_expression_with_date(include_fields, include_kws, exclude_fields,1498 exclude_kws, start_date, end_date, in_method)1499 sum_doc = results[2]1500 set_only = []1501 set_only.append(sum_doc[0])1502 # drop reqeated1503 for item in sum_doc:1504 k = 01505 for iitem in set_only:1506 if item['title'] != iitem['title']:1507 k += 11508 else:1509 break1510 if k == len(set_only):1511 set_only.append(item) # [{no repeated}]1512 # 过滤后的搜索结果数1513 filter_count = len(set_only)1514 # 清洗字段1515 for each_word_doc in set_only:1516 title = each_word_doc['title']1517 author = each_word_doc['author']1518 if re.search(';', author):1519 author = re.sub(';', '', author)1520 source = each_word_doc['source']1521 info = each_word_doc['info']1522 date = each_word_doc['date']1523 kws = each_word_doc['kws']1524 if kws == 'nan':1525 kws = '暂无'1526 fund = each_word_doc['fund']1527 if fund == 'nan':1528 fund = '暂无'1529 abstract = each_word_doc['abstract']1530 cited = each_word_doc['cited'].rstrip('.0')1531 if cited == 'nan':1532 cited = '0'1533 downed = each_word_doc['downed'].rstrip('.0')1534 if downed == 'nan':1535 downed = '0'1536 download = each_word_doc['download']1537 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1538 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1539 download=download, name=subrepo_name, introduction=subrepo_intro)1540 det.save()1541 data = {1542 'query': results[0],1543 'raw_count': results[1],1544 'filter_search_count': filter_count1545 }1546 print(data)1547 return Response(data)1548 elif expression_relation == '或含':1549 in_method = '2'1550 include_fields = expression_type1551 include_kws = expression_info1552 exclude_fields = ['']1553 exclude_kws = ''1554 results = detail.get_only_relation_expression_with_date(include_fields, include_kws, exclude_fields,1555 exclude_kws, start_date, end_date, in_method)1556 sum_doc = results[2]1557 set_only = []1558 set_only.append(sum_doc[0])1559 # drop reqeated1560 for item in sum_doc:1561 k = 01562 for iitem in set_only:1563 if item['title'] != iitem['title']:1564 k += 11565 else:1566 break1567 if k == len(set_only):1568 set_only.append(item) # [{no repeated}]1569 # 过滤后的搜索结果数1570 filter_count = len(set_only)1571 # 清洗字段1572 for each_word_doc in set_only:1573 title = each_word_doc['title']1574 author = each_word_doc['author']1575 if re.search(';', author):1576 author = re.sub(';', '', author)1577 source = each_word_doc['source']1578 info = each_word_doc['info']1579 date = each_word_doc['date']1580 kws = each_word_doc['kws']1581 if kws == 'nan':1582 kws = '暂无'1583 fund = each_word_doc['fund']1584 if fund == 'nan':1585 fund = '暂无'1586 abstract = each_word_doc['abstract']1587 cited = each_word_doc['cited'].rstrip('.0')1588 if cited == 'nan':1589 cited = '0'1590 downed = each_word_doc['downed'].rstrip('.0')1591 if downed == 'nan':1592 downed = '0'1593 download = each_word_doc['download']1594 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1595 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1596 download=download, name=subrepo_name, introduction=subrepo_intro)1597 det.save()1598 data = {1599 'query': results[0],1600 'raw_count': results[1],1601 'filter_search_count': filter_count1602 }1603 print(data)1604 return Response(data)1605 elif expression_relation == '不含':1606 in_method = '2'1607 include_fields = expression_type1608 include_kws = expression_info1609 exclude_fields = expression_type1610 exclude_kws = expression_otherinfo1611 results = detail.get_only_relation_expression_with_date(include_fields, include_kws, exclude_fields,1612 exclude_kws, start_date, end_date, in_method)1613 sum_doc = results[2]1614 set_only = []1615 set_only.append(sum_doc[0])1616 # drop reqeated1617 for item in sum_doc:1618 k = 01619 for iitem in set_only:1620 if item['title'] != iitem['title']:1621 k += 11622 else:1623 break1624 if k == len(set_only):1625 set_only.append(item) # [{no repeated}]1626 # 过滤后的搜索结果数1627 filter_count = len(set_only)1628 # 清洗字段1629 for each_word_doc in set_only:1630 title = each_word_doc['title']1631 author = each_word_doc['author']1632 if re.search(';', author):1633 author = re.sub(';', '', author)1634 source = each_word_doc['source']1635 info = each_word_doc['info']1636 date = each_word_doc['date']1637 kws = each_word_doc['kws']1638 if kws == 'nan':1639 kws = '暂无'1640 fund = each_word_doc['fund']1641 if fund == 'nan':1642 fund = '暂无'1643 abstract = each_word_doc['abstract']1644 cited = each_word_doc['cited'].rstrip('.0')1645 if cited == 'nan':1646 cited = '0'1647 downed = each_word_doc['downed'].rstrip('.0')1648 if downed == 'nan':1649 downed = '0'1650 download = each_word_doc['download']1651 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1652 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1653 download=download, name=subrepo_name, introduction=subrepo_intro)1654 det.save()1655 data = {1656 'query': results[0],1657 'raw_count': results[1],1658 'filter_search_count': filter_count1659 }1660 print(data)1661 return Response(data)1662 # 1.2.2 有正则1663 else:1664 expression_type = expression_context['type'].split()1665 expression_info = expression_context['info']1666 expression_relation = expression_context['relation']1667 expression_otherinfo = expression_context['otherinfo']1668 detail = GetDetailResult()1669 if expression_relation == '并含':1670 in_method = '1'1671 include_fields = expression_type1672 include_kws = expression_info1673 exclude_fields = ['']1674 exclude_kws = ''1675 results = detail.get_only_relation_expression_with_regexp_and_date(include_fields, include_kws, exclude_fields, exclude_kws, start_date, end_date, in_method)1676 sum_doc = results[2]1677 set_only = []1678 set_only.append(sum_doc[0])1679 # drop reqeated1680 for item in sum_doc:1681 k = 01682 for iitem in set_only:1683 if item['title'] != iitem['title']:1684 k += 11685 else:1686 break1687 if k == len(set_only):1688 set_only.append(item) # [{no repeated}]1689 # 过滤后的搜索结果数1690 filter_count = len(set_only)1691 # 清洗字段1692 for each_word_doc in set_only:1693 title = each_word_doc['title']1694 author = each_word_doc['author']1695 if re.search(';', author):1696 author = re.sub(';', '', author)1697 source = each_word_doc['source']1698 info = each_word_doc['info']1699 date = each_word_doc['date']1700 kws = each_word_doc['kws']1701 if kws == 'nan':1702 kws = '暂无'1703 fund = each_word_doc['fund']1704 if fund == 'nan':1705 fund = '暂无'1706 abstract = each_word_doc['abstract']1707 cited = each_word_doc['cited'].rstrip('.0')1708 if cited == 'nan':1709 cited = '0'1710 downed = each_word_doc['downed'].rstrip('.0')1711 if downed == 'nan':1712 downed = '0'1713 download = each_word_doc['download']1714 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1715 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1716 download=download, name=subrepo_name, introduction=subrepo_intro)1717 det.save()1718 data = {1719 'query': results[0],1720 'raw_count': results[1],1721 'filter_search_count': filter_count1722 }1723 print(data)1724 return Response(data)1725 elif expression_relation == '或含':1726 in_method = '2'1727 include_fields = expression_type1728 include_kws = expression_info1729 exclude_fields = ['']1730 exclude_kws = ''1731 results = detail.get_only_relation_expression_with_regexp_and_date(include_fields, include_kws, exclude_fields, exclude_kws, start_date, end_date, in_method)1732 sum_doc = results[2]1733 set_only = []1734 set_only.append(sum_doc[0])1735 # drop reqeated1736 for item in sum_doc:1737 k = 01738 for iitem in set_only:1739 if item['title'] != iitem['title']:1740 k += 11741 else:1742 break1743 if k == len(set_only):1744 set_only.append(item) # [{no repeated}]1745 # 过滤后的搜索结果数1746 filter_count = len(set_only)1747 # 清洗字段1748 for each_word_doc in set_only:1749 title = each_word_doc['title']1750 author = each_word_doc['author']1751 if re.search(';', author):1752 author = re.sub(';', '', author)1753 source = each_word_doc['source']1754 info = each_word_doc['info']1755 date = each_word_doc['date']1756 kws = each_word_doc['kws']1757 if kws == 'nan':1758 kws = '暂无'1759 fund = each_word_doc['fund']1760 if fund == 'nan':1761 fund = '暂无'1762 abstract = each_word_doc['abstract']1763 cited = each_word_doc['cited'].rstrip('.0')1764 if cited == 'nan':1765 cited = '0'1766 downed = each_word_doc['downed'].rstrip('.0')1767 if downed == 'nan':1768 downed = '0'1769 download = each_word_doc['download']1770 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1771 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1772 download=download, name=subrepo_name, introduction=subrepo_intro)1773 det.save()1774 data = {1775 'query': results[0],1776 'raw_count': results[1],1777 'filter_search_count': filter_count1778 }1779 print(data)1780 return Response(data)1781 elif expression_relation == '不含':1782 in_method = '2'1783 include_fields = expression_type1784 include_kws = expression_info1785 exclude_fields = expression_type1786 exclude_kws = expression_otherinfo1787 results = detail.get_only_relation_expression_with_regexp_and_date(include_fields, include_kws, exclude_fields, exclude_kws, start_date, end_date, in_method)1788 sum_doc = results[2]1789 set_only = []1790 set_only.append(sum_doc[0])1791 # drop reqeated1792 for item in sum_doc:1793 k = 01794 for iitem in set_only:1795 if item['title'] != iitem['title']:1796 k += 11797 else:1798 break1799 if k == len(set_only):1800 set_only.append(item) # [{no repeated}]1801 # 过滤后的搜索结果数1802 filter_count = len(set_only)1803 # 清洗字段1804 for each_word_doc in set_only:1805 title = each_word_doc['title']1806 author = each_word_doc['author']1807 if re.search(';', author):1808 author = re.sub(';', '', author)1809 source = each_word_doc['source']1810 info = each_word_doc['info']1811 date = each_word_doc['date']1812 kws = each_word_doc['kws']1813 if kws == 'nan':1814 kws = '暂无'1815 fund = each_word_doc['fund']1816 if fund == 'nan':1817 fund = '暂无'1818 abstract = each_word_doc['abstract']1819 cited = each_word_doc['cited'].rstrip('.0')1820 if cited == 'nan':1821 cited = '0'1822 downed = each_word_doc['downed'].rstrip('.0')1823 if downed == 'nan':1824 downed = '0'1825 download = each_word_doc['download']1826 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1827 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1828 download=download, name=subrepo_name, introduction=subrepo_intro)1829 det.save()1830 data = {1831 'query': results[0],1832 'raw_count': results[1],1833 'filter_search_count': filter_count1834 }1835 print(data)1836 return Response(data)1837 # 2.多个表达式1838 else:1839 new_expression_body = []1840 for expression_context in expression_body[:-1]:1841 # 转换成ES中的字段1842 if expression_context['type'] == '标题':1843 expression_context['type'] = 'title'1844 new_expression_body.append(expression_context)1845 elif expression_context['type'] == '作者':1846 expression_context['type'] = 'author'1847 new_expression_body.append(expression_context)1848 elif expression_context['type'] == '来源':1849 expression_context['type'] = 'source'1850 new_expression_body.append(expression_context)1851 elif expression_context['type'] == '机构/单位':1852 expression_context['type'] = 'info'1853 new_expression_body.append(expression_context)1854 elif expression_context['type'] == '基金':1855 expression_context['type'] = 'fund'1856 new_expression_body.append(expression_context)1857 elif expression_context['type'] == '关键词':1858 expression_context['type'] = 'kws'1859 new_expression_body.append(expression_context)1860 elif expression_context['type'] == '摘要':1861 expression_context['type'] = 'abstract'1862 new_expression_body.append(expression_context)1863 detail = GetDetailResult()1864 results = detail.get_multiple_expression_with_date(new_expression_body, start_date, end_date)1865 sum_doc = results[2]1866 set_only = []1867 set_only.append(sum_doc[0])1868 # drop reqeated1869 for item in sum_doc:1870 k = 01871 for iitem in set_only:1872 if item['title'] != iitem['title']:1873 k += 11874 else:1875 break1876 if k == len(set_only):1877 set_only.append(item) # [{no repeated}]1878 # 过滤后的搜索结果数1879 filter_count = len(set_only)1880 # 清洗字段1881 for each_word_doc in set_only:1882 title = each_word_doc['title']1883 author = each_word_doc['author']1884 if re.search(';', author):1885 author = re.sub(';', '', author)1886 source = each_word_doc['source']1887 info = each_word_doc['info']1888 date = each_word_doc['date']1889 kws = each_word_doc['kws']1890 if kws == 'nan':1891 kws = '暂无'1892 fund = each_word_doc['fund']1893 if fund == 'nan':1894 fund = '暂无'1895 abstract = each_word_doc['abstract']1896 cited = each_word_doc['cited'].rstrip('.0')1897 if cited == 'nan':1898 cited = '0'1899 downed = each_word_doc['downed'].rstrip('.0')1900 if downed == 'nan':1901 downed = '0'1902 download = each_word_doc['download']1903 det = Detailsearch(title=title, author=author, source=source, info=info, date=date,1904 kws=kws, fund=fund, abstract=abstract, cited=cited, downed=downed,1905 download=download, name=subrepo_name, introduction=subrepo_intro)1906 det.save()1907 data = {1908 'query': results[0],1909 'raw_count': results[1],1910 'filter_search_count': filter_count1911 }1912 print(data)1913 return Response(data)1914 if request.method == 'GET':1915 return Response('No method!')1916@api_view(('GET',))1917def filteresult(request):1918 if request.method == 'GET':1919 latest = Detailsearch.objects.last()1920 raw_dict = model_to_dict(latest)1921 db = 'filteresult'1922 raw_temps = Temp.objects.filter(record_db=db)1923 if raw_temps:1924 temps = []1925 for raw_temp in raw_temps:1926 raw_temp_dict = model_to_dict(raw_temp)1927 temps.append(raw_temp_dict)1928 temp_dict = temps[-1]1929 pre_id = temp_dict['record_id']1930 post_id = raw_dict['id']1931 temp = Temp(record_id=post_id, record_db=db)1932 temp.save()1933 filteresults = []1934 updates = Detailsearch.objects.filter(id__gt=pre_id)1935 # updates = Detailsearch.objects.all()[:5] # for test1936 # 自增序号刷新【前端改】1937 # uid = 11938 for update in updates:1939 update_dict = model_to_dict(update)1940 # update_dict['id'] = str(uid)1941 # uid += 11942 filteresults.append(update_dict)1943 if filteresults is not None:1944 print(filteresults)1945 data = {1946 'record_id': pre_id,1947 'result': filteresults1948 }1949 return Response(data)1950 else:1951 return Response('No suitable data!')1952 else:1953 pre_id = raw_dict['id']1954 print(pre_id)1955 temp = Temp(record_id=pre_id, record_db=db)1956 temp.save()1957 filteresults = []1958 updates = Detailsearch.objects.all()1959 # updates = Detailsearch.objects.all()[:5] # for test1960 # 自增序号刷新 【前端改】1961 # uid = 11962 for update in updates:1963 update_dict = model_to_dict(update)1964 # update_dict['id'] = str(uid)1965 # uid += 11966 filteresults.append(update_dict)1967 if filteresults:1968 print(filteresults)1969 data = {1970 'record_id': '0', # 首次检索1971 'result': filteresults1972 }1973 return Response(data)1974 else:1975 return Response('No suitable data!')1976@api_view(('GET',))1977def getsubrepo(request):1978 if request.method == 'GET':1979 latest = Projectinfo.objects.last()1980 if latest:1981 name = model_to_dict(latest)['subrepo']1982 intro = model_to_dict(latest)['introduction']1983 data = {1984 'name': name,1985 'intro': intro1986 }1987 return Response(data)1988 else:1989 return Response('failed')1990@api_view(('POST', 'GET',))1991def getrecordId(request):1992 if request.method == 'POST':1993 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))1994 raw_dict_key = list(raw_dict.keys())[0]1995 id_dict = ast.literal_eval(raw_dict_key)1996 record_id = int(id_dict['record_id'].strip('"'))1997 # 按前台存储id查找1998 if record_id == '0':1999 results = []2000 updates = Detailsearch.objects.all()2001 for update in updates:2002 update_dict = model_to_dict(update)2003 results.append(update_dict)2004 if results:2005 print(results)2006 data = {2007 'record_id': '0', # 首次检索2008 'result': results2009 }2010 return Response(data)2011 else:2012 return Response('No suitable data!')2013 else:2014 pre_id = record_id2015 results = []2016 updates = Detailsearch.objects.filter(id__gt=pre_id)2017 for update in updates:2018 update_dict = model_to_dict(update)2019 results.append(update_dict)2020 if results is not None:2021 print(results)2022 data = {2023 'record_id': pre_id,2024 'result': results2025 }2026 return Response(data)2027 else:2028 return Response('No suitable data!')2029 if request.method == 'GET':2030 return Response('No method!')2031@api_view(('POST', 'GET',))2032def selectedfilterresult(request):2033 if request.method == 'POST':2034 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2035 raw_dict_key = list(raw_dict.keys())[0]2036 target_dict = ast.literal_eval(raw_dict_key)2037 target_id = int(target_dict['target'].strip('"'))2038 # 按对应id查找2039 target_data = Detailsearch.objects.filter(id=target_id)2040 for data in target_data:2041 clean_data = model_to_dict(data)2042 return Response(clean_data)2043 if request.method == 'GET':2044 return Response('No method!')2045@api_view(('POST','GET',))2046def getcollection(request):2047 if request.method == 'POST':2048 serializer_context = {2049 'request': request,2050 }2051 base_router = 'http://127.0.0.1:8000/api/collection/'2052 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2053 print(raw_dict)2054 raw_dict_key = list(raw_dict.keys())[0]2055 collection_dict = ast.literal_eval(raw_dict_key)2056 print(collection_dict)2057 # 收藏词表2058 collection = collection_dict['collection'].strip('"')2059 collections = []2060 if Collection.objects.filter(folder=collection):2061 data = Collection.objects.filter(folder=collection)2062 raw_d_dict = []2063 for d in data:2064 d_dict = model_to_dict(d)2065 print(d_dict)2066 raw_d_dict.append(d_dict)2067 set_only = []2068 set_only.append(raw_d_dict[0])2069 # drop reqeated2070 for item in raw_d_dict:2071 k = 02072 for iitem in set_only:2073 if item['title'] != iitem['title'] and item['author'] != iitem['author']:2074 k += 12075 else:2076 break2077 if k == len(set_only):2078 set_only.append(item)2079 for only in set_only:2080 print(only)2081 pkid = only['id']2082 collection_data = CollectionSerializer(data=only, context=serializer_context)2083 if collection_data.is_valid():2084 ordered_li = collection_data.validated_data2085 ordered_li['pk'] = pkid2086 ordered_li['url'] = base_router + str(pkid) + '/'2087 ordered_li = dict(ordered_li)2088 collections.append(ordered_li)2089 print(collections)2090 return Response(collections)2091 else:2092 return Response('failed')2093 if request.method == 'GET':2094 return Response('No method!')2095@api_view(('POST','GET',))2096def addcollection(request):2097 if request.method == 'POST':2098 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2099 print(raw_dict)2100 raw_dict_key = list(raw_dict.keys())[0]2101 collection_dict = ast.literal_eval(raw_dict_key)2102 print(collection_dict)2103 # 接口数据2104 raw_collected_data = collection_dict['collect']2105 flag = collection_dict['flag'].strip('"')2106 folder = collection_dict['folder'].strip('"')2107 drop_quotation_data = re.sub('^"|"$', '', raw_collected_data)2108 collected_data = ast.literal_eval(drop_quotation_data.strip(']['))2109 # 收藏论文字段2110 title = collected_data['title']2111 author = collected_data['author']2112 info = collected_data['info']2113 date = collected_data['date']2114 if not Collection.objects.filter(title=title, author=author, info=info, date=date, flag=flag, folder=folder):2115 collect = Collection(title=title, author=author, info=info, date=date, flag=flag, folder=folder)2116 collect.save()2117 return Response('success')2118 else:2119 return Response('failed')2120 if request.method == 'GET':2121 return Response('No method!')2122@api_view(('DELETE','GET',))2123def deletecollection(request):2124 if request.method == 'DELETE':2125 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2126 raw_dict_key = list(raw_dict.keys())[0]2127 collection_dict = ast.literal_eval(raw_dict_key)2128 delete_id = collection_dict['delid']2129 print(delete_id)2130 if not delete_id:2131 return Response('failed')2132 else:2133 get_object_or_404(Collection, pk=int(delete_id)).delete()2134 return Response('success')2135 if request.method == 'GET':2136 return Response('No method!')2137@api_view(('POST','GET',))2138def getcorpus(request):2139 if request.method == 'POST':2140 serializer_context = {2141 'request': request,2142 }2143 base_router = 'http://127.0.0.1:8000/api/corpus/'2144 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2145 print(raw_dict)2146 raw_dict_key = list(raw_dict.keys())[0]2147 corpus_dict = ast.literal_eval(raw_dict_key)2148 print(corpus_dict)2149 # 收藏词表2150 corpus = corpus_dict['corpus'].strip('"')2151 corpuss = []2152 if Corpus.objects.filter(repository=corpus):2153 data = Corpus.objects.filter(repository=corpus)2154 raw_d_dict = []2155 for d in data:2156 d_dict = model_to_dict(d)2157 raw_d_dict.append(d_dict)2158 set_only = []2159 set_only.append(raw_d_dict[0])2160 # drop reqeated2161 for item in raw_d_dict:2162 k = 02163 for iitem in set_only:2164 if item['kws'] != iitem['kws']:2165 k += 12166 else:2167 break2168 if k == len(set_only):2169 set_only.append(item)2170 for only in set_only:2171 pkid = only['id']2172 corpus_data = CorpusSerializer(data=only, context=serializer_context)2173 if corpus_data.is_valid():2174 ordered_li = corpus_data.validated_data2175 ordered_li['pk'] = pkid2176 ordered_li['url'] = base_router + str(pkid) + '/'2177 ordered_li = dict(ordered_li)2178 corpuss.append(ordered_li)2179 print(corpuss)2180 return Response(corpuss)2181 else:2182 return Response('failed')2183 if request.method == 'GET':2184 return Response('No method!')2185# 添加词汇方式1. 详情页点击收藏2186@api_view(('POST','GET',))2187def appendcorpus(request):2188 if request.method == 'POST':2189 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2190 print(raw_dict)2191 raw_dict_key = list(raw_dict.keys())[0]2192 corpus_dict = ast.literal_eval(raw_dict_key)2193 print(corpus_dict)2194 # 接口数据2195 raw_corpus_data = corpus_dict['corpus']2196 repo = corpus_dict['repository'].strip('"')2197 # 收藏词汇2198 kws = ast.literal_eval(raw_corpus_data.strip(']['))['name']2199 print(kws)2200 print(repo)2201 if not Corpus.objects.filter(kws=kws, repository=repo):2202 corp = Corpus(kws=kws, repository=repo)2203 corp.save()2204 return Response('success')2205 else:2206 return Response('failed')2207 if request.method == 'GET':2208 return Response('No method!')2209# 添加词汇方式2. 词表库内直接添加2210@api_view(('POST','GET',))2211def addcorpus(request):2212 if request.method == 'POST':2213 serializer_context = {2214 'request': request,2215 }2216 base_router = 'http://127.0.0.1:8000/api/corpus/'2217 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2218 raw_dict_key = list(raw_dict.keys())[0]2219 corpus_dict = ast.literal_eval(raw_dict_key)2220 print(corpus_dict)2221 # 词汇名称2222 kws = corpus_dict['kws']2223 print(kws)2224 corpus = corpus_dict['corpus'].strip('"')2225 print(corpus)2226 corpuss = []2227 # 重复值判断2228 if Corpus.objects.filter(kws=kws):2229 return Response('failed')2230 else:2231 corp = Corpus(kws=kws, repository=corpus)2232 corp.save()2233 data = Corpus.objects.filter(kws=kws, repository=corpus)2234 print(data)2235 for d in data:2236 raw_dict = model_to_dict(d)2237 pkid = raw_dict['id']2238 corpus_data = CorpusSerializer(data=raw_dict, context=serializer_context)2239 if corpus_data.is_valid():2240 ordered_li = corpus_data.validated_data2241 ordered_li['pk'] = pkid2242 ordered_li['url'] = base_router + str(pkid) + '/'2243 ordered_li = dict(ordered_li)2244 corpuss.append(ordered_li)2245 print(ordered_li)2246 return Response(corpuss[0])2247 if request.method == 'GET':2248 return Response('No method!')2249@api_view(('DELETE','GET',))2250def deletecorpus(request):2251 if request.method == 'DELETE':2252 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2253 raw_dict_key = list(raw_dict.keys())[0]2254 corpus_dict = ast.literal_eval(raw_dict_key)2255 delete_id = corpus_dict['delid']2256 print(delete_id)2257 if not delete_id:2258 return Response('failed')2259 else:2260 get_object_or_404(Corpus, pk=int(delete_id)).delete()2261 return Response('success')2262 if request.method == 'GET':2263 return Response('No method!')2264@api_view(('GET',))2265def getfolder(request):2266 if request.method == 'GET':2267 serializer_context = {2268 'request': request,2269 }2270 base_router = 'http://127.0.0.1:8000/api/folder/'2271 data = Folder.objects.all()2272 folders = []2273 if data:2274 raw_d_dict = []2275 for d in data:2276 d_dict = model_to_dict(d)2277 raw_d_dict.append(d_dict)2278 set_only = []2279 set_only.append(raw_d_dict[0])2280 # drop reqeated2281 for item in raw_d_dict:2282 k = 02283 for iitem in set_only:2284 if item['folder'] != iitem['folder']:2285 k += 12286 else:2287 break2288 if k == len(set_only):2289 set_only.append(item)2290 for only in set_only:2291 pkid = only['id']2292 folder_data = FolderSerializer(data=only, context=serializer_context)2293 if folder_data.is_valid():2294 ordered_li = folder_data.validated_data2295 ordered_li['pk'] = pkid2296 ordered_li['url'] = base_router + str(pkid) + '/'2297 ordered_li = dict(ordered_li)2298 folders.append(ordered_li)2299 print(folders)2300 return Response(folders)2301 else:2302 return Response('failed')2303@api_view(('POST','GET',))2304def addfolder(request):2305 if request.method == 'POST':2306 serializer_context = {2307 'request': request,2308 }2309 base_router = 'http://127.0.0.1:8000/api/folder/'2310 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2311 raw_dict_key = list(raw_dict.keys())[0]2312 folder_dict = ast.literal_eval(raw_dict_key)2313 print(folder_dict)2314 # 收藏夹名称2315 fold_name = folder_dict['foldername']2316 print(fold_name)2317 folders = []2318 if not Folder.objects.filter(folder=fold_name): # 未找到重复值, 先存再取2319 folder = Folder(folder=fold_name)2320 folder.save()2321 data = Folder.objects.filter(folder=fold_name)2322 for d in data:2323 raw_dict = model_to_dict(d)2324 pkid = raw_dict['id']2325 corpus_data = FolderSerializer(data=raw_dict, context=serializer_context)2326 if corpus_data.is_valid():2327 ordered_li = corpus_data.validated_data2328 ordered_li['pk'] = pkid2329 ordered_li['url'] = base_router + str(pkid) + '/'2330 ordered_li = dict(ordered_li)2331 folders.append(ordered_li)2332 print(folders)2333 return Response(folders[0])2334 else:2335 return Response('failed')2336 if request.method == 'GET':2337 return Response('No method!')2338@api_view(('DELETE','GET',))2339def deletefolder(request):2340 if request.method == 'DELETE':2341 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2342 raw_dict_key = list(raw_dict.keys())[0]2343 folder_dict = ast.literal_eval(raw_dict_key)2344 delete_id = folder_dict['delid']2345 print(delete_id)2346 if not delete_id:2347 return Response('failed')2348 else:2349 data = Folder.objects.filter(pk=int(delete_id))2350 d_dict = model_to_dict(data[0])2351 folder_name = d_dict['folder']2352 Collection.objects.filter(folder=folder_name).delete()2353 get_object_or_404(Folder, pk=int(delete_id)).delete()2354 return Response('success')2355 if request.method == 'GET':2356 return Response('No method!')2357@api_view(('GET',))2358def getrepository(request):2359 if request.method == 'GET':2360 serializer_context = {2361 'request': request,2362 }2363 base_router = 'http://127.0.0.1:8000/api/repository/'2364 data = Repository.objects.all()2365 repositorys = []2366 if data:2367 raw_d_dict = []2368 for d in data:2369 d_dict = model_to_dict(d)2370 raw_d_dict.append(d_dict)2371 set_only = []2372 set_only.append(raw_d_dict[0])2373 # drop reqeated2374 for item in raw_d_dict:2375 k = 02376 for iitem in set_only:2377 if item['repository'] != iitem['repository']:2378 k += 12379 else:2380 break2381 if k == len(set_only):2382 set_only.append(item)2383 for only in set_only:2384 pkid = only['id']2385 repository_data = RepositorySerializer(data=only, context=serializer_context)2386 if repository_data.is_valid():2387 ordered_li = repository_data.validated_data2388 ordered_li['pk'] = pkid2389 ordered_li['url'] = base_router + str(pkid) + '/'2390 ordered_li = dict(ordered_li)2391 repositorys.append(ordered_li)2392 print(repositorys)2393 return Response(repositorys)2394 else:2395 return Response('failed')2396@api_view(('POST','GET',))2397def addrepository(request):2398 if request.method == 'POST':2399 serializer_context = {2400 'request': request,2401 }2402 base_router = 'http://127.0.0.1:8000/api/repository/'2403 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2404 raw_dict_key = list(raw_dict.keys())[0]2405 repository_dict = ast.literal_eval(raw_dict_key)2406 print(repository_dict)2407 # 词表库名称2408 repo_name = repository_dict['reponame']2409 print(repo_name)2410 repos = []2411 if not Repository.objects.filter(repository=repo_name):2412 repository = Repository(repository=repo_name)2413 repository.save()2414 data = Repository.objects.filter(repository=repo_name)2415 for d in data:2416 raw_dict = model_to_dict(d)2417 pkid = raw_dict['id']2418 corpus_data = RepositorySerializer(data=raw_dict, context=serializer_context)2419 if corpus_data.is_valid():2420 ordered_li = corpus_data.validated_data2421 ordered_li['pk'] = pkid2422 ordered_li['url'] = base_router + str(pkid) + '/'2423 ordered_li = dict(ordered_li)2424 repos.append(ordered_li)2425 print(repos)2426 return Response(repos[0])2427 else:2428 return Response('failed')2429 if request.method == 'GET':2430 return Response('No method!')2431@api_view(('DELETE','GET',))2432def deleterepository(request):2433 if request.method == 'DELETE':2434 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2435 raw_dict_key = list(raw_dict.keys())[0]2436 repository_dict = ast.literal_eval(raw_dict_key)2437 delete_id = repository_dict['delid']2438 print(delete_id)2439 if not delete_id:2440 return Response('failed')2441 else:2442 data = Repository.objects.filter(pk=int(delete_id))2443 d_dict = model_to_dict(data[0])2444 repo_name = d_dict['repository']2445 Corpus.objects.filter(repository=repo_name).delete()2446 get_object_or_404(Repository, pk=int(delete_id)).delete()2447 return Response('success')2448 if request.method == 'GET':2449 return Response('No method!')2450@api_view(('GET',))2451def getfilerepo(request):2452 if request.method == 'GET':2453 serializer_context = {2454 'request': request,2455 }2456 base_router = 'http://127.0.0.1:8000/api/filerepo/'2457 data = Filerepo.objects.all()2458 filerepos = []2459 if data:2460 raw_d_dict = []2461 for d in data:2462 d_dict = model_to_dict(d)2463 raw_d_dict.append(d_dict)2464 set_only = []2465 set_only.append(raw_d_dict[0])2466 # drop reqeated2467 for item in raw_d_dict:2468 k = 02469 for iitem in set_only:2470 if item['name'] != iitem['name']:2471 k += 12472 else:2473 break2474 if k == len(set_only):2475 set_only.append(item)2476 print(set_only)2477 for only in set_only:2478 pkid = only['id']2479 filerepo_data = FilerepoSerializer(data=only, context=serializer_context)2480 if filerepo_data.is_valid():2481 ordered_li = filerepo_data.validated_data2482 ordered_li['pk'] = pkid2483 ordered_li['url'] = base_router + str(pkid) + '/'2484 ordered_li = dict(ordered_li)2485 filerepos.append(ordered_li)2486 print(filerepos)2487 return Response(filerepos)2488 else:2489 return Response('failed')2490@api_view(('DELETE','GET',))2491def deletefilerepo(request):2492 if request.method == 'DELETE':2493 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2494 raw_dict_key = list(raw_dict.keys())[0]2495 filerepo_dict = ast.literal_eval(raw_dict_key)2496 delete_id = filerepo_dict['delid']2497 print(delete_id)2498 if not delete_id:2499 return Response('failed')2500 else:2501 data = Filerepo.objects.filter(pk=int(delete_id))2502 d_dict = model_to_dict(data[0])2503 subrepo_name = d_dict['name']2504 Detailsearch.objects.filter(name=subrepo_name).delete()2505 get_object_or_404(Filerepo, pk=int(delete_id)).delete()2506 return Response('success')2507 if request.method == 'GET':2508 return Response('No method!')2509@api_view(('GET', 'POST',))2510def getfile(request):2511 if request.method == 'POST':2512 serializer_context = {2513 'request': request,2514 }2515 base_router = 'http://127.0.0.1:8000/api/detailsearch/'2516 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2517 raw_dict_key = list(raw_dict.keys())[0]2518 file_dict = ast.literal_eval(raw_dict_key)2519 subrepo_name = file_dict['name'].strip('"')2520 print(subrepo_name)2521 if Detailsearch.objects.filter(name=subrepo_name):2522 data = Detailsearch.objects.filter(name=subrepo_name)2523 files = []2524 raw_d_dict = []2525 for d in data:2526 d_dict = model_to_dict(d)2527 print(d_dict)2528 raw_d_dict.append(d_dict)2529 set_only = []2530 set_only.append(raw_d_dict[0])2531 # drop reqeated2532 for item in raw_d_dict:2533 k = 02534 for iitem in set_only:2535 if item['title'] != iitem['title'] and item['author'] != iitem['author']:2536 k += 12537 else:2538 break2539 if k == len(set_only):2540 set_only.append(item)2541 for only in set_only:2542 print(only)2543 pkid = only['id']2544 file_data = DetailsearchSerializer(data=only, context=serializer_context)2545 if file_data.is_valid():2546 ordered_li = file_data.validated_data2547 ordered_li['pk'] = pkid2548 ordered_li['url'] = base_router + str(pkid) + '/'2549 ordered_li = dict(ordered_li)2550 files.append(ordered_li)2551 print(files)2552 return Response(files)2553 else:2554 return Response('failed')2555 if request.method == 'GET':2556 return Response('No method!')2557@api_view(('GET', 'POST',))2558def saveproject(request):2559 if request.method == 'POST':2560 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2561 raw_dict_key = list(raw_dict.keys())[0]2562 project_dict = ast.literal_eval(raw_dict_key)2563 print(project_dict)2564 name = project_dict['name']2565 date = project_dict['date']2566 type = project_dict['type']2567 source = project_dict['source']2568 description = project_dict['description']2569 method = project_dict['method']2570 corpus = project_dict['corpus']2571 subrepo = project_dict['subrepo']2572 introduction = project_dict['intro']2573 raw_extract = project_dict['extract']2574 raw_recommend = project_dict['recommend']2575 extract, recommend = [], []2576 extracts = ast.literal_eval(raw_extract.strip(']['))2577 if isinstance(extracts, tuple):2578 for extractors_dict in extracts:2579 extracted_words = extractors_dict['originkws']2580 extract.append(extracted_words)2581 else:2582 extracted_words = extracts['originkws']2583 extract.append(extracted_words)2584 if raw_recommend != '[]':2585 recommends = ast.literal_eval(raw_recommend.strip(']['))2586 if isinstance(recommends, tuple):2587 for recommends_dict in recommends:2588 recommend_words = recommends_dict['recommendkws']2589 recommend.append(recommend_words)2590 else:2591 recommend_words = recommends['recommendkws']2592 recommend.append(recommend_words)2593 else:2594 print('暂无推荐词')2595 # print(extract)2596 # print(recommend)2597 print(Project.objects.filter(project=name))2598 print(Projectinfo.objects.filter(project=name))2599 if not Projectinfo.objects.filter(project=name):2600 projectinfo = Projectinfo(project=name, date=date, type=type, source=source, description=description, method=method, extract=','.join(extract), recommend=','.join(recommend), corpus=corpus, subrepo=subrepo, introduction=introduction)2601 projectinfo.save()2602 return Response('success')2603 else:2604 return Response('failed')2605 if request.method == 'GET':2606 return Response('No method!')2607@api_view(('GET',))2608def getproject(request):2609 if request.method == 'GET':2610 serializer_context = {2611 'request': request,2612 }2613 base_router = 'http://127.0.0.1:8000/api/project/'2614 data = Project.objects.all()2615 print(data)2616 projects = []2617 if data:2618 raw_d_dict = []2619 for d in data:2620 d_dict = model_to_dict(d)2621 raw_d_dict.append(d_dict)2622 set_only = []2623 set_only.append(raw_d_dict[0])2624 # drop reqeated2625 for item in raw_d_dict:2626 k = 02627 for iitem in set_only:2628 if item['project'] != iitem['project']:2629 k += 12630 else:2631 break2632 if k == len(set_only):2633 set_only.append(item)2634 for only in set_only:2635 pkid = only['id']2636 project_data = ProjectSerializer(data=only, context=serializer_context)2637 if project_data.is_valid():2638 ordered_li = project_data.validated_data2639 ordered_li['pk'] = pkid2640 ordered_li['url'] = base_router + str(pkid) + '/'2641 ordered_li = dict(ordered_li)2642 projects.append(ordered_li)2643 print(projects)2644 return Response(projects)2645 else:2646 return Response('failed')2647@api_view(('GET', 'POST',))2648def addproject(request):2649 if request.method == 'POST':2650 serializer_context = {2651 'request': request,2652 }2653 base_router = 'http://127.0.0.1:8000/api/project/'2654 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2655 raw_dict_key = list(raw_dict.keys())[0]2656 project_dict = ast.literal_eval(raw_dict_key)2657 print(project_dict)2658 # 收藏夹名称2659 project_name = project_dict['name']2660 print(project_name)2661 projects = []2662 if not Project.objects.filter(project=project_name): # 未找到重复值, 先存再取2663 project = Project(project=project_name)2664 project.save()2665 data = Project.objects.filter(project=project_name)2666 for d in data:2667 raw_dict = model_to_dict(d)2668 pkid = raw_dict['id']2669 project_data = ProjectSerializer(data=raw_dict, context=serializer_context)2670 if project_data.is_valid():2671 ordered_li = project_data.validated_data2672 ordered_li['pk'] = pkid2673 ordered_li['url'] = base_router + str(pkid) + '/'2674 ordered_li = dict(ordered_li)2675 projects.append(ordered_li)2676 print(projects)2677 return Response(projects[0])2678 else:2679 return Response('failed')2680 if request.method == 'GET':2681 return Response('No method!')2682@api_view(('DELETE','GET',))2683def deleteproject(request):2684 if request.method == 'DELETE':2685 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2686 raw_dict_key = list(raw_dict.keys())[0]2687 project_dict = ast.literal_eval(raw_dict_key)2688 delete_id = project_dict['delid']2689 print(delete_id)2690 if not delete_id:2691 return Response('failed')2692 else:2693 data = Project.objects.filter(pk=int(delete_id))2694 d_dict = model_to_dict(data[0])2695 project_name = d_dict['project']2696 Projectinfo.objects.filter(project=project_name).delete()2697 get_object_or_404(Project, pk=int(delete_id)).delete()2698 return Response('success')2699 if request.method == 'GET':2700 return Response('No method!')2701@api_view(('GET',))2702def getpending(request):2703 if request.method == 'GET':2704 serializer_context = {2705 'request': request,2706 }2707 base_router = 'http://127.0.0.1:8000/api/pending/'2708 data = Pending.objects.all()2709 print(data)2710 pendings = []2711 if data:2712 raw_d_dict = []2713 for d in data:2714 d_dict = model_to_dict(d)2715 raw_d_dict.append(d_dict)2716 set_only = []2717 set_only.append(raw_d_dict[0])2718 # drop reqeated2719 for item in raw_d_dict:2720 k = 02721 for iitem in set_only:2722 if item['project'] != iitem['project']:2723 k += 12724 else:2725 break2726 if k == len(set_only):2727 set_only.append(item)2728 for only in set_only:2729 pkid = only['id']2730 if only['recommend'] == '':2731 temp = []2732 only['recommend'] = str(temp.append('暂无'))2733 pending_data = PendingSerializer(data=only, context=serializer_context)2734 if pending_data.is_valid():2735 ordered_li = pending_data.validated_data2736 ordered_li['pk'] = pkid2737 ordered_li['url'] = base_router + str(pkid) + '/'2738 ordered_li = dict(ordered_li)2739 pendings.append(ordered_li)2740 print(pendings)2741 return Response(pendings)2742 else:2743 return Response('failed')2744@api_view(('DELETE','GET',))2745def deletepending(request):2746 if request.method == 'DELETE':2747 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2748 raw_dict_key = list(raw_dict.keys())[0]2749 pending_dict = ast.literal_eval(raw_dict_key)2750 delete_id = pending_dict['delid']2751 print(delete_id)2752 if not delete_id:2753 return Response('failed')2754 else:2755 get_object_or_404(Pending, pk=int(delete_id)).delete()2756 return Response('success')2757 if request.method == 'GET':2758 return Response('No method!')2759@api_view(('GET', 'POST',))2760def getprojectinfo(request):2761 if request.method == 'POST':2762 serializer_context = {2763 'request': request,2764 }2765 base_router = 'http://127.0.0.1:8000/api/projectinfo/'2766 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2767 raw_dict_key = list(raw_dict.keys())[0]2768 projectinfo_dict = ast.literal_eval(raw_dict_key)2769 project_name = projectinfo_dict['name'].strip('"')2770 print(project_name)2771 if Projectinfo.objects.filter(project=project_name):2772 data = Projectinfo.objects.filter(project=project_name)2773 infos = []2774 raw_d_dict = []2775 for d in data:2776 d_dict = model_to_dict(d)2777 raw_d_dict.append(d_dict)2778 set_only = []2779 set_only.append(raw_d_dict[0])2780 # drop reqeated2781 for item in raw_d_dict:2782 k = 02783 for iitem in set_only:2784 if item['project'] != iitem['project'] and item['date'] != iitem['date']:2785 k += 12786 else:2787 break2788 if k == len(set_only):2789 set_only.append(item)2790 for only in set_only:2791 pkid = only['id']2792 if only['recommend'] == '':2793 temp = []2794 only['recommend'] = str(temp.append('暂无'))2795 info_data = ProjectinfoSerializer(data=only, context=serializer_context)2796 if info_data.is_valid():2797 ordered_li = info_data.validated_data2798 ordered_li['pk'] = pkid2799 ordered_li['url'] = base_router + str(pkid) + '/'2800 ordered_li = dict(ordered_li)2801 ordered_li['extract'] = re.sub('\'', '', ordered_li['extract'].strip(']['))2802 ordered_li['recommend'] = re.sub('\'', '', ordered_li['recommend'].strip(']['))2803 infos.append(ordered_li)2804 else:2805 print('暂无推荐词')2806 print(infos[0])2807 return Response(infos[0])2808 else:2809 return Response('failed')2810 if request.method == 'GET':2811 return Response('No method!')2812@api_view(('GET', 'POST',))2813def savepersonal(request):2814 if request.method == 'POST':2815 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2816 raw_dict_key = list(raw_dict.keys())[0]2817 print(raw_dict_key)2818 personal_dict = ast.literal_eval(raw_dict_key)2819 print(personal_dict)2820 name = personal_dict['name']2821 gender = personal_dict['gender']2822 age = personal_dict['age']2823 email = personal_dict['email']2824 job = personal_dict['job']2825 place = personal_dict['place']2826 post = personal_dict['post']2827 sign = personal_dict['sign']2828 # 调用命令行2829 """2830 order = 'nohup python3.7 /Users/felix_zhao/Desktop/sourcetree_file/SSRP-Dev/IData/IDataSearch/backdoor/recommend/main.py >nohup.out 2>&1 &'2831 if post == 'true':2832 print('开启推送入口')2833 import os2834 os.system(order)2835 2836 else:2837 import os2838 print('关闭推送入口')2839 from utils import NohupProcess2840 output = NohupProcess().execCmd(order)2841 pid = re.sub('[1]', '', output)2842 kill = 'kill -' + pid2843 os.system(kill)2844 """2845 if not Personal.objects.filter(name=name, gender=gender, age=age, email=email, job=job, place=place, post=post, sign=sign):2846 personalinfo = Personal(name=name, gender=gender, age=age, email=email, job=job, place=place, post=post, sign=sign)2847 personalinfo.save()2848 return Response('success')2849 else:2850 return Response('failed')2851 if request.method == 'GET':2852 return Response('No method!')2853@api_view(('GET',))2854def getpersonal(request):2855 if request.method == 'GET':2856 serializer_context = {2857 'request': request,2858 }2859 base_router = 'http://127.0.0.1:8000/api/personal/'2860 data = Personal.objects.last()2861 print(data)2862 personals = []2863 if data:2864 raw_d_dict = []2865 d_dict = model_to_dict(data)2866 raw_d_dict.append(d_dict)2867 set_only = []2868 set_only.append(raw_d_dict[0])2869 # drop reqeated2870 for item in raw_d_dict:2871 k = 02872 for iitem in set_only:2873 if item['name'] != iitem['name'] and item['gender'] != iitem['gender'] and item['age'] != iitem['age'] and item['email'] != iitem['email']:2874 k += 12875 else:2876 break2877 if k == len(set_only):2878 set_only.append(item)2879 for only in set_only:2880 pkid = only['id']2881 personal_data = PersonalSerializer(data=only, context=serializer_context)2882 if personal_data.is_valid():2883 ordered_li = personal_data.validated_data2884 ordered_li['pk'] = pkid2885 ordered_li['url'] = base_router + str(pkid) + '/'2886 ordered_li = dict(ordered_li)2887 personals.append(ordered_li)2888 print(personals)2889 return Response(personals[0])2890 else:2891 return Response('failed')2892@api_view(('GET',))2893def getrecommend(request):2894 if request.method == 'GET':2895 data = Personal.objects.last()2896 print(data)2897 if data:2898 d_dict = model_to_dict(data)2899 region = d_dict['job']2900 print(region)2901 else:2902 print('还未填写个人信息!')2903 region = '氨基酸'2904 from .recommend.SSRP_recommend_data import GetRecommendResult2905 get = GetRecommendResult()2906 daily_recommend = get.get_daily_recommend(region)2907 print(daily_recommend)2908 return Response(daily_recommend)2909@api_view(('POST','GET',))2910def titleprediction(request):2911 if request.method == 'POST':2912 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2913 print(raw_dict)2914 raw_dict_key = list(raw_dict.keys())[0]2915 title_dict = ast.literal_eval(raw_dict_key)2916 print(title_dict)2917 title = title_dict['titles'].split()2918 print(title)2919 data = {2920 'titles': title2921 }2922 api = 'https://innovaapi.aminer.cn/tools/v1/predict/nsfc?'2923 try:2924 response = requests.post(url=api, data=json.dumps(data), timeout=10)2925 res = ast.literal_eval(response.text)['data']2926 print(res)2927 levels = res.keys()2928 print(levels)2929 tableData = []2930 for k in range(1, (len(levels)+1)):2931 for v in list(res.values())[k-1]:2932 v['group'] = 'level' + str(k)2933 v.pop('code')2934 tableData.append(v)2935 print(tableData)2936 return Response(tableData)2937 except Exception as e:2938 print(e)2939 return Response('failed')2940 if request.method == 'GET':2941 return Response('No method!')2942@api_view(('POST','GET',))2943def aititleprediction(request):2944 if request.method == 'POST':2945 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2946 print(raw_dict)2947 raw_dict_key = list(raw_dict.keys())[0]2948 title_dict = ast.literal_eval(raw_dict_key)2949 print(title_dict)2950 words = title_dict['words']2951 if len(words) > 1:2952 words = words.split(',')2953 else:2954 words = words.split()2955 print(words)2956 data = {2957 'words': words2958 }2959 api = 'https://innovaapi.aminer.cn/tools/v1/predict/nsfc/ai?'2960 try:2961 response = requests.post(url=api, data=json.dumps(data), timeout=10)2962 res = ast.literal_eval(response.text)['data']2963 res.pop('tree')2964 print(res)2965 levels = list(res.keys())2966 print(levels)2967 tableData = []2968 for k in range(1, (len(levels)+1)):2969 for v in list(res.values())[k-1]:2970 v.pop('name')2971 v['name'] = v.pop('name_zh')2972 v['group'] = 'level' + str(k)2973 tableData.append(v)2974 print(tableData)2975 return Response(tableData)2976 except Exception as e:2977 print(e)2978 return Response('failed')2979 if request.method == 'GET':2980 return Response('No method!')2981@api_view(('POST','GET',))2982def genderprediction(request):2983 if request.method == 'POST':2984 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))2985 print(raw_dict)2986 raw_dict_key = list(raw_dict.keys())[0]2987 gender_dict = ast.literal_eval(raw_dict_key)2988 print(gender_dict)2989 name = gender_dict['name']2990 org = gender_dict['org']2991 image_url = gender_dict['image_url']2992 print(name)2993 print(org)2994 # api请求内容发生变化2995 # api = 'https://innovaapi.aminer.cn/tools/v1/predict/gender?name=' + name + '&org=' + org + '&image_url=' + image_url2996 # print(api)2997 # try:2998 # response = requests.get(url=api, timeout=50)2999 # res = ast.literal_eval(response.text)['data']3000 # print(response.text)3001 try:3002 # 选择导入脚本执行预测3003 from .prediction_api.src.gender import Gender3004 g = Gender()3005 res = g.predict(name=name, org=org, image_url=image_url)3006 print(res)3007 data = {}3008 if res['male'] > res['female']:3009 data['result'] = '男'3010 elif res['male'] < res['female']:3011 data['result'] = '女'3012 else:3013 data['result'] = '性别不明'3014 res.pop('male')3015 res.pop('female')3016 values = list(res.values())3017 genders = []3018 datas = []3019 for v in values:3020 if v['male'] > v['female']:3021 genders.append('男')3022 elif v['male'] < v['female']:3023 genders.append('女')3024 else:3025 genders.append('男女都可能')3026 print(genders)3027 data['name'] = genders[0]3028 data['search'] = genders[1]3029 data['face'] = genders[2]3030 print(data)3031 datas.append(data)3032 return Response(datas)3033 except Exception as e:3034 print(e)3035 return Response('failed')3036 if request.method == 'GET':3037 return Response('No method!')3038@api_view(('POST','GET',))3039def identityprediction(request):3040 if request.method == 'POST':3041 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))3042 print(raw_dict)3043 raw_dict_key = list(raw_dict.keys())[0]3044 identity_dict = ast.literal_eval(raw_dict_key)3045 print(identity_dict)3046 pc = identity_dict['pc']3047 cn = identity_dict['cn']3048 hi = identity_dict['hi']3049 gi = identity_dict['gi']3050 year_range = identity_dict['year_range']3051 print(pc)3052 print(cn)3053 print(hi)3054 print(gi)3055 print(year_range)3056 api = 'https://innovaapi.aminer.cn/tools/v1/predict/identity?' + 'pc=' + pc + '&cn=' + cn + '&hi=' + hi + '&gi=' + gi + '&year_range=' + year_range3057 try:3058 response = requests.get(url=api, timeout=10)3059 res = ast.literal_eval(response.text)['data']3060 print(res)3061 datas = []3062 data = {}3063 if res['label'] == 'teacher':3064 data['label'] = '老师'3065 else:3066 data['label'] = '学生'3067 if res['degree'] == 'undergraduate':3068 data['degree'] = '本科'3069 elif res['degree'] == 'master':3070 data['degree'] = '硕士'3071 else:3072 data['degree'] = '博士'3073 data['p'] = res['p']3074 datas.append(data)3075 return Response(datas)3076 except Exception as e:3077 print(e)3078 return Response('failed')3079 if request.method == 'GET':3080 return Response('No method!')3081@api_view(('POST','GET',))3082def hoppingprediction(request):3083 if request.method == 'POST':3084 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))3085 print(raw_dict)3086 raw_dict_key = list(raw_dict.keys())[0]3087 hopping_dict = ast.literal_eval(raw_dict_key)3088 print(hopping_dict)3089 ntop = hopping_dict['ntop']3090 org_name = hopping_dict['org_name']3091 if re.search(',', org_name):3092 org_names = org_name.split(',')3093 print(org_names)3094 else:3095 org_names = []3096 org_names.append(org_name)3097 print(org_names)3098 # api请求内容发生变化3099 # api = 'https://innovaapi.aminer.cn/tools/v1/predict/career?' + 'per_name=' + per_name + '&org_name=' + org_name3100 # try:3101 # response = requests.get(url=api, timeout=10, verify=False)3102 # res = ast.literal_eval(response.text)['data']3103 # print(response.text)3104 try:3105 # 调用脚本执行预测3106 from .prediction_api.src.jobhopping import JobHopping3107 j = JobHopping()3108 res = j.predict(org_names, int(ntop))3109 print(res)3110 return Response(res)3111 except Exception as e:3112 print(e)3113 return Response('failed')3114 if request.method == 'GET':3115 return Response('No method!')3116@api_view(('POST','GET',))3117def scholarrecommend(request):3118 if request.method == 'POST':3119 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))3120 print(raw_dict)3121 raw_dict_key = list(raw_dict.keys())[0]3122 scholar_dict = ast.literal_eval(raw_dict_key)3123 print(scholar_dict)3124 text = scholar_dict['text']3125 num = scholar_dict['num']3126 data = {3127 'text': text,3128 'num': int(num)3129 }3130 api = 'https://innovaapi.aminer.cn/tools/v1/predict/experts?'3131 try:3132 from .data.user_agent import USER_AGENT3133 import random3134 headers = {3135 'User-Agent': random.choice(USER_AGENT)3136 }3137 response = requests.post(url=api, data=json.dumps(data), headers=headers, timeout=10)3138 res = ast.literal_eval(response.text)['data']3139 print(res)3140 return Response(res)3141 except Exception as e:3142 print(e)3143 return Response('failed')3144 if request.method == 'GET':3145 return Response('No method!')3146@api_view(('POST','GET',))3147def paperprediction(request):3148 if request.method == 'POST':3149 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))3150 print(raw_dict)3151 raw_dict_key = list(raw_dict.keys())[0]3152 paper_dict = ast.literal_eval(raw_dict_key)3153 print(paper_dict)3154 pid = paper_dict['pid']3155 api = 'https://innovaapi.aminer.cn/tools/v1/predict/nsfc/person?' + 'pid=' + pid3156 try:3157 from .data.user_agent import USER_AGENT3158 import random3159 headers = {3160 'User-Agent': random.choice(USER_AGENT)3161 }3162 response = requests.get(url=api, headers=headers, timeout=10)3163 print(response.text)3164 res = ast.literal_eval(response.text)['data']3165 flag = list(res.keys())[0]3166 tableData = []3167 if res[flag] == 'No pubs':3168 return Response('No result')3169 else:3170 res.pop('tree')3171 print(res)3172 levels = list(res.keys())3173 print(levels)3174 for k in range(1, (len(levels) + 1)):3175 for v in list(res.values())[k - 1]:3176 v.pop('name')3177 v['name'] = v.pop('name_zh')3178 v['group'] = 'level' + str(k)3179 tableData.append(v)3180 print(tableData)3181 return Response(tableData)3182 except Exception as e:3183 print(e)3184 return Response('failed')3185 if request.method == 'GET':3186 return Response('No method!')3187@api_view(('POST','GET',))3188def frequencyanalyze(request):3189 if request.method == 'POST':3190 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))3191 raw_dict_key = list(raw_dict.keys())[0]3192 frequency_dict = ast.literal_eval(raw_dict_key)3193 source = frequency_dict['source']3194 print(source)3195 if Detailsearch.objects.filter(name=source):3196 data = Detailsearch.objects.filter(name=source)3197 kwss = []3198 titles = []3199 abstracts = []3200 for d in data:3201 d_dict = model_to_dict(d)3202 if d_dict['kws'] == '暂无':3203 pass3204 else:3205 if re.search(';', d_dict['kws']):3206 new_kws = re.sub('"', '', d_dict['kws'])3207 new_kws1 = re.sub('; |;', ', ', new_kws)3208 kwss.append(new_kws1)3209 else:3210 new_kws = re.sub(';', ', ', d_dict['kws'])3211 kwss.append(new_kws)3212 titles.append(d_dict['title'])3213 abstracts.append(d_dict['abstract'])3214 kws_data = []3215 for kws in kwss:3216 for kw in kws.split():3217 kws_data.append(kw.strip(','))3218 from .analysis.SSRP_cloud_analysis import CloudAnalysis3219 cloud = CloudAnalysis()3220 frequent_kws = cloud.keyword_count(kws_data)3221 frequent_title = cloud.title_or_abstract_count(titles)3222 frequent_abstract = cloud.title_or_abstract_count(abstracts)3223 print(frequent_kws)3224 print(frequent_title)3225 print(frequent_abstract)3226 new_data = {3227 'kws': frequent_kws,3228 'title': frequent_title,3229 'abstract': frequent_abstract3230 }3231 return Response(new_data)3232 else:3233 return Response('failed')3234 if request.method == 'GET':3235 return Response('No method!')3236@api_view(('POST','GET',))3237def volumeanalyze(request):3238 if request.method == 'POST':3239 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))3240 raw_dict_key = list(raw_dict.keys())[0]3241 volume_dict = ast.literal_eval(raw_dict_key)3242 start_date = volume_dict['start']3243 end_date = volume_dict['end']3244 source = volume_dict['source']3245 print(start_date)3246 print(end_date)3247 print(source)3248 if Detailsearch.objects.filter(name=source, source='期刊', date__range=(start_date, end_date)):3249 data = Detailsearch.objects.filter(name=source, source='期刊', date__range=(start_date, end_date))3250 infos = []3251 for d in data:3252 d_dict = model_to_dict(d)3253 infos.append(d_dict['info'])3254 from .analysis.SSRP_cloud_analysis import CloudAnalysis3255 cloud = CloudAnalysis()3256 frequent_infos = cloud.keyword_count(infos)3257 print(frequent_infos)3258 return Response(frequent_infos)3259 else:3260 return Response('failed')3261 if request.method == 'GET':3262 return Response('No method!')3263@api_view(('POST','GET',))3264def relationanalyze(request):3265 if request.method == 'POST':3266 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))3267 raw_dict_key = list(raw_dict.keys())[0]3268 relation_dict = ast.literal_eval(raw_dict_key)3269 source = relation_dict['source']3270 print(source)3271 if Detailsearch.objects.filter(name=source):3272 data = Detailsearch.objects.filter(name=source)3273 kwss = []3274 for d in data:3275 d_dict = model_to_dict(d)3276 if d_dict['kws'] != '暂无':3277 if re.search(';', d_dict['kws']):3278 new_kws = re.sub('"', '', d_dict['kws'])3279 new_kws1 = re.sub('; |;', ',', new_kws)3280 kws = new_kws1.split(',')3281 kwss.append(kws)3282 else:3283 new_kws = re.sub(';', ',', d_dict['kws'])3284 kws = new_kws.split(',')3285 kwss.append(kws)3286 else:3287 continue3288 from .analysis.SSRP_network_analysis import WordVector3289 vec = WordVector()3290 network_data = {3291 'nodes': vec.build_network_scipy_data()[0],3292 'edges': vec.build_network_scipy_data()[1]3293 }3294 return Response(network_data)3295 else:3296 return Response('failed')3297 if request.method == 'GET':3298 return Response('No method!')3299@api_view(('POST','GET',))3300def classifyanalyze(request):3301 if request.method == 'POST':3302 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))3303 raw_dict_key = list(raw_dict.keys())[0]3304 classify_dict = ast.literal_eval(raw_dict_key)3305 source = classify_dict['source']3306 print(source)3307 theme = int(classify_dict['theme'])3308 themeword = int(classify_dict['themeword'])3309 if Detailsearch.objects.filter(name=source):3310 data = Detailsearch.objects.filter(name=source)3311 abs = []3312 for d in data:3313 d_dict = model_to_dict(d)3314 if d_dict['abstract']:3315 abs.append(d_dict['abstract'])3316 print(abs)3317 from .analysis.SSRP_classify_analysis import ClassifyAnalysis3318 classify = ClassifyAnalysis()3319 lda_data = classify.simple_LDA_analysis(abs, theme, themeword)3320 return Response(lda_data)3321 else:3322 return Response('failed')3323 if request.method == 'GET':3324 return Response('No method!')3325@api_view(('POST','GET',))3326def cooperationanalyze(request):3327 if request.method == 'POST':3328 raw_dict = dict(zip(request.POST.keys(), request.POST.values()))3329 raw_dict_key = list(raw_dict.keys())[0]3330 cooperation_dict = ast.literal_eval(raw_dict_key)3331 source = cooperation_dict['source']3332 print(source)3333 if Detailsearch.objects.filter(name=source):3334 data = Detailsearch.objects.filter(name=source)3335 two_aus = []3336 more_aus = []3337 for d in data:3338 d_dict = model_to_dict(d)3339 if not re.search('[a-zA-Z]', d_dict['author']):3340 if len(d_dict['author'].split()) == 2:3341 two_aus.append(d_dict['author'])3342 elif len(d_dict['author'].split()) > 2:3343 more_aus.append(d_dict['author'])3344 else:3345 continue3346 else:3347 continue3348 print(len(two_aus))3349 print(len(more_aus))3350 from .analysis.SSRP_cooperation_analysis import CooperateAnalysis3351 cooper = CooperateAnalysis()3352 id_relation = cooper.build_au_relation(two_aus[:5], more_aus[:3])3353 print(id_relation)3354 cooper_data = {3355 'nodes': id_relation[0],3356 'edges': id_relation[1]3357 }3358 return Response(cooper_data)3359 else:3360 return Response('failed')3361 if request.method == 'GET':3362 return Response('No method!')3363class MessageViewSet(viewsets.ModelViewSet):3364 """3365 API endpoint that allows messages to be viewed or edited.3366 """3367 queryset = Message.objects.all()3368 serializer_class = MessageSerializer3369class UploadcorpusViewSet(viewsets.ModelViewSet):3370 queryset = Uploadcorpus.objects.all()3371 serializer_class = UploadcorpusSerializer3372class ExtractorViewset(viewsets.ModelViewSet):3373 queryset = Extractor.objects.all()3374 serializer_class = ExtractorSerializer3375class RecommedViewset(viewsets.ModelViewSet):3376 queryset = Recommend.objects.all()3377 serializer_class = RecommendSerializer3378class SimplesearchViewset(viewsets.ModelViewSet):3379 queryset = Simplesearch.objects.all()3380 serializer_class = SimplesearchSerializer3381class DetailsearchViewset(viewsets.ModelViewSet):3382 queryset = Detailsearch.objects.all()3383 serializer_class = DetailsearchSerializer3384class TempViewset(viewsets.ModelViewSet):3385 queryset = Temp.objects.all()3386 serializer_class = TempSerializer3387class FolderViewset(viewsets.ModelViewSet):3388 queryset = Folder.objects.all()3389 serializer_class = FolderSerializer3390class CollectionViewset(viewsets.ModelViewSet):3391 queryset = Collection.objects.all()3392 serializer_class = CollectionSerializer3393class RepositoryViewset(viewsets.ModelViewSet):3394 queryset = Repository.objects.all()3395 serializer_class = RepositorySerializer3396class CorpusViewset(viewsets.ModelViewSet):3397 queryset = Corpus.objects.all()3398 serializer_class = CorpusSerializer3399class FilerepoViewset(viewsets.ModelViewSet):3400 queryset = Filerepo.objects.all()3401 serializer_class = FilerepoSerializer3402class PendingViewset(viewsets.ModelViewSet):3403 queryset = Pending.objects.all()3404 serializer_class = PendingSerializer3405class ProjectViewset(viewsets.ModelViewSet):3406 queryset = Project.objects.all()3407 serializer_class = ProjectSerializer3408class ProjectinfoViewset(viewsets.ModelViewSet):3409 queryset = Projectinfo.objects.all()3410 serializer_class = ProjectinfoSerializer3411class PersonalViewset(viewsets.ModelViewSet):3412 queryset = Personal.objects.all()...

Full Screen

Full Screen

main.py

Source:main.py Github

copy

Full Screen

1class Test:2 def __init__(self):3 self.__get_only = "get_only"4 self.__set_only = "set_only"5 self.__get_and_set = "get_and_set"6 self.another_get_and_set = "another_get_and_set"7 8 @property9 def get_only(self):10 return self.__get_only11 @property12 def get_and_set(self):13 return self.__get_and_set14 @get_and_set.setter15 def get_and_set(self, get_and_set):16 self.__get_and_set = get_and_set17if __name__ == "__main__":18 test = Test()19 try:20 _ = test.set_only21 print("set_only CAN be referenced")22 except AttributeError:23 print("set_only CAN NOT be referenced")24 try:25 test.set_only = "set_only changed"26 print("set_only CAN set the value")27 except AttributeError:28 print("set_only CAN NOT set the value")29 try:30 _ = test.get_only31 print("get_only CAN be referenced")32 except AttributeError:33 print("get_only CAN NOT be referenced")34 try:35 test.get_only = "get_only changed"36 print("get_only CAN set the value")37 except AttributeError:38 print("get_only CAN NOT set the value")39 try:40 _ = test.get_and_set41 print("get_and_set CAN be referenced")42 except AttributeError:43 print("get_and_set CAN NOT be referenced")44 try:45 test.get_and_set = "get_and_set changed"46 print("get_and_set CAN set the value")47 except AttributeError:48 print("get_and_set CAN NOT set the value")49 try:50 _ = test.another_get_and_set51 print("another_get_and_set CAN be referenced")52 except AttributeError:53 print("another_get_and_set CAN NOT be referenced")54 try:55 test.another_get_and_set = "another_get_and_set changed"56 print("another_get_and_set CAN set the value")57 except AttributeError:...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub. Right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you be proficient with different test automation frameworks i.e. Selenium, Cypress, TestNG etc.

LambdaTest Learning Hubs:

YouTube

You could also refer to video tutorials over LambdaTest YouTube channel to get step by step demonstration from industry experts.

Run autotest automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

NotHelpful