How to use keyword_line method in SeleniumLibrary

Best Python code snippets using SeleniumLibrary

io.py

Source: io.py (GitHub)

1# -*- coding: utf-8 -*-2from __future__ import print_function, absolute_import3import os.path, sys4import numpy as np5# PHYSICAL CONSTANTS UNITS6KCAL_TO_AU = 627.509541 # UNIT CONVERSION7# Radii used to determine connectivity in symmetry corrections8# Covalent radii taken from Cambridge Structural Database9RADII = {'H': 0.32, 'He': 0.93, 'Li': 1.23, 'Be': 0.90, 'B': 0.82, 'C': 0.77, 'N': 0.75, 'O': 0.73, 'F': 0.72,10 'Ne': 0.71, 'Na': 1.54, 'Mg': 1.36, 'Al': 1.18, 'Si': 1.11, 'P': 1.06, 'S': 1.02, 'Cl': 0.99, 'Ar': 0.98,11 'K': 2.03, 'Ca': 1.74, 'Sc': 1.44, 'Ti': 1.32, 'V': 1.22, 'Cr': 1.18, 'Mn': 1.17, 'Fe': 1.17, 'Co': 1.16,12 'Ni': 1.15, 'Cu': 1.17, 'Zn': 1.25, 'Ga': 1.26, 'Ge': 1.22, 'As': 1.20, 'Se': 1.16, 'Br': 1.14, 'Kr': 1.12,13 'Rb': 2.16, 'Sr': 1.91, 'Y': 1.62, 'Zr': 1.45, 'Nb': 1.34, 'Mo': 1.30, 'Tc': 1.27, 'Ru': 1.25, 'Rh': 1.25,14 'Pd': 1.28, 'Ag': 1.34, 'Cd': 1.48, 'In': 1.44, 'Sn': 1.41, 'Sb': 1.40, 'Te': 1.36, 'I': 1.33, 'Xe': 1.31,15 'Cs': 2.35, 'Ba': 1.98, 'La': 1.69, 'Lu': 1.60, 'Hf': 1.44, 'Ta': 1.34, 'W': 1.30, 'Re': 1.28, 'Os': 1.26,16 'Ir': 1.27, 'Pt': 1.30, 'Au': 1.34, 'Hg': 1.49, 'Tl': 1.48, 'Pb': 1.47, 'Bi': 1.46, 'X': 0}17# Bondi van der Waals radii for all atoms from: Bondi, A. J. Phys. Chem. 1964, 68, 441-452,18# except hydrogen, which is taken from Rowland, R. S.; Taylor, R. J. Phys. Chem. 1996, 100, 7384-7391.19# Radii unavailable in either of these publications are set to 2 Angstrom20# (Unfinished)21BONDI = {'H': 1.09, 'He': 1.40, 'Li': 1.82, 'Be': 2.00, 'B': 2.00, 'C': 1.70, 'N': 1.55, 'O': 1.52, 'F': 1.47,22 'Ne': 1.54}23# Some useful arrays24periodictable = ["", "H", "He", "Li", "Be", "B", "C", "N", "O", "F", "Ne", "Na", "Mg", "Al", "Si",25 "P", "S", "Cl", "Ar", "K", "Ca", "Sc", "Ti", "V", "Cr", "Mn", "Fe", "Co", "Ni", "Cu", "Zn",26 "Ga", "Ge", "As", "Se", "Br", "Kr", "Rb", "Sr", "Y", "Zr", "Nb", "Mo", "Tc", "Ru", "Rh", "Pd",27 "Ag", "Cd", "In", "Sn", "Sb", "Te", "I", "Xe", "Cs", "Ba", "La", "Ce", "Pr", "Nd", "Pm", "Sm",28 "Eu", "Gd", "Tb", "Dy", "Ho", "Er", "Tm", "Yb", "Lu", "Hf", "Ta", "W", "Re", "Os", "Ir", "Pt",29 "Au", "Hg", "Tl", "Pb", "Bi", "Po", "At", "Rn", "Fr", "Ra", "Ac", "Th", "Pa", "U", "Np", "Pu",30 "Am", "Cm", "Bk", "Cf", "Es", "Fm", "Md", "No", "Lr", "Rf", "Db", "Sg", "Bh", "Hs", "Mt", "Ds",31 "Rg", "Uub", "Uut", "Uuq", "Uup", "Uuh", "Uus", "Uuo"]32def element_id(massno, num=False):33 """34 Get element symbol from mass number.35 Used in parsing output files to determine elements present in file.36 Parameter:37 massno (int): mass of element.38 Returns:39 str: element symbol, or 'XX' if not found in periodic table.40 """41 try:42 if num:43 return periodictable.index(massno)44 return periodictable[massno]45 except IndexError:46 return "XX"47class xyz_out:48 """49 Enables output of optimized coordinates to a single xyz-formatted file.50 Writes Cartesian coordinates of parsed chemical input.51 Attributes:52 xyz (file object): path in current working directory to write Cartesian coordinates.53 """54 def __init__(self, filein, suffix, append):55 self.xyz = open('{}_{}.{}'.format(filein, append, suffix), 'w')56 def write_text(self, message):57 self.xyz.write(message + "\n")58 def write_coords(self, atoms, coords):59 for n, carts in enumerate(coords):60 self.xyz.write('{:>1}'.format(atoms[n]))61 for cart in carts:62 self.xyz.write('{:13.6f}'.format(cart))63 self.xyz.write('\n')64 def finalize(self):65 self.xyz.close()66class getoutData:67 """68 Read molecule data from a computational chemistry output file.69 Currently supports Gaussian and ORCA 
output types.70 Attributes:71 FREQS (list): list of frequencies parsed from Gaussian file.72 REDMASS (list): list of reduced masses parsed from Gaussian file.73 FORCECONST (list): list of force constants parsed from Gaussian file.74 NORMALMODE (list): list of normal modes parsed from Gaussian file.75 atom_nums (list): list of atom number IDs.76 atom_types (list): list of atom element symbols.77 cartesians (list): list of cartesian coordinates for each atom.78 atomictypes (list): list of atomic types output in Gaussian files.79 connectivity (list): list of atomic connectivity in a molecule, based on covalent radii80 """81 def __init__(self, file):82 with open(file) as f:83 data = f.readlines()84 program = 'none'85 for line in data:86 if "Gaussian" in line:87 program = "Gaussian"88 break89 if "* O R C A *" in line:90 program = "Orca"91 break92 if "NWChem" in line:93 program = "NWChem"94 break95 def get_freqs(self, outlines, natoms, format):96 self.FREQS = []97 self.REDMASS = []98 self.FORCECONST = []99 self.NORMALMODE = []100 freqs_so_far = 0101 if format == "Gaussian":102 for i in range(0, len(outlines)):103 if outlines[i].find(" Frequencies -- ") > -1:104 nfreqs = len(outlines[i].split())105 for j in range(2, nfreqs):106 self.FREQS.append(float(outlines[i].split()[j]))107 self.NORMALMODE.append([])108 for j in range(3, nfreqs + 1): self.REDMASS.append(float(outlines[i + 1].split()[j]))109 for j in range(3, nfreqs + 1): self.FORCECONST.append(float(outlines[i + 2].split()[j]))110 for j in range(0, natoms):111 for k in range(0, nfreqs - 2):112 self.NORMALMODE[(freqs_so_far + k)].append(113 [float(outlines[i + 5 + j].split()[3 * k + 2]),114 float(outlines[i + 5 + j].split()[3 * k + 3]),115 float(outlines[i + 5 + j].split()[3 * k + 4])])116 freqs_so_far = freqs_so_far + nfreqs - 2117 def getatom_types(self, outlines, program):118 if program == "Gaussian":119 for i, oline in enumerate(outlines):120 if "Input orientation" in oline or "Standard orientation" in oline:121 self.atom_nums, self.atom_types, self.cartesians, self.atomictypes, carts = [], [], [], [], \122 outlines[i + 5:]123 for j, line in enumerate(carts):124 if "-------" in line:125 break126 self.atom_nums.append(int(line.split()[1]))127 self.atom_types.append(element_id(int(line.split()[1])))128 self.atomictypes.append(int(line.split()[2]))129 if len(line.split()) > 5:130 self.cartesians.append(131 [float(line.split()[3]), float(line.split()[4]), float(line.split()[5])])132 else:133 self.cartesians.append(134 [float(line.split()[2]), float(line.split()[3]), float(line.split()[4])])135 if program == "Orca":136 for i, oline in enumerate(outlines):137 if "*" in oline and ">" in oline and "xyz" in oline:138 self.atom_nums, self.atom_types, self.cartesians, carts = [], [], [], outlines[i + 1:]139 for j, line in enumerate(carts):140 if ">" in line and "*" in line:141 break142 if len(line.split()) > 5:143 self.cartesians.append(144 [float(line.split()[3]), float(line.split()[4]), float(line.split()[5])])145 self.atom_types.append(line.split()[2])146 self.atom_nums.append(element_id(line.split()[2], num=True))147 else:148 self.cartesians.append(149 [float(line.split()[2]), float(line.split()[3]), float(line.split()[4])])150 self.atom_types.append(line.split()[1])151 self.atom_nums.append(element_id(line.split()[1], num=True))152 if program == "NWChem":153 for i, oline in enumerate(outlines):154 if "Output coordinates" in oline:155 self.atom_nums, self.atom_types, self.cartesians, self.atomictypes, carts = [], [], [], [], outlines[i+4:]156 
for j, line in enumerate(carts):157 if line.strip()=='' :158 break159 self.atom_nums.append(int(float(line.split()[2])))160 self.atom_types.append(element_id(int(float(line.split()[2]))))161 self.atomictypes.append(int(float(line.split()[2])))162 self.cartesians.append([float(line.split()[3]),float(line.split()[4]),float(line.split()[5])])163 getatom_types(self, data, program)164 natoms = len(self.atom_types)165 try:166 get_freqs(self, data, natoms, program)167 except:168 pass169 # Convert coordinates to string that can be used by the symmetry.c program170 def coords_string(self):171 xyzstring = str(len(self.atom_nums)) + '\n'172 for atom, xyz in zip(self.atom_nums, self.cartesians):173 xyzstring += "{0} {1:.6f} {2:.6f} {3:.6f}\n".format(atom, *xyz)174 return xyzstring175 # Obtain molecule connectivity to be used for internal symmetry determination176 def get_connectivity(self):177 connectivity = []178 tolerance = 0.2179 for i, ai in enumerate(self.atom_types):180 row = []181 for j, aj in enumerate(self.atom_types):182 if i == j:183 continue184 cutoff = RADII[ai] + RADII[aj] + tolerance185 distance = np.linalg.norm(np.array(self.cartesians[i]) - np.array(self.cartesians[j]))186 if distance < cutoff:187 row.append(j)188 connectivity.append(row)189 self.connectivity = connectivity190def cosmo_rs_out(datfile, names, interval=False):191 """192 Read solvation free energies from a COSMO-RS data file193 Parameters:194 datfile (str): name of COSMO-RS output file.195 names (list): list of species in COSMO-RS file that correspond to names of other computational output files.196 interval (bool): flag for parser to read COSMO-RS temperature interval calculation.197 """198 gsolv = {}199 if os.path.exists(datfile):200 with open(datfile) as f:201 data = f.readlines()202 else:203 raise ValueError("File {} does not exist".format(datfile))204 temp = 0205 t_interval = []206 gsolv_dicts = []207 found = False208 oldtemp = 0209 gsolv_temp = {}210 if interval:211 for i, line in enumerate(data):212 for name in names:213 if line.find('(' + name.split('.')[0] + ')') > -1 and line.find('Compound') > -1:214 if data[i - 5].find('Temperature') > -1:215 temp = data[i - 5].split()[2]216 if float(temp) > float(interval[0]) and float(temp) < float(interval[1]):217 if float(temp) not in t_interval:218 t_interval.append(float(temp))219 if data[i + 10].find('Gibbs') > -1:220 gsolv = float(data[i + 10].split()[6].strip()) / KCAL_TO_AU221 gsolv_temp[name] = gsolv222 found = True223 if found:224 if oldtemp is 0:225 oldtemp = temp226 if temp is not oldtemp:227 gsolv_dicts.append(gsolv) # Store dict at one temp228 gsolv = {} # Clear gsolv229 gsolv.update(gsolv_temp) # Grab the first one for the new temp230 oldtemp = temp231 gsolv.update(gsolv_temp)232 gsolv_temp = {}233 found = False234 gsolv_dicts.append(gsolv) # Grab last dict235 else:236 for i, line in enumerate(data):237 for name in names:238 if line.find('(' + name.split('.')[0] + ')') > -1 and line.find('Compound') > -1:239 if data[i + 11].find('Gibbs') > -1:240 gsolv = float(data[i + 11].split()[6].strip()) / KCAL_TO_AU241 gsolv[name] = gsolv242 if interval:243 return t_interval, gsolv_dicts244 else:245 return gsolv246def parse_data(file):247 """248 Read computational chemistry output file.249 Attempt to obtain single point energy, program type, program version, solvation_model,250 charge, empirical_dispersion, and multiplicity from file.251 Parameter:252 file (str): name of file to be parsed.253 Returns:254 float: single point energy.255 str: program used to run 
calculation.256 str: version of program used to run calculation.257 str: solvation model used in chemical calculation (if any).258 str: original filename parsed.259 int: overall charge of molecule or chemical system.260 str: empirical dispersion used in chemical calculation (if any).261 int: multiplicity of molecule or chemical system.262 """263 spe, program, data, version_program, solvation_model, keyword_line, a, charge, multiplicity = 'none', 'none', [], '', '', '', 0, None, None264 if os.path.exists(os.path.splitext(file)[0] + '.log'):265 with open(os.path.splitext(file)[0] + '.log') as f:266 data = f.readlines()267 elif os.path.exists(os.path.splitext(file)[0] + '.out'):268 with open(os.path.splitext(file)[0] + '.out') as f:269 data = f.readlines()270 else:271 raise ValueError("File {} does not exist".format(file))272 for line in data:273 if "Gaussian" in line:274 program = "Gaussian"275 break276 if "* O R C A *" in line:277 program = "Orca"278 break279 if "NWChem" in line:280 program = "NWChem"281 break282 repeated_link1 = 0283 for line in data:284 if program == "Gaussian":285 if line.strip().startswith('SCF Done:'):286 spe = float(line.strip().split()[4])287 elif line.strip().startswith('Counterpoise corrected energy'):288 spe = float(line.strip().split()[4])289 # For MP2 calculations replace with EUMP2290 elif 'EUMP2 =' in line.strip():291 spe = float((line.strip().split()[5]).replace('D', 'E'))292 # For ONIOM calculations use the extrapolated value rather than SCF value293 elif "ONIOM: extrapolated energy" in line.strip():294 spe = (float(line.strip().split()[4]))295 # For G4 calculations look for G4 energies (Gaussian16a bug prints G4(0 K) as DE(HF)) --Brian modified to work for G16c-where bug is fixed.296 elif line.strip().startswith('G4(0 K)'):297 spe = float(line.strip().split()[2])298 spe -= zero_point_corr_G4 #Remove G4 ZPE299 elif line.strip().startswith('E(ZPE)='): #Get G4 ZPE300 zero_point_corr_G4 = float(line.strip().split()[1])301 # For TD calculations look for SCF energies of the first excited state302 elif 'E(TD-HF/TD-DFT)' in line.strip():303 spe = float(line.strip().split()[4])304 # For Semi-empirical or Molecular Mechanics calculations305 elif "Energy= " in line.strip() and "Predicted" not in line.strip() and "Thermal" not in line.strip() and "G4" not in line.strip():306 spe = (float(line.strip().split()[1]))307 elif "Gaussian" in line and "Revision" in line and repeated_link1 == 0:308 for i in range(len(line.strip(",").split(",")) - 1):309 line.strip(",").split(",")[i]310 version_program += line.strip(",").split(",")[i]311 repeated_link1 = 1312 version_program = version_program[1:]313 elif "Charge" in line.strip() and "Multiplicity" in line.strip():314 charge = int(line.split('Multiplicity')[0].split('=')[-1].strip())315 multiplicity = line.split('=')[-1].strip()316 if program == "Orca":317 if line.strip().startswith('FINAL SINGLE POINT ENERGY'):318 spe = float(line.strip().split()[4])319 if 'Program Version' in line.strip():320 version_program = "ORCA version " + line.split()[2]321 if "Total Charge" in line.strip() and "...." in line.strip():322 charge = int(line.strip("=").split()[-1])323 if "Multiplicity" in line.strip() and "...." 
in line.strip():324 multiplicity = int(line.strip("=").split()[-1])325 if program == "NWChem":326 if line.strip().startswith('Total DFT energy'):327 spe = float(line.strip().split()[4])328 if 'nwchem branch' in line.strip():329 version_program = "NWChem version " + line.split()[3]330 if "charge" in line.strip():331 charge = int(line.strip().split()[-1])332 if "mult " in line.strip():333 multiplicity = int(line.strip().split()[-1])334 # Solvation model and empirical dispersion detection335 if 'Gaussian' in version_program.strip():336 for i, line in enumerate(data):337 if '#' in line.strip() and a == 0:338 for j, line in enumerate(data[i:i + 10]):339 if '--' in line.strip():340 a = a + 1341 break342 if a != 0:343 break344 else:345 for k in range(len(line.strip().split("\n"))):346 line.strip().split("\n")[k]347 keyword_line += line.strip().split("\n")[k]348 keyword_line = keyword_line.lower()349 if 'scrf' not in keyword_line.strip():350 solvation_model = "gas phase"351 else:352 start_scrf = keyword_line.strip().find('scrf') + 4353 if '(' in keyword_line[start_scrf:start_scrf + 4]:354 start_scrf += keyword_line[start_scrf:start_scrf + 4].find('(') + 1355 end_scrf = keyword_line.find(")", start_scrf)356 display_solvation_model = "scrf=(" + ','.join(357 keyword_line[start_scrf:end_scrf].lower().split(',')) + ')'358 sorted_solvation_model = "scrf=(" + ','.join(359 sorted(keyword_line[start_scrf:end_scrf].lower().split(','))) + ')'360 else:361 if ' = ' in keyword_line[start_scrf:start_scrf + 4]:362 start_scrf += keyword_line[start_scrf:start_scrf + 4].find(' = ') + 3363 elif ' =' in keyword_line[start_scrf:start_scrf + 4]:364 start_scrf += keyword_line[start_scrf:start_scrf + 4].find(' =') + 2365 elif '=' in keyword_line[start_scrf:start_scrf + 4]:366 start_scrf += keyword_line[start_scrf:start_scrf + 4].find('=') + 1367 end_scrf = keyword_line.find(" ", start_scrf)368 if end_scrf == -1:369 display_solvation_model = "scrf=(" + ','.join(keyword_line[start_scrf:].lower().split(',')) + ')'370 sorted_solvation_model = "scrf=(" + ','.join(371 sorted(keyword_line[start_scrf:].lower().split(','))) + ')'372 else:373 display_solvation_model = "scrf=(" + ','.join(374 keyword_line[start_scrf:end_scrf].lower().split(',')) + ')'375 sorted_solvation_model = "scrf=(" + ','.join(376 sorted(keyword_line[start_scrf:end_scrf].lower().split(','))) + ')'377 if solvation_model != "gas phase":378 solvation_model = [sorted_solvation_model, display_solvation_model]379 empirical_dispersion = ''380 if keyword_line.strip().find('empiricaldispersion') == -1 and keyword_line.strip().find(381 'emp=') == -1 and keyword_line.strip().find('emp =') == -1 and keyword_line.strip().find('emp(') == -1:382 empirical_dispersion = "No empirical dispersion detected"383 elif keyword_line.strip().find('empiricaldispersion') > -1:384 start_emp_disp = keyword_line.strip().find('empiricaldispersion') + 19385 if '(' in keyword_line[start_emp_disp:start_emp_disp + 4]:386 start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find('(') + 1387 end_emp_disp = keyword_line.find(")", start_emp_disp)388 empirical_dispersion = 'empiricaldispersion=(' + ','.join(389 sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'390 else:391 if ' = ' in keyword_line[start_emp_disp:start_emp_disp + 4]:392 start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find(' = ') + 3393 elif ' =' in keyword_line[start_emp_disp:start_emp_disp + 4]:394 start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find(' =') + 
2395 elif '=' in keyword_line[start_emp_disp:start_emp_disp + 4]:396 start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find('=') + 1397 end_emp_disp = keyword_line.find(" ", start_emp_disp)398 if end_emp_disp == -1:399 empirical_dispersion = "empiricaldispersion=(" + ','.join(400 sorted(keyword_line[start_emp_disp:].lower().split(','))) + ')'401 else:402 empirical_dispersion = "empiricaldispersion=(" + ','.join(403 sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'404 elif keyword_line.strip().find('emp=') > -1 or keyword_line.strip().find(405 'emp =') > -1 or keyword_line.strip().find('emp(') > -1:406 # Check for temp keyword407 temp, emp_e, emp_p = False, False, False408 check_temp = keyword_line.strip().find('emp=')409 start_emp_disp = keyword_line.strip().find('emp=')410 if check_temp == -1:411 check_temp = keyword_line.strip().find('emp =')412 start_emp_disp = keyword_line.strip().find('emp =')413 if check_temp == -1:414 check_temp = keyword_line.strip().find('emp=(')415 start_emp_disp = keyword_line.strip().find('emp(')416 check_temp += -1417 if keyword_line[check_temp].lower() == 't':418 temp = True # Look for a new one419 if keyword_line.strip().find('emp=', check_temp + 5) > -1:420 emp_e = True421 start_emp_disp = keyword_line.strip().find('emp=', check_temp + 5) + 3422 elif keyword_line.strip().find('emp =', check_temp + 5) > -1:423 emp_e = True424 start_emp_disp = keyword_line.strip().find('emp =', check_temp + 5) + 3425 elif keyword_line.strip().find('emp(', check_temp + 5) > -1:426 emp_p = True427 start_emp_disp = keyword_line.strip().find('emp(', check_temp + 5) + 3428 else:429 empirical_dispersion = "No empirical dispersion detected"430 else:431 start_emp_disp += 3432 if (temp and emp_e) or (not temp and keyword_line.strip().find('emp=') > -1) or (433 not temp and keyword_line.strip().find('emp =')):434 if '(' in keyword_line[start_emp_disp:start_emp_disp + 4]:435 start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find('(') + 1436 end_emp_disp = keyword_line.find(")", start_emp_disp)437 empirical_dispersion = 'empiricaldispersion=(' + ','.join(438 sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'439 else:440 if ' = ' in keyword_line[start_emp_disp:start_emp_disp + 4]:441 start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find(' = ') + 3442 elif ' =' in keyword_line[start_emp_disp:start_emp_disp + 4]:443 start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find(' =') + 2444 elif '=' in keyword_line[start_emp_disp:start_emp_disp + 4]:445 start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find('=') + 1446 end_emp_disp = keyword_line.find(" ", start_emp_disp)447 if end_emp_disp == -1:448 empirical_dispersion = "empiricaldispersion=(" + ','.join(449 sorted(keyword_line[start_emp_disp:].lower().split(','))) + ')'450 else:451 empirical_dispersion = "empiricaldispersion=(" + ','.join(452 sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'453 elif (temp and emp_p) or (not temp and keyword_line.strip().find('emp(') > -1):454 start_emp_disp += keyword_line[start_emp_disp:start_emp_disp + 4].find('(') + 1455 end_emp_disp = keyword_line.find(")", start_emp_disp)456 empirical_dispersion = 'empiricaldispersion=(' + ','.join(457 sorted(keyword_line[start_emp_disp:end_emp_disp].lower().split(','))) + ')'458 if 'ORCA' in version_program.strip():459 keyword_line_1 = "gas phase"460 keyword_line_2 = ''461 keyword_line_3 = ''462 for i, line in 
enumerate(data):463 if 'CPCM SOLVATION MODEL' in line.strip():464 keyword_line_1 = "CPCM,"465 if 'SMD CDS free energy correction energy' in line.strip():466 keyword_line_2 = "SMD,"467 if "Solvent: " in line.strip():468 keyword_line_3 = line.strip().split()[-1]469 solvation_model = keyword_line_1 + keyword_line_2 + keyword_line_3470 empirical_dispersion1 = 'No empirical dispersion detected'471 empirical_dispersion2 = ''472 empirical_dispersion3 = ''473 for i, line in enumerate(data):474 if keyword_line.strip().find('DFT DISPERSION CORRECTION') > -1:475 empirical_dispersion1 = ''476 if keyword_line.strip().find('DFTD3') > -1:477 empirical_dispersion2 = "D3"478 if keyword_line.strip().find('USING zero damping') > -1:479 empirical_dispersion3 = ' with zero damping'480 empirical_dispersion = empirical_dispersion1 + empirical_dispersion2 + empirical_dispersion3481 if 'NWChem' in version_program.strip():482 # keyword_line_1 = "gas phase"483 # keyword_line_2 = ''484 # keyword_line_3 = ''485 # for i, line in enumerate(data):486 # if 'CPCM SOLVATION MODEL' in line.strip():487 # keyword_line_1 = "CPCM,"488 # if 'SMD CDS free energy correction energy' in line.strip():489 # keyword_line_2 = "SMD,"490 # if "Solvent: " in line.strip():491 # keyword_line_3 = line.strip().split()[-1]492 # solvation_model = keyword_line_1 + keyword_line_2 + keyword_line_3493 empirical_dispersion1 = 'No empirical dispersion detected'494 empirical_dispersion2 = ''495 empirical_dispersion3 = ''496 for i, line in enumerate(data):497 if keyword_line.strip().find('Dispersion correction') > -1:498 empirical_dispersion1 = ''499 if keyword_line.strip().find('disp vdw 3') > -1:500 empirical_dispersion2 = "D3"501 if keyword_line.strip().find('disp vdw 4') > -1:502 empirical_dispersion2 = "D3BJ"503 empirical_dispersion = empirical_dispersion1 + empirical_dispersion2 + empirical_dispersion3504 return spe, program, version_program, solvation_model, file, charge, empirical_dispersion, multiplicity505def sp_cpu(file):506 """Read single-point output for cpu time."""507 spe, program, data, cpu = None, None, [], None508 if os.path.exists(os.path.splitext(file)[0] + '.log'):509 with open(os.path.splitext(file)[0] + '.log') as f:510 data = f.readlines()511 elif os.path.exists(os.path.splitext(file)[0] + '.out'):512 with open(os.path.splitext(file)[0] + '.out') as f:513 data = f.readlines()514 else:515 raise ValueError("File {} does not exist".format(file))516 for line in data:517 if line.find("Gaussian") > -1:518 program = "Gaussian"519 break520 if line.find("* O R C A *") > -1:521 program = "Orca"522 break523 if line.find("NWChem") > -1:524 program = "NWChem"525 break526 for line in data:527 if program == "Gaussian":528 if line.strip().startswith('SCF Done:'):529 spe = float(line.strip().split()[4])530 if line.strip().find("Job cpu time") > -1:531 days = int(line.split()[3])532 hours = int(line.split()[5])533 mins = int(line.split()[7])534 secs = 0535 msecs = int(float(line.split()[9]) * 1000.0)536 cpu = [days, hours, mins, secs, msecs]537 if program == "Orca":538 if line.strip().startswith('FINAL SINGLE POINT ENERGY'):539 spe = float(line.strip().split()[4])540 if line.strip().find("TOTAL RUN TIME") > -1:541 days = int(line.split()[3])542 hours = int(line.split()[5])543 mins = int(line.split()[7])544 secs = int(line.split()[9])545 msecs = float(line.split()[11])546 cpu = [days, hours, mins, secs, msecs]547 if program == "NWChem":548 if line.strip().startswith('Total DFT energy ='):549 spe = float(line.strip().split()[4])550 if 
line.strip().find("Total times") > -1:551 days = 0552 hours = 0553 mins = 0554 secs = float(line.split()[3][0:-1])555 msecs = 0556 cpu = [days,hours,mins,secs,msecs]557 return cpu558def level_of_theory(file):559 """Read output for the level of theory and basis set used."""560 repeated_theory = 0561 with open(file) as f:562 data = f.readlines()563 level, bs = 'none', 'none'564 for line in data:565 if line.strip().find('External calculation') > -1:566 level, bs = 'ext', 'ext'567 break568 if '\\Freq\\' in line.strip() and repeated_theory == 0:569 try:570 level, bs = (line.strip().split("\\")[4:6])571 repeated_theory = 1572 except IndexError:573 pass574 elif '|Freq|' in line.strip() and repeated_theory == 0:575 try:576 level, bs = (line.strip().split("|")[4:6])577 repeated_theory = 1578 except IndexError:579 pass580 if '\\SP\\' in line.strip() and repeated_theory == 0:581 try:582 level, bs = (line.strip().split("\\")[4:6])583 repeated_theory = 1584 except IndexError:585 pass586 elif '|SP|' in line.strip() and repeated_theory == 0:587 try:588 level, bs = (line.strip().split("|")[4:6])589 repeated_theory = 1590 except IndexError:591 pass592 if 'DLPNO BASED TRIPLES CORRECTION' in line.strip():593 level = 'DLPNO-CCSD(T)'594 if 'Estimated CBS total energy' in line.strip():595 try:596 bs = ("Extrapol." + line.strip().split()[4])597 except IndexError:598 pass599 # Remove the restricted R or unrestricted U label600 if level[0] in ('R', 'U'):601 level = level[1:]602 level_of_theory = '/'.join([level, bs])603 return level_of_theory604def read_initial(file):605 """At beginning of procedure, read level of theory, solvation model, and check for normal termination"""606 with open(file) as f:607 data = f.readlines()608 level, bs, program, keyword_line = 'none', 'none', 'none', 'none'609 progress, orientation = 'Incomplete', 'Input'610 a, repeated_theory = 0, 0611 no_grid = True612 DFT, dft_used, level, bs, scf_iradan, cphf_iradan = False, 'F', 'none', 'none', False, False613 grid_lookup = {1: 'sg1', 2: 'coarse', 4: 'fine', 5: 'ultrafine', 7: 'superfine'}614 for line in data:615 # Determine program616 if "Gaussian" in line:617 program = "Gaussian"618 break619 if "* O R C A *" in line:620 program = "Orca"621 break622 if "NWChem" in line:623 program = "NWChem"624 break625 for line in data:626 # Grab pertinent information from file627 if line.strip().find('External calculation') > -1:628 level, bs = 'ext', 'ext'629 if line.strip().find('Standard orientation:') > -1:630 orientation = 'Standard'631 if line.strip().find('IExCor=') > -1 and no_grid:632 try:633 dft_used = line.split('=')[2].split()[0]634 grid = grid_lookup[int(dft_used)]635 no_grid = False636 except:637 pass638 if '\\Freq\\' in line.strip() and repeated_theory == 0:639 try:640 level, bs = (line.strip().split("\\")[4:6])641 repeated_theory = 1642 except IndexError:643 pass644 elif '|Freq|' in line.strip() and repeated_theory == 0:645 try:646 level, bs = (line.strip().split("|")[4:6])647 repeated_theory = 1648 except IndexError:649 pass650 if '\\SP\\' in line.strip() and repeated_theory == 0:651 try:652 level, bs = (line.strip().split("\\")[4:6])653 repeated_theory = 1654 except IndexError:655 pass656 elif '|SP|' in line.strip() and repeated_theory == 0:657 try:658 level, bs = (line.strip().split("|")[4:6])659 repeated_theory = 1660 except IndexError:661 pass662 if 'DLPNO BASED TRIPLES CORRECTION' in line.strip():663 level = 'DLPNO-CCSD(T)'664 if 'Estimated CBS total energy' in line.strip():665 try:666 bs = ("Extrapol." 
+ line.strip().split()[4])667 except IndexError:668 pass669 # Remove the restricted R or unrestricted U label670 if level[0] in ('R', 'U'):671 level = level[1:]672 #NWChem specific parsing673 if program is 'NWChem':674 keyword_line_1 = "gas phase"675 keyword_line_2 = ''676 keyword_line_3 = ''677 for i, line in enumerate(data):678 if line.strip().startswith("xc "):679 level=line.strip().split()[1]680 if line.strip().startswith("* library "):681 bs = line.strip().replace("* library ",'')682 #need to update these tags for NWChem solvation later683 if 'CPCM SOLVATION MODEL' in line.strip():684 keyword_line_1 = "CPCM,"685 if 'SMD CDS free energy correction energy' in line.strip():686 keyword_line_2 = "SMD,"687 if "Solvent: " in line.strip():688 keyword_line_3 = line.strip().split()[-1]689 #need to update NWChem keyword for error calculation690 if 'Total times' in line:691 progress = 'Normal'692 elif 'error termination' in line:693 progress = 'Error'694 solvation_model = keyword_line_1 + keyword_line_2 + keyword_line_3695 # Grab solvation models - Gaussian files696 if program is 'Gaussian':697 for i, line in enumerate(data):698 if '#' in line.strip() and a == 0:699 for j, line in enumerate(data[i:i + 10]):700 if '--' in line.strip():701 a = a + 1702 break703 if a != 0:704 break705 else:706 for k in range(len(line.strip().split("\n"))):707 line.strip().split("\n")[k]708 keyword_line += line.strip().split("\n")[k]709 if 'Normal termination' in line:710 progress = 'Normal'711 elif 'Error termination' in line:712 progress = 'Error'713 keyword_line = keyword_line.lower()714 if 'scrf' not in keyword_line.strip():715 solvation_model = "gas phase"716 else:717 start_scrf = keyword_line.strip().find('scrf') + 5718 if keyword_line[start_scrf] == "(":719 end_scrf = keyword_line.find(")", start_scrf)720 solvation_model = "scrf=" + keyword_line[start_scrf:end_scrf]721 if solvation_model[-1] != ")":722 solvation_model = solvation_model + ")"723 else:724 start_scrf2 = keyword_line.strip().find('scrf') + 4725 if keyword_line.find(" ", start_scrf) > -1:726 end_scrf = keyword_line.find(" ", start_scrf)727 else:728 end_scrf = len(keyword_line)729 if keyword_line[start_scrf2] == "(":730 solvation_model = "scrf=(" + keyword_line[start_scrf:end_scrf]731 if solvation_model[-1] != ")":732 solvation_model = solvation_model + ")"733 else:734 if keyword_line.find(" ", start_scrf) > -1:735 end_scrf = keyword_line.find(" ", start_scrf)736 else:737 end_scrf = len(keyword_line)738 solvation_model = "scrf=" + keyword_line[start_scrf:end_scrf]739 # ORCA parsing for solvation model740 elif program is 'Orca':741 keyword_line_1 = "gas phase"742 keyword_line_2 = ''743 keyword_line_3 = ''744 for i, line in enumerate(data):745 if 'CPCM SOLVATION MODEL' in line.strip():746 keyword_line_1 = "CPCM,"747 if 'SMD CDS free energy correction energy' in line.strip():748 keyword_line_2 = "SMD,"749 if "Solvent: " in line.strip():750 keyword_line_3 = line.strip().split()[-1]751 if 'ORCA TERMINATED NORMALLY' in line:752 progress = 'Normal'753 elif 'error termination' in line:754 progress = 'Error'755 solvation_model = keyword_line_1 + keyword_line_2 + keyword_line_3756 level_of_theory = '/'.join([level, bs])757 return level_of_theory, solvation_model, progress, orientation, dft_used758def jobtype(file):759 """Read output for the level of theory and basis set used."""760 with open(file) as f:761 data = f.readlines()762 job = ''763 for line in data:764 if line.strip().find('\\SP\\') > -1:765 job += 'SP'766 if line.strip().find('\\FOpt\\') > -1:767 
job += 'GS'768 if line.strip().find('\\FTS\\') > -1:769 job += 'TS'770 if line.strip().find('\\Freq\\') > -1:771 job += 'Freq'...
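In the parse_data function above, keyword_line is not a library method but a plain string: the Gaussian route section (everything from the '#' line down to the '----' divider) is concatenated into it, lower-cased, and then searched for the 'scrf' and 'empiricaldispersion' keywords. The standalone sketch below reproduces only the solvation-model branch of that logic; the detect_solvation name and the sample route lines are invented for illustration and are not part of io.py.

# Simplified, hypothetical sketch of the keyword_line handling in parse_data above.
# Only the scrf (solvation model) branch is reproduced; the input lines are made up.
def detect_solvation(data):
    keyword_line, a = '', 0
    for i, line in enumerate(data):
        if '#' in line.strip() and a == 0:
            # Accumulate the route section until the '----' divider is reached
            for route_line in data[i:i + 10]:
                if '--' in route_line.strip():
                    a += 1
                    break
                keyword_line += route_line.strip()
        if a != 0:
            break
    keyword_line = keyword_line.lower()
    if 'scrf' not in keyword_line:
        return 'gas phase'
    start = keyword_line.find('scrf') + 4
    if '(' in keyword_line[start:start + 4]:
        start += keyword_line[start:start + 4].find('(') + 1
        end = keyword_line.find(')', start)
    else:
        start += keyword_line[start:start + 4].find('=') + 1
        end = keyword_line.find(' ', start)
        if end == -1:
            end = len(keyword_line)
    # Sort the scrf options so that equivalent models compare equal, as io.py does
    return 'scrf=(' + ','.join(sorted(keyword_line[start:end].split(','))) + ')'

route = [' #p m062x/def2tzvp scrf=(smd,solvent=water) opt freq',
         ' ----------------------------------------------------------------------']
print(detect_solvation(route))   # scrf=(smd,solvent=water)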


__main__.py

Source: __main__.py (GitHub)

1#!/usr/bin/python32import argparse3import collections4import datetime5import re6import sys7import tempfile8import numpy as np9class Model:10 def __init__(self):11 self.assembly = None12 @staticmethod13 def parse(cls, file_or_name):14 NotImplemented()15 def write(cls, file_or_name):16 NotImplemented()17class Assembly:18 def __init__(self):19 self.name = None20 self.instances = []21 self.sets = {}22class Instance:23 def __init__(self):24 self.name = None25 self.part = None26 self.translation = None27 self.rotation = None28class Part:29 def __init__(self):30 self.name = None31 self.nodes = None32 self.elements = None33 self.sets = None34class AbaqusInput:35 def __init__(self):36 self.assembly = None37 self.header = None38 @staticmethod39 def parse(cls, file_or_name):40 ret = cls()41 return ret42class Orientation():43 system = None44 a = None45 b = None46 c = None47 rot_axis = None48 rot = None49 def __init__(self):50 self.a = np.empty(3)51 self.a.fill(np.nan)52 self.b = np.empty(3)53 self.b.fill(np.nan)54 self.c = np.empty(3)55 self.c.fill(0)56class Element():57 id = 058 type = None59 def __init__(self):60 self.node = []61class Node():62 id = 063 pos = None64 def __init__(self):65 self.pos = np.zeros(3)66class Section():67 elset = None68 orientation = None69class Part():70 id = 071class Set(list):72 name = None73 instance = None74 part = None75class AbaqusPart(Part):76 orientation = None77 set = None78 section = None79 def __init__(self, name):80 self.name = name81 self.element = {}82 self.node = {}83 self.set = {}84 self.set['node'] = {}85 self.set['element'] = {}86 self.section = []87 self.orientation = {}88def GetRotationMatrix(a,b,th):89 ux = b[0] - a[0]90 uy = b[1] - a[1]91 uz = b[2] - a[2]92 rm = np.ndarray([3,3])93 th = th * np.pi/180.94 cos = np.cos(th)95 sin = np.sin(th)96 one_cos = 1-cos97 rm[0,0] = cos + ux**2*one_cos98 rm[0,1] = ux*uy*one_cos - uz*sin99 rm[0,2] = ux*uz*one_cos + uy*sin100 rm[1,0] = uy*ux*one_cos + uz*sin101 rm[1,1] = cos + uy**2*one_cos102 rm[1,2] = uy*uz*one_cos - ux*sin103 rm[2,0] = uz*ux*one_cos - uy*sin104 rm[2,1] = uz*uy*one_cos + ux*sin105 rm[2,2] = cos + uz**2*one_cos106 return rm107class AbaqusInstance():108 name = None109 part = None110 translation = None111 rotation = None112 @property113 def rotation_matrix(self):114 th = self.rotation['deg']115 a = self.rotation['a']116 b = self.rotation['b']117 rm = GetRotationMatrix(a,b,th)118 return rm119 def __init__(self):120 self.translation = np.zeros(3)121 self.rotation = {}122 self.rotation['a'] = np.zeros(3)123 self.rotation['b'] = np.zeros(3)124 self.rotation['deg'] = 0125class AbaqusInput():126 def __init__(self):127 self.part = {}128 self.instance = {}129 self.count = collections.Counter()130 self.set = {}131 self.set['node'] = {}132 self.set['element'] = {}133# parse Abaqus input file134def ParseAbaqus(file):135 ret = AbaqusInput()136 count = collections.Counter()137 regex = {}138 regex['comment'] = re.compile(r'^\*\*')139 regex['keyword'] = re.compile(r'^\*[a-zA-Z0-9 ]+[,\n]')140 regex['data'] = re.compile(r'^(?!\*)')141 line_counter = collections.Counter()142 line_counter['total'] = len(file.readlines())143 file.seek(0)144 def ends_at(kw,loc):145 if loc == '*':146 if section[len(section)-1] != kw:147 section.pop(section.index(kw))148 if type(loc) is int:149 if count['keyword_line'] == loc:150 section.pop(section.index(kw))151 if type(loc) is str:152 if loc in section:153 section.pop(section.index(kw))154 def update_term(complete=False):155 total = line_counter['total']156 count = 
line_counter['current']157 perc = int(100 * count * 1.0 / total)158 old_perc = int(100 * (count-1)*1.0/total)159 if perc != old_perc or count == 1 or perc == 1:160 perc = count * 1.0 / total161 #import pdb; pdb.set_trace()162 fmt = 'Parsing ({:3.0f}% complete)'.format(100*perc)163 if complete:164 to_write = fmt + '...done!\n'165 sys.stdout.write(to_write)166 else:167 to_write = fmt + '\r'168 sys.stdout.write(to_write)169 sys.stdout.flush()170 for line in file:171 line_counter['current'] += 1172 update_term()173 current_instance = None174 ret.count['line'] += 1175 if regex['comment'].search(line):176 # This is a comment line177 ret.count['comment'] += 1178 continue179 if regex['keyword'].search(line):180 match = regex['keyword'].search(line)181 # This is a keyword line182 ret.count['keyword'] += 1183 kw = match.group(0).upper().strip(' ,\n*')184 ret.count['*' + kw] += 1185 count['keyword_line'] = 0186 # parse keyword arguments187 kwargs = {}188 splitline = line.split(',')189 splitline.pop(0)190 for i in splitline:191 i = i.strip(' \n')192 if '=' in i:193 i = i.split('=')194 kwargs[i[0]] = i[1].strip('\'\"')195 else:196 kwargs[i] = True197 #print('{:5d}:{:15s}'.format(ret.count['line'],kw))198 #if len(kwargs) > 0:199 #print(kwargs)200 if regex['data'].search(line):201 count['keyword_line'] += 1202 # from here on, process the current keyword203 if kw == 'PART':204 if count['keyword_line'] == 0:205 name = kwargs['name']206 ret.part[name] = AbaqusPart(name)207 current_part = name208 if kw == 'END PART':209 current_part = None210 if kw == 'ASSEMBLY':211 in_assembly = True212 if kw == 'END ASSEMBLY':213 in_assembly = False214 if kw == 'INSTANCE':215 if count['keyword_line'] == 0:216 instance = AbaqusInstance()217 instance.name = kwargs['name']218 instance.part = kwargs['part']219 current_instance = instance.name220 ret.instance[instance.name] = instance221 if count['keyword_line'] == 1:222 parts = line.split(',')223 instance.translation[:] = parts224 if count['keyword_line'] == 2:225 parts = line.split(',')226 instance.rotation['a'][:] = parts[0:3]227 instance.rotation['b'][:] = parts[3:6]228 instance.rotation['deg'] = float(parts[6])229 if kw == 'END INSTANCE':230 current_instance = None231 if kw == 'ORIENTATION':232 if count['keyword_line'] == 0:233 orient = Orientation()234 orient.name = kwargs['name']235 if 'system' in kwargs:236 orient.system = kwargs['system']237 else:238 orient.system = 'RECTANGULAR'239 ret.part[current_part].orientation[orient.name] = orient240 elif count['keyword_line'] == 1:241 parts = line.split(',')242 orient.a[:] = parts[0:3]243 orient.b[:] = parts[3:6]244 if len(parts) == 9:245 orient.c[:] = parts[6:9]246 elif count['keyword_line'] == 2:247 parts = line.split(',')248 orient.rot_axis = int(parts[0])249 orient.rot = float(parts[1])250 if 'SECTION' in kw:251 if count['keyword_line'] == 0 and 'orientation' in kwargs:252 section = Section()253 section.elset = kwargs['elset']254 section.orientation = kwargs['orientation']255 ret.part[current_part].section.append(section)256 if kw == 'NODE':257 #import pdb; pdb.set_trace()258 if count['keyword_line'] > 0:259 parts = line.split(',')260 node = Node()261 node.id = int(parts[0])262 node.pos[0] = float(parts[1])263 node.pos[1] = float(parts[2])264 try:265 node.pos[2] = float(parts[3])266 except:267 pass268 ret.part[current_part].node[node.id] = node269 if kw == 'ELEMENT':270 if count['keyword_line'] > 0:271 parts = line.split(',')272 element = Element()273 element.id = int(parts[0])274 for i in range(1,len(parts)):275 
element.node.append(int(parts[i]))276 element.type = kwargs['type']277 ret.part[current_part].element[element.id] = element278 if kw == 'ELSET':279 if count['keyword_line'] == 0:280 elset = Set()281 elset.name = kwargs['elset']282 if 'instance' in kwargs:283 elset.instance = kwargs['instance']284 ret.set['element'][elset.name] = elset285 else:286 elset.part = current_part287 ret.part[current_part].set['element'][elset.name] = elset288 if count['keyword_line'] > 0:289 parts = line.split(',')290 try:291 if kwargs['generate']:292 start = int(parts[0])293 stop = int(parts[1]) + 1294 inc = int(parts[2])295 for i in range(start, stop, inc):296 elset.append(i)297 except:298 for i in parts:299 elset.append(int(i))300 if kw == 'NSET':301 if count['keyword_line'] == 0:302 nset = Set()303 nset.name = kwargs['nset']304 if 'instance' in kwargs:305 nset.instance = kwargs['instance']306 ret.set['node'][nset.name] = nset307 else:308 nset.part = current_part309 ret.part[current_part].set['node'][nset.name] = nset310 if count['keyword_line'] > 0:311 parts = line.split(',')312 try:313 if kwargs['generate']:314 start = int(parts[0])315 stop = int(parts[1]) + 1316 inc = int(parts[2])317 for i in range(start, stop, inc):318 nset.append(i)319 except:320 for i in parts:321 nset.append(int(i))322 update_term(True)323 #import pdb; pdb.set_trace()324 return ret325def WriteDynaFromAbaqus(total_nodel, inp_name, abaqus_keyword, ostream):326 #global total_nodel327 written_nodel = 0328 def update_term(complete=False):329 #global total_nodel330 #global ostream331 total = total_nodel332 counted = count['node'] + count['element']333 perc = int(1000 * counted * 1.0 / total)334 old_perc = int(1000 * (counted-1)*1.0/total)335 if perc != old_perc or counted == 1:336 status = 'Compiling data ({:5.1f}% complete)'.format(perc/10)337 if complete:338 to_write = status + '...done\n'339 else:340 to_write = status + '\r'341 sys.stdout.write(to_write)342 sys.stdout.flush()343 inp = abaqus_keyword344 output = {}345 output['header'] = (346 '$ LS-DYNA keyword input file\n' +347 '$ Auto-translated from ' + inp_name +348 ' by abaqus2dyna.py\n')349 output['timestamp'] = ('$ translated at: ' +350 datetime.datetime.utcnow().strftime("%y-%m-%d %H:%M:%S UTC") + '\n')351 output['data'] = tempfile.TemporaryFile(mode='w+')352 output['stats'] = {}353 output['stats']['pid'] = []354 output['stats']['esid'] = []355 output['stats']['nsid'] = []356 def comment_line(string, fill='', newline=True):357 ret = ('${:' + fill + '^78s}$').format(string)358 if newline:359 ret += '\n'360 return ret361 set_comment_sep = comment_line('',fill='-')362 set_comment_fmt = '${:>12s}{:12d}' + ' '*12 + '{:42s}$\n'363 set_comment_head = ('${:_>12s}{:_>12s}'+'_'*12 + '{:_<42s}$\n'364 ).format('type','id','name')365 output['sep'] = comment_line('',fill='*')366 count = collections.Counter()367 offset = collections.Counter()368 # need to write nodes, then elements, then sets369 for i in inp.instance:370 #print('Writing instance ' + i)371 count['part'] += 1372 output['data'].write(comment_line('',fill='*'))373 output['data'].write(comment_line('Data for part ' + i + ', pid=' + str(count['part'])))374 output['data'].write(comment_line('',fill='*'))375 instance = inp.instance[i]376 rotation_matrix = instance.rotation_matrix377 output['stats']['pid'].append([count['part'],instance.name])378 # write nodes379 offset['node'] = count['node']380 offset['element'] = count['element']381 #import pdb; pdb.set_trace()382 if inp.part[instance.part].node:383 output['data'].write('*NODE\n')384 
node_fmt = '{0:8d}{1[0]:16.8e}{1[1]:16.8e}{1[2]:16.8e}\n'385 for j in inp.part[instance.part].node:386 node = inp.part[instance.part].node[j]387 id = node.id + offset['node']388 orig_pos = node.pos389 final_pos = rotation_matrix.dot(orig_pos) #rotation390 final_pos += instance.translation #translation391 output['data'].write(node_fmt.format(id,final_pos))392 count['node'] += 1393 update_term()394 element_conversion = {'C3D8R':'SOLID',395 'S4R':'SHELL'}396 if inp.part[instance.part].element:397 current_type = None398 current_has_orient = None399 element_fmt = '{:8d}{:8d}{:8d}{:8d}{:8d}{:8d}{:8d}{:8d}{:8d}{:8d}\n'400 for j in inp.part[instance.part].element:401 element = inp.part[instance.part].element[j]402 id = element.id + offset['element']403 this_has_orient = False404 this_orient = None405 if element_conversion[element.type] == 'SOLID':406 for k in inp.part[instance.part].section:407 if element.id in inp.part[instance.part].set['element'][k.elset]:408 this_has_orient = True409 this_orient = inp.part[instance.part].orientation[k.orientation]410 continue411 if element.type != current_type or this_has_orient != current_has_orient:412 output['data'].write('*ELEMENT_' + element_conversion[element.type])413 if this_has_orient:414 output['data'].write('_ORTHO')415 output['data'].write('\n')416 current_type = element.type417 current_has_orient = this_has_orient418 nodes = (np.array(element.node) + offset['node']).tolist()419 for k in range(8 - len(nodes)):420 nodes.append(0)421 #import pdb; pdb.set_trace()422 output['data'].write(element_fmt.format(id,count['part'],*nodes))423 if current_has_orient:424 a = np.empty(3)425 d = np.empty(3)426 # get element centroid427 centroid = np.zeros(3)428 for k in element.node:429 centroid += inp.part[instance.part].node[k].pos430 centroid /= len(nodes)431 system = this_orient.system432 if system == 'CYLINDRICAL':433 #import pdb; pdb.set_trace()434 c = this_orient.b - this_orient.a435 c /= np.linalg.norm(c)436 g = centroid - this_orient.a # to centroid from a437 h = c.dot(g) # distance along _ab_ to centroid438 h = this_orient.a + h*c # vector along _ab_ to centroid439 a = centroid - h440 a /= np.linalg.norm(a)441 d = np.cross(c,a)442 # maybe an additional rotation?443 if this_orient.rot_axis == 1:444 axis_point = a445 elif this_orient.rot_axis == 2:446 axis_point = d447 else:448 axis_point = c449 rotation_matrix = GetRotationMatrix([0,0,0],axis_point,this_orient.rot)450 a = rotation_matrix.dot(a)451 d = rotation_matrix.dot(d)452 # rotate local CS with instance453 rotation_matrix = instance.rotation_matrix454 a = rotation_matrix.dot(a) #rotation455 #a += instance.translation #translation456 d = rotation_matrix.dot(d) #rotation457 #d += instance.translation #translation458 d /= np.linalg.norm(d)459 a /= np.linalg.norm(a)460 #import pdb; pdb.set_trace()461 if system in ['RECTANGULAR',None]:462 a = this_orient.a463 a /= np.linalg.norm(a)464 b = this_orient.b465 b /= np.linalg.norm(b)466 c = np.cross(a,b)467 d = np.cross(c,a)468 # maybe an additional rotation?469 if this_orient.rot_axis == 1:470 axis_point = a471 elif this_orient.rot_axis == 2:472 axis_point = d473 else:474 axis_point = c475 rotation_matrix = GetRotationMatrix([0,0,0],axis_point,this_orient.rot)476 a = rotation_matrix.dot(a)477 d = rotation_matrix.dot(d)478 # rotate local CS with instance479 rotation_matrix = instance.rotation_matrix480 a = rotation_matrix.dot(a) #rotation481 #a += instance.translation #translation482 d = rotation_matrix.dot(d) #rotation483 #d += instance.translation 
#translation484 d /= np.linalg.norm(d)485 a /= np.linalg.norm(a)486 #import pdb; pdb.set_trace()487 else:488 raise Exception('yep, ' + system + ' not yet implemented')489 output['data'].write('{0[0]:16.7e}{0[1]:16.7e}{0[2]:16.7e}\n'.format(a))490 output['data'].write('{0[0]:16.7e}{0[1]:16.7e}{0[2]:16.7e}\n'.format(d))491 count['element'] += 1492 update_term()493 # sets494 for k in inp.set['element']:495 if inp.set['element'][k].instance == instance.name:496 #TODO, need to check that all elements are same type (for DYNA sake)497 start = True498 set_naming_parts = k.split(':')499 set_name = set_naming_parts[1]500 set_id = int(set_naming_parts[0][1:])501 tmp_element_count = 0502 for m in inp.set['element'][k]:503 if start:504 element = inp.part[instance.part].element[m]505 output['data'].write('*SET_' + element_conversion[element.type] + '\n')506 output['data'].write('{:10d}\n'.format(set_id))507 start = False508 if tmp_element_count > 7:509 output['data'].write('\n')510 tmp_element_count = 0511 output['data'].write('{:10d}'.format(m + offset['element']))512 tmp_element_count += 1513 if tmp_element_count <= 8:514 output['data'].write('\n')515 count['elset'] += 1516 output['stats']['esid'].append([set_id,set_name])517 for k in inp.set['node']:518 if inp.set['node'][k].instance == instance.name:519 #TODO, need to check that all elements are same type (for DYNA sake)520 start = True521 set_naming_parts = k.split(':')522 set_name = set_naming_parts[1]523 set_id = int(set_naming_parts[0][1:])524 tmp_node_count = 0525 for m in inp.set['node'][k]:526 if start:527 output['data'].write('*SET_NODE\n')528 output['data'].write('{:10d}\n'.format(set_id))529 start = False530 if tmp_node_count > 7:531 output['data'].write('\n')532 tmp_node_count = 0533 output['data'].write('{:10d}'.format(m + offset['node']))534 tmp_node_count += 1535 if tmp_node_count <= 8:536 output['data'].write('\n')537 count['nodeset'] += 1538 output['stats']['nsid'].append([set_id,set_name])539 update_term(True)540 k = ostream541 k.write(output['header'])542 k.write(output['timestamp'])543 k.write(output['sep'])544 # statistics545 k.write(comment_line('Model Stats'))546 k.write(set_comment_head)547 for i in output['stats']:548 for j in output['stats'][i]:549 k.write(set_comment_fmt.format(i,j[0],j[1]))550 k.write(set_comment_sep)551 output['data'].seek(0)552 k.write(output['sep'] + output['data'].read())553 output['data'].close()554 k.write(output['sep'])555 k.write('*END\n')556 k.write(comment_line('End of translated output.', fill='-', newline=False))557 return558def cmdline(argv = None):559 """ command line processor560 returns namespace via argparse, or throws SystemExit561 if argv is none, get from sys.argv (via argparse)562 """563 from . 
import _version564 version = _version.get_versions()['version']565 # argument parsing definitions566 parser = argparse.ArgumentParser(prog='abaqus2dyna',567 description='Translate Abaqus to LS-DYNA')568 parser.add_argument('--version',569 action='version',570 version='%(prog)s ' + version)571 parser.add_argument('input',572 metavar='INPUT',573 help='Abaqus keyword file')574 parser.add_argument('-o', '--output',575 dest='output',576 metavar='OUTPUT',577 help='LS-DYNA keyword file output location')578 args = parser.parse_args(argv)579 return args580def convert(fin, fout):581 inp = ParseAbaqus(fin)582 #print(inp.count)583 # get nodes + elements (these will take the longest)584 total_nodel = 0585 for i in inp.instance:586 part = inp.part[inp.instance[i].part]587 total_nodel += len(part.node)588 total_nodel += len(part.element)589 # finally, output dyna keyword590 WriteDynaFromAbaqus(total_nodel, fin.name, inp, fout)591def main():592 args = cmdline()593 with open(args.input) as fin:594 if args.output:595 fout = open(args.output, 'w')596 else:597 fout = sys.stdout598 try:599 return convert(fin, fout)600 finally:601 if args.output:602 fout.close()603if __name__ == '__main__':...
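In this Abaqus-to-DYNA translator, keyword_line is a collections.Counter key rather than a method: ParseAbaqus resets count['keyword_line'] to zero on every '*KEYWORD' line and increments it for each data line that follows, so the per-keyword handlers know whether they are on the keyword's own argument line (count 0), its first data line (count 1), its second (count 2), and so on. Below is a stripped-down, hypothetical illustration of that counting pattern; the sample *INSTANCE block is invented.

# Minimal sketch of the count['keyword_line'] pattern used by ParseAbaqus above.
# The *INSTANCE block is made up; real input files carry many more keywords.
import collections

lines = [
    '** a comment line',
    '*INSTANCE, name=Part-1-1, part=Part-1',
    '0., 0., 10.',                              # data line 1: translation
    '0., 0., 0., 0., 0., 1., 90.',              # data line 2: rotation axis + angle
    '*END INSTANCE',
]

count = collections.Counter()
kw = None
for line in lines:
    if line.startswith('**'):                   # comment line
        continue
    if line.startswith('*'):                    # keyword line: reset the data counter
        kw = line.split(',')[0].upper().strip(' *')
        count['keyword_line'] = 0
        continue
    count['keyword_line'] += 1                  # data line under the current keyword
    if kw == 'INSTANCE' and count['keyword_line'] == 1:
        print('translation:', [float(x) for x in line.split(',')])
    elif kw == 'INSTANCE' and count['keyword_line'] == 2:
        print('rotation   :', [float(x) for x in line.split(',')])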


SQLlib.py

Source: SQLlib.py (GitHub)

import copy
import pymysql

class mainSQL(object):
    def __init__(self, host, port, user, password, database, charset):
        '''Core MySQL service.'''
        self.host = host
        self.port = port
        self.user = user
        self.password = password
        self.database = database
        self.charset = charset
        self.db = pymysql.connect(host=self.host,
                                  port=self.port,
                                  user=self.user,
                                  passwd=self.password,
                                  db=self.database,
                                  charset=self.charset)

    def find(self, mode, line, table, keyword_line, keyword):
        '''Search data.
        mode: FULLTEXT (exact match) or DIM (fuzzy LIKE search).
        Returns False when no match is found.
        line: column to return for matching rows.
        table: table to query.
        keyword_line: column to match against.
        keyword: keyword to match.
        Output format: [result rows [one value per column]]'''
        fill_data = dict(line=line, table=table,
                         keyword_line=keyword_line, keyword=keyword)
        sql = "SELECT {line} FROM {table} WHERE {keyword_line}"
        if mode == "FULLTEXT":
            sql += " = '{keyword}'"  # exact match
        if mode == "DIM":
            sql += " LIKE '%%%%{keyword}%%%%'"  # substring match
        sql = sql.format(**fill_data)
        self._link_checkout()
        cursor = self.db.cursor()
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            self.db.commit()
            cursor.close()
            return results
        except:
            return False

    def multires_finds(self, line, table, keyword_line, keyword, compare_mode="="):
        '''Search data (multi-result version).
        [Returns several column values for one match.]
        mode: FULLTEXT (exact match) only.
        Returns False when no match is found.
        line: columns to return [list].
        table: table to query.
        keyword_line: column to match against.
        keyword: keyword to match.
        compare_mode: comparison operator, supports <, >, =; default is =.
        Output format: [{column: value, column: value, ...}]'''
        fill_data = dict(table=table, keyword_line=keyword_line,
                         compare_mode=compare_mode, keyword=keyword)
        sql = "SELECT "
        index = 1
        # 1234 >> ["1","2","3","4"] >> ["1",",","2",",","3",",","4"] >> 1,2,3,4
        while index < len(line):
            line.insert(index, ',')
            index += 2
        for x in line:
            sql += x
        sql += " FROM {table} WHERE {keyword_line} {compare_mode} "
        if isinstance(keyword, (int, float)):
            sql += "{keyword}"
        else:
            sql += "'{keyword}'"
        sql = sql.format(**fill_data)
        self._link_checkout()
        cursor = self.db.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            self.db.commit()
            cursor.close()
            # return tuple(results[0])
            return list(results)
        except:
            results = []
            [results.append(False) for x in range(len(line))]
            return tuple(results)

    def multi_finds(self, fulltext_mode, line, table, keyword_line, keyword, compare_mode=[]):
        '''Search data (multi-keyword, multi-result version).
        [Supports multiple keywords.]
        [Supports multiple keyword columns.]
        [Single-column output only // reduced feature set.]
        [Supports multiple result rows.]
        {practical version}
        fulltext_mode: True (exact match), False (fuzzy LIKE search).
        compare_mode: >, <, = (fuzzy search only applies with =; defaults to = when omitted).
        Returns False when no match is found.
        line: column to return.
        table: table to query.
        keyword_line: columns to match against [list].
        keyword: keywords to match [list].
        Output format: [value, value, ...]
        Note: keyword_line and keyword must have the same length; missing match modes default to FULLTEXT.'''
        sql = ""
        sql = self._sel_part(sql, [line], table)
        # Processing keyword&line
        sql = self._where_part(
            sql, fulltext_mode, keyword_line, keyword, compare_mode)
        self._link_checkout()
        cursor = self.db.cursor()
        try:
            cursor.execute(sql)
            results_raw = cursor.fetchall()
            self.db.commit()
            cursor.close()
            results = []
            [results.extend(list(x)) for x in results_raw]
            return results
        except:
            return False

    # The methods above are the legacy implementations.

    def _sel_part(self, sql_sent, line, table):
        '''Split-out builder: SELECT ... FROM part, multiple columns.
        Ends with WHERE, so conditions must be appended by the caller.'''
        sql_sent += "SELECT "
        index = 1
        dot = 0
        line_queue = copy.deepcopy(line)
        # 1234 >> ["1","2","3","4"] >> ["1",",","2",",","3",",","4"] >> 1,2,3,4
        while index < len(line) + dot:
            line_queue.insert(index, ',')
            index += 2
            dot += 1
        for x in line_queue:
            sql_sent += x
        sql_sent += " FROM {} WHERE ".format(table)
        return sql_sent

    def _del_part(self, sql_sent, table):
        '''Split-out builder: DELETE FROM part.
        Ends with WHERE.'''
        sql_sent += "DELETE FROM {} WHERE ".format(table)
        return sql_sent

    def _ins_part(self, sql_sent, line, value, table):
        '''Split-out builder: INSERT INTO part, multiple columns.'''
        index = 0
        queue_line = "("
        for i in line:
            queue_line += i
            index += 1
            if index < len(line):
                queue_line += ","
        queue_line += ")"
        # Build the value part manually so a single column does not collapse
        # into a bare string and break the quoting.
        if len(value) == 1:
            val = "('{}')".format(tuple(value)[0])  # avoids the trailing comma of a one-element tuple
        else:
            val = tuple(value)
        sql_sent += "INSERT INTO {table_name} {field} VALUES {value}".format(
            table_name=table, field=queue_line, value=str(val))
        return sql_sent

    def _up_part(self, sql_sent, line, value, table):
        '''Split-out builder: UPDATE ... SET part, multiple columns.
        Ends with WHERE, so conditions must be appended by the caller.'''
        sql_sent += "UPDATE {table_name} SET ".format(table_name=table)
        index = 0
        for k, v in zip(line, value):
            if isinstance(v, (int, float)):
                sql_sent += "{field}={value}".format(field=k, value=v)
            else:
                sql_sent += "{field}='{value}'".format(field=k, value=v)
            index += 1
            if index < len(line):
                sql_sent += ", "
        sql_sent += " WHERE "
        return sql_sent

    def _where_part(self, sql_sent, fulltext_mode, keyword_line, keyword, compare_mode=[]):
        '''Split-out builder: WHERE conditions (the WHERE keyword itself is not added), multiple conditions.
        Conditions are combined with AND only.
        WHERE is built separately so the caller can prepend extra conditions (e.g. a table join).'''
        # Work on copies so the caller's lists (and the mutable default) are not modified.
        fulltext_mode = list(fulltext_mode)
        compare_mode = list(compare_mode)
        if len(fulltext_mode) < len(keyword):
            [
                fulltext_mode.append(True)
                for i in range(len(keyword) - len(fulltext_mode))
            ]  # Auto-fill True (FULLTEXT) when the mode list is too short
        if len(compare_mode) < len(keyword):
            [
                compare_mode.append("=")
                for i in range(len(keyword) - len(compare_mode))
            ]  # Auto-fill '=' when compare_mode is too short, by default
        index = 0
        for m, n, l, w in zip(fulltext_mode, compare_mode, keyword_line, keyword):
            fill_data = dict(compare_mode=n, keyword_line=l, keyword=w)
            sql_sent += "{keyword_line} "
            if n == "=" and not m:
                sql_sent += "LIKE '%%%%{keyword}%%%%'"
            else:
                sql_sent += " {compare_mode} "
                if isinstance(w, (int, float)):
                    sql_sent += "{keyword}"
                else:
                    sql_sent += "'{keyword}'"
            index += 1
            if index < len(keyword):
                sql_sent += " AND "
            sql_sent = sql_sent.format(**fill_data)
        return sql_sent

    def _link_checkout(self):
        '''Split-out helper: automatic connection check and reconnect.'''
        try:
            self.db.ping()
        except self.db.OperationalError:
            self.__init__(self.host, self.port, self.user, self.password,
                          self.database, self.charset)

    def multi_table_find(self, fulltext_mode, line, table, keyword_line,
                         keyword, bind_key, compare_mode=[]):
        '''Search data (multi-keyword, multi-result, table-join version).
        [Supports multiple keywords.]
        [Supports multiple keyword columns.]
        [Supports multi-column output // successor of the epic version.]
        [Supports multiple result rows.]
        [Dedicated to table joins; currently only two tables can be joined.]
        {special-purpose version}
        fulltext_mode: True (exact match), False (fuzzy LIKE search).
        compare_mode: >, <, = (fuzzy search only applies with =; defaults to = when omitted).
        Returns False when no match is found.
        line: columns to return [list].
        table: tables to query [list of two].
        keyword_line: columns to match against [list].
        keyword: keywords to match [list].
        bind_key: join keys [list of two].
        Output format: [{column: value, column: value, ...}]
        Note: keyword_line and keyword must have the same length; missing match modes default to FULLTEXT.'''
        sql = ""
        index = 1
        str_table = ""
        while index < len(table):  # join the table names with commas
            table.insert(index, ',')
            index += 2
        for x in table:
            str_table += x
        sql = self._sel_part(sql, line, str_table)
        sql += '{}.{} = {}.{} AND '.format(table[0], bind_key[0], table[-1:][0],
                                           bind_key[1])
        sql = self._where_part(
            sql, fulltext_mode, keyword_line, keyword, compare_mode)
        self._link_checkout()
        cursor = self.db.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            self.db.commit()
            cursor.close()
            return list(results)
        except:
            # print(e)
            return False

    def epic_finds(self, fulltext_mode, line, table, keyword_line, keyword):
        '''Search data (multi-keyword, multi-result version).
        [Supports multiple keywords.]
        [Supports multiple keyword columns.]
        [Supports multi-column output.]
        [Supports multiple result rows.]
        {formerly the ultimate version // nothing cut}
        mode: True (exact match), False (fuzzy LIKE search).
        Returns False when no match is found.
        line: columns to return [list].
        table: table to query.
        keyword_line: columns to match against [list].
        keyword: keywords to match [list].
        Output format: [{column: value, column: value, ...}]
        Note: keyword_line and keyword must have the same length; missing match modes default to FULLTEXT.'''
        sql = "SELECT "
        index = 1
        # Processing lines
        # 1234 >> ["1","2","3","4"] >> ["1",",","2",",","3",",","4"] >> 1,2,3,4
        while index < len(line):
            line.insert(index, ',')
            index += 2
        for x in line:
            sql += x
        # Processing table
        sql += ' FROM %s WHERE ' % table
        # Processing keyword&line
        index = 0
        if len(fulltext_mode) < len(keyword):
            [
                fulltext_mode.append(False)
                for i in range(len(keyword) - len(fulltext_mode))
            ]  # Auto-fill False when the mode list is too short
        for m, l, w in zip(fulltext_mode, keyword_line, keyword):
            if m:
                sql += "%s = '%s'" % (l, w)
                index += 1
            else:
                sql += "%s LIKE '%%%%%s%%%%'" % (l, w)
                index += 1
            if index < len(keyword):
                sql += " AND "
        self._link_checkout()
        cursor = self.db.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            self.db.commit()
            cursor.close()
            # return tuple(results[0])
            return results
        except:
            return False

    def finder_single(self, fulltext_mode, line, table, keyword_line, keyword, compare_mode=[],):
        '''Database finder (single-table, final version).
        Supports multiple keywords, keyword columns, multi-column output and multiple result rows.
        {highly compatible version}
        table: table to query.
        [All arguments below are lists.]
        fulltext_mode: True (exact match), False (fuzzy LIKE search).
        compare_mode: >, <, = (fuzzy search only applies with =; defaults to = when omitted).
        line: columns to return.
        keyword_line: columns to match against.
        keyword: keywords to match.
        Output format: [{column: value, column: value, ...}]  # list of dict rows
        When the hook parameter is enabled, this becomes a statement selector and returns the constructed SQL.
        [Returns False when no match is found.]
        '''
        sql = ""
        sql = self._sel_part(sql, line, table)
        sql = self._where_part(
            sql, fulltext_mode, keyword_line, keyword, compare_mode)
        self._link_checkout()
        cursor = self.db.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql)
            results = cursor.fetchall()
            self.db.commit()
            cursor.close()
            return list(results)
        except:
            return False

    def adder_single(self, fulltext_mode, line, table, value, keyword_line, keyword, compare_mode=[]):
        '''Database inserter/updater (single-table version).
        Supports multiple keywords, keyword columns, multiple columns and multiple input values.
        Chooses INSERT or UPDATE depending on whether the requested data already exists.
        table: table to operate on.
        [All arguments below are lists.]
        fulltext_mode: True (exact match), False (fuzzy LIKE search).
        compare_mode: >, <, = (fuzzy search only applies with =; defaults to = when omitted).
        line: columns to write.
        value: values to insert or update.
        {Columns and values must match one to one.}
        keyword_line: columns to match against.
        keyword: keywords to match.
        Output: "INS" (inserted) or "UP" (updated) on success, False on failure.
        '''
        if not len(line) == len(value):
            return False
        sql = ""
        operation = ""
        if self.finder_single(fulltext_mode, line, table, keyword_line, keyword, compare_mode):
            sql = self._up_part(sql, line, value, table)
            sql = self._where_part(sql, fulltext_mode, keyword_line, keyword)
            operation = "UP"
        else:
            sql = self._ins_part(sql, line, value, table)
            operation = "INS"
        self._link_checkout()
        cursor = self.db.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql)
            cursor.fetchall()
            self.db.commit()
            cursor.close()
            return operation
        except:
            self.db.rollback()
            return False

    def delete_single(self, fulltext_mode, table, keyword_line, keyword, compare_mode=[]):
        '''
        Deleter.
        Supports multiple keywords and keyword columns.
        table: table to operate on.
        [All arguments below are lists.]
        fulltext_mode: True (exact match), False (fuzzy LIKE search).
        compare_mode: >, <, = (fuzzy search only applies with =; defaults to = when omitted).
        keyword_line: columns to match against.
        keyword: keywords to match.
        Does not return database content.
        [Returns True or False depending on whether the statement executed.]
        '''
        sql = ""
        sql = self._del_part(sql, table)
        sql = self._where_part(
            sql, fulltext_mode, keyword_line, keyword, compare_mode)
        self._link_checkout()
        cursor = self.db.cursor(pymysql.cursors.DictCursor)
        try:
            cursor.execute(sql)
            cursor.fetchall()
            self.db.commit()
            cursor.close()
            return True
        except:...
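For orientation, here is a minimal usage sketch of the class above. The connection details, the users table and the name/age/city columns are hypothetical and only illustrate how the keyword_line arguments are passed; adapt them to your own schema.

# Hypothetical credentials and schema, for illustration only.
db = mainSQL(host="127.0.0.1", port=3306, user="root",
             password="secret", database="testdb", charset="utf8mb4")

# Exact match: SELECT age FROM users WHERE name = 'Alice'
rows = db.find("FULLTEXT", "age", "users", "name", "Alice")

# Multi-condition search returning dict rows; one fulltext flag per keyword.
people = db.finder_single(fulltext_mode=[True, False],
                          line=["name", "age"],
                          table="users",
                          keyword_line=["city", "name"],
                          keyword=["Berlin", "Ali"])
print(rows, people)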

extract_model_params_from_terachem.py

Source:extract_model_params_from_terachem.py Github

#! /usr/bin/env python
import sys
import os.path
import numpy as np
import math
import numba
from spec_pkg.constants import constants as const

def get_gs_energy(input_path_gs):
    search_phrase='FINAL ENERGY:'
    searchfile = open(input_path_gs,"r")
    line_count=0
    keyword_line=999999999
    for line in searchfile:
        if search_phrase in line and keyword_line==999999999:
            keyword_line=line_count
        line_count=line_count+1
    # if we haven't found the final ground state energy, exit
    if keyword_line==999999999:
        sys.exit('Error: Could not find final ground state energy in Terachem file: '+input_path_gs)
    linefile=open(input_path_gs,"r")
    lines=linefile.readlines()
    energy_line=lines[keyword_line].split()
    return float(energy_line[2])

# root determines which root is the root of interest. By default this is 1.
def get_ex_energy_dipole_mom(input_path_ex,root):
    search_phrase='Final Excited State Results:'
    searchfile = open(input_path_ex,"r")
    line_count=0
    keyword_line=999999999
    for line in searchfile:
        if search_phrase in line and keyword_line==999999999:
            keyword_line=line_count
        line_count=line_count+1
    if keyword_line==999999999:
        sys.exit('Error: Could not find excited state energy and dipole moment in Terachem file: '+input_path_ex)
    searchfile.close()
    linefile=open(input_path_ex,"r")
    lines=linefile.readlines()
    energy_line=lines[keyword_line+3+root].split()
    energy=float(energy_line[1])
    ex_energy=float(energy_line[2])/const.Ha_to_eV  # convert to Ha
    searchfile = open(input_path_ex,"r")
    search_phrase='Transition dipole moments:'
    line_count=0
    keyword_line=999999999
    for line in searchfile:
        if search_phrase in line and keyword_line==999999999:
            keyword_line=line_count
        line_count=line_count+1
    if keyword_line==999999999:
        sys.exit('Error: Could not find excited state energy and dipole moment in Terachem file: '+input_path_ex)
    searchfile.close()
    linefile=open(input_path_ex,"r")
    lines=linefile.readlines()
    dipole_line=lines[keyword_line+3+root].split()
    dipole_mom=np.zeros(3)
    dipole_mom[0]=float(dipole_line[1])
    dipole_mom[1]=float(dipole_line[2])
    dipole_mom[2]=float(dipole_line[3])
    return energy,dipole_mom

def get_e_adiabatic_dipole(input_path_gs,input_path_ex,root):
    ex_energy,dipole_mom=get_ex_energy_dipole_mom(input_path_ex,root)
    gs_energy=get_gs_energy(input_path_gs)
    return dipole_mom,ex_energy-gs_energy

# Test this
def get_specific_dipole(input_path,displacement,root,ref_dipole):
    search_phrase1='*** Displacement '+str(displacement)+' ***'
    search_phrase2='Transition dipole moments:'
    dipole_vec=np.zeros(3)
    searchfile = open(input_path,"r")
    line_count=0
    keyword_line=999999999
    for line in searchfile:
        if search_phrase1 in line and keyword_line==999999999:
            keyword_line=line_count
        line_count=line_count+1
    total_line_number=line_count-1
    linefile=open(input_path,"r")
    lines=linefile.readlines()
    # if we haven't found the specific displacement, break
    if keyword_line==999999999:
        sys.exit('Error: Could not find transition dipole moment for Displacement '+str(displacement))
    line_count=keyword_line
    keyword_line2=999999999
    while line_count<total_line_number:
        if search_phrase2 in lines[line_count] and keyword_line2==999999999:
            keyword_line2=line_count
        line_count=line_count+1
    # if we haven't found the dipole block following the displacement, break
    if keyword_line2==999999999:
        sys.exit('Error: Could not find transition dipole moment for Displacement '+str(displacement))
    line_start=keyword_line2+3+root
    current_line=lines[line_start].split()

    dipole_vec[0]=float(current_line[1])
    dipole_vec[1]=float(current_line[2])
    dipole_vec[2]=float(current_line[3])
    # Check sign relative to reference dipole moment
    if np.dot(dipole_vec,ref_dipole)/(np.dot(dipole_vec,dipole_vec)*np.dot(ref_dipole,ref_dipole))<-0.5:
        dipole_vec=-1.0*dipole_vec
    return dipole_vec

# Works
def get_specific_grad(input_path,displacement,num_atoms):
    search_phrase1='*** Displacement '+str(displacement)+' ***'
    search_phrase2='dE/dX dE/dY dE/dZ'
    grad_vec=np.zeros(num_atoms*3)
    searchfile = open(input_path,"r")
    line_count=0
    keyword_line=999999999
    for line in searchfile:
        if search_phrase1 in line and keyword_line==999999999:
            keyword_line=line_count
        line_count=line_count+1
    total_line_number=line_count-1
    linefile=open(input_path,"r")
    lines=linefile.readlines()
    # if we haven't found the specific displacement, break
    if keyword_line==999999999:
        sys.exit('Error: Could not find gradient for Displacement '+str(displacement))
    line_count=keyword_line
    keyword_line2=999999999
    while line_count<total_line_number:
        if search_phrase2 in lines[line_count] and keyword_line2==999999999:
            keyword_line2=line_count
        line_count=line_count+1
    # if we haven't found the gradient following the displacement, break
    if keyword_line2==999999999:
        sys.exit('Error: Could not find gradient for Displacement '+str(displacement))
    line_start=keyword_line2+1
    # loop over number of atoms to extract the gradient vec
    atom_count=0
    while atom_count<num_atoms:
        current_line=lines[line_start+atom_count].split()
        dx=float(current_line[0])
        dy=float(current_line[1])
        dz=float(current_line[2])
        grad_vec[atom_count*3]=dx
        grad_vec[atom_count*3+1]=dy
        grad_vec[atom_count*3+2]=dz
        atom_count=atom_count+1
    return grad_vec

def get_dipole_deriv_from_terachem(input_path,frozen_atom_list,num_frozen_atoms,root):
    # ref dipole:
    energy,ref_dipole=get_ex_energy_dipole_mom(input_path,root)
    num_atoms=frozen_atom_list.shape[0]
    dipole_deriv=np.zeros((3,num_atoms*3))
    displacement=0.0
    # find displacement:
    searchphrase='Using displacements of '
    searchfile = open(input_path,"r")
    line_count=0
    keyword_line=999999999
    for line in searchfile:
        if searchphrase in line and keyword_line==999999999:
            keyword_line=line_count
        line_count=line_count+1
    searchfile.close()
    if keyword_line < 9999999:
        linefile=open(input_path,"r")
        lines=linefile.readlines()
        keyword=lines[keyword_line].split()
        displacement=float(keyword[3])
    else:
        sys.exit('Error: Could not find displacement used in numerical Hessian calculation in Terachem file')
    print('FD parameter: 2*displacement')
    print(displacement*2)
    # Now construct the dipole derivative. First displacement is the (x+delta x) displacement, then the (x-delta x) displacement.
    unfrozen_atoms=num_atoms-num_frozen_atoms
    unfrozen_count=0
    total_atom_count=0
    while total_atom_count<num_atoms:
        # check whether this is a frozen or an unfrozen atom:
        if frozen_atom_list[total_atom_count]==0:  # unfrozen
            # loop over xyz coordinates:
            xyz_count=0
            while xyz_count<3:
                print('Getting dipole ', unfrozen_count, ' of ',3*unfrozen_atoms)
                dipole1=get_specific_dipole(input_path,unfrozen_count*2+1,root,ref_dipole)
                dipole2=get_specific_dipole(input_path,unfrozen_count*2+2,root,ref_dipole)
                print(dipole1)
                print(dipole2)
                print(dipole1-dipole2)
                # build the effective column of the dipole derivative through the finite difference scheme.
                fd_dipole=(dipole1-dipole2)/(2.0*displacement)
                # this seems to be the correct approach. This way, off diagonal Hessian elements are the average of the numerical and the
                # analytical force constants
                dipole_deriv[:,total_atom_count*3+xyz_count]=fd_dipole[:]
                print(fd_dipole)
                unfrozen_count=unfrozen_count+1
                xyz_count=xyz_count+1
        total_atom_count=total_atom_count+1
    # no need to symmetrize like in the Hessian. Just return
    print('Full Dipole derivative. Units should be in a.u')
    print(dipole_deriv)
    return dipole_deriv

# Works
def get_hessian_from_terachem(input_path,frozen_atom_list,num_frozen_atoms):
    num_atoms=frozen_atom_list.shape[0]  # frozen_atom_list is a list Natoms long, where frozen atoms
                                         # are labelled by 1 and unfrozen atoms labelled by 0
    hessian=np.zeros((num_atoms*3,num_atoms*3))
    displacement=0.0
    # find displacement:
    searchphrase='Using displacements of '
    searchfile = open(input_path,"r")
    line_count=0
    keyword_line=999999999
    for line in searchfile:
        if searchphrase in line and keyword_line==999999999:
            keyword_line=line_count
        line_count=line_count+1
    searchfile.close()
    if keyword_line < 9999999:
        linefile=open(input_path,"r")
        lines=linefile.readlines()
        keyword=lines[keyword_line].split()
        displacement=float(keyword[3])
    else:
        sys.exit('Error: Could not find displacement used in numerical Hessian calculation in Terachem file')
    # Now construct the Hessian. First displacement is the (x+delta x) displacement, then the (x-delta x) displacement.
    unfrozen_atoms=num_atoms-num_frozen_atoms
    unfrozen_count=0
    total_atom_count=0
    while total_atom_count<num_atoms:
        # check whether this is a frozen or an unfrozen atom:
        if frozen_atom_list[total_atom_count]==0:  # unfrozen
            # loop over xyz coordinates:
            xyz_count=0
            while xyz_count<3:
                print('Getting gradient ', unfrozen_count, ' of ',3*unfrozen_atoms)
                grad1=get_specific_grad(input_path,unfrozen_count*2+1,num_atoms)
                grad2=get_specific_grad(input_path,unfrozen_count*2+2,num_atoms)
                # build the effective row of the Hessian through the finite difference scheme.
                fd_grad=(grad1-grad2)/(2.0*displacement)
                # this seems to be the correct approach. This way, off diagonal Hessian elements are the average of the numerical and the
                # analytical force constants
                hessian[total_atom_count*3+xyz_count,:]=fd_grad[:]
                unfrozen_count=unfrozen_count+1
                xyz_count=xyz_count+1
        else:  # frozen atom
            # loop over xyz coordinates:
            xyz_count=0
            while xyz_count<3:
                # set block diagonal elements corresponding to purely frozen atoms to 1
                hessian[total_atom_count*3+xyz_count,total_atom_count*3+xyz_count]=1.0
                xyz_count=xyz_count+1
        total_atom_count=total_atom_count+1
    # currently the Hessian has only one off diagonal block if there are frozen atoms. Need to fix this
    i_count=0
    while i_count<num_atoms:
        j_count=i_count
        while j_count<num_atoms:
            if frozen_atom_list[i_count]==0 and frozen_atom_list[j_count]==1:  # i_count refers to an unfrozen and j_count to a frozen atom
                # make sure the hessian element with the frozen atom as its first index is set.
                for xyz1 in range(3):
                    for xyz2 in range(3):
                        hessian[j_count*3+xyz2,i_count*3+xyz1]=hessian[i_count*3+xyz1,j_count*3+xyz2]
            j_count=j_count+1
        i_count=i_count+1
    # symmetrize hessian h_sym=0.5*(H+H.T)
    sym_hessian=0.5*(hessian+np.transpose(hessian))
    return sym_hessian

# extract both list of atomic masses and reference geometry from terachem
# Works
def get_masses_geom_from_terachem(input_path, num_atoms):
    geom=np.zeros((num_atoms,3))
    masses=np.zeros(num_atoms)
    search_phrase='*** Reference Geometry ***'
    searchfile = open(input_path,"r")
    line_count=0
    keyword_line=999999999
    for line in searchfile:
        if search_phrase in line and keyword_line==999999999:
            keyword_line=line_count
        line_count=line_count+1
    linefile=open(input_path,"r")
    lines=linefile.readlines()
    # if we haven't found the reference geometry, break
    if keyword_line==999999999:
        sys.exit('Error: Could not find reference geometry in terachem file '+input_path)
    line_start=keyword_line+2
    atom_count=0
    while atom_count<num_atoms:
        current_line=lines[line_start+atom_count].split()
        geom[atom_count,0]=float(current_line[1])
        geom[atom_count,1]=float(current_line[2])
        geom[atom_count,2]=float(current_line[3])
        elem=current_line[0]  # match element with mass number then store in masses array
        if elem=='H':
            masses[atom_count]=const.Mass_list[0]
        elif elem=='He':
            masses[atom_count]=const.Mass_list[1]
        elif elem=='Li':
            masses[atom_count]=const.Mass_list[2]
        elif elem=='Be':
            masses[atom_count]=const.Mass_list[3]
        elif elem=='B':
            masses[atom_count]=const.Mass_list[4]
        elif elem=='C':
            masses[atom_count]=const.Mass_list[5]
        elif elem=='N':
            masses[atom_count]=const.Mass_list[6]
        elif elem=='O':
            masses[atom_count]=const.Mass_list[7]
        elif elem=='F':
            masses[atom_count]=const.Mass_list[8]
        elif elem=='Ne':
            masses[atom_count]=const.Mass_list[9]
        elif elem=='Na':
            masses[atom_count]=const.Mass_list[10]
        elif elem=='Mg':
            masses[atom_count]=const.Mass_list[11]
        elif elem=='Al':
            masses[atom_count]=const.Mass_list[12]
        elif elem=='Si':
            masses[atom_count]=const.Mass_list[13]
        elif elem=='P':
            masses[atom_count]=const.Mass_list[14]
        elif elem=='S':
            masses[atom_count]=const.Mass_list[15]
        elif elem=='Cl':
            masses[atom_count]=const.Mass_list[16]
        elif elem=='Ar':
            masses[atom_count]=const.Mass_list[17]
        else:
            sys.exit('Error: Could not find atomic mass for element '+elem)
        atom_count=atom_count+1...
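The functions above all rely on the same idiom: scan the output file once, store the index of the first line that contains a marker phrase in a keyword_line variable, and then read the data at a fixed offset from that index. A minimal, self-contained sketch of that pattern follows; the file name "tc.out" and the marker 'FINAL ENERGY:' are only placeholders for whatever output and phrase you are parsing.

import sys

def find_keyword_line(path, search_phrase):
    # Return the 0-based index of the first line containing search_phrase,
    # mirroring the keyword_line bookkeeping used in the functions above.
    keyword_line = None
    with open(path, "r") as handle:
        for line_count, line in enumerate(handle):
            if search_phrase in line and keyword_line is None:
                keyword_line = line_count
    if keyword_line is None:
        sys.exit("Error: Could not find '" + search_phrase + "' in file: " + path)
    return keyword_line

# Hypothetical usage on a TeraChem-style output file:
# idx = find_keyword_line("tc.out", "FINAL ENERGY:")
# with open("tc.out", "r") as handle:
#     energy = float(handle.readlines()[idx].split()[2])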

