Best Python code snippet using autotest_python
file_mixin.py
Source: file_mixin.py
...
            return None

    def _add_readable(self, file):
        # Hash the stream into a temp file, then move it into place under its content hash
        tempname = tempfile.mktemp()
        with open(tempname, 'wb') as to_file:
            hash = self.get_hash_from_file(file, to_file.write)
        path = self._content_path_for_hash(hash)
        try: os.rename(tempname, path)
        except FileExistsError: os.remove(tempname)
        return hash

    def _add_files(self, file):
        if file.name is None:
            return self._add_readable(file)
        hash = self.get_hash_from_file(file)
        try: os.rename(file.name, self._content_path_for_hash(hash))
        except FileExistsError: os.remove(file.name)

    def get_hash_from_file(self, file, write = None):
        """=> the hash of a file's content
        the file must support read()"""
        hash = hashing.algorithm()
        while True:
            try: data = file.read(1024)
            except EOFError: break
            hash.update(data)
            if write: write(data)
            if len(data) < 1024:
                break
        return hash.hexdigest()

    def _hashes(self):
        exists = lambda path: os.path.exists(os.path.join(path, self.CONTENT))
        return self._hash_directory.list(exists)
...
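The mixin's get_hash_from_file reads the stream in 1024-byte chunks and can tee each chunk through an optional write callback, which _add_readable uses to copy the data into a temp file while hashing. A minimal standalone sketch of that pattern, assuming hashlib.sha256 in place of the snippet's hashing.algorithm():

import hashlib

def hash_readable(file, write=None, chunk_size=1024):
    """Hash a readable binary stream; optionally copy each chunk via write()."""
    digest = hashlib.sha256()      # assumption: stands in for hashing.algorithm()
    while True:
        data = file.read(chunk_size)
        if not data:               # empty read signals EOF
            break
        digest.update(data)
        if write:
            write(data)            # e.g. tee into a temp file, as _add_readable does
    return digest.hexdigest()

# Usage (hypothetical file name):
# with open("example.bin", "rb") as f:
#     print(hash_readable(f))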
hash_comparison.py
Source: hash_comparison.py
...
    if os.path.isfile(path):
        # Compare the file's hash to the provided one
        if is_verbose:
            print(f'*** Hashing file {path} into {hash_algo}')
        ret_hash = get_hash_from_file(path, hash_algo)
    if os.path.isdir(path):
        # Hash every file in the directory
        if is_verbose:
            print(f'*** Hashing dir {path} into {hash_algo}')
        ret_hash = get_hash_from_dir(path, hash_algo)
    return ret_hash

def get_hash_from_dir(dir_path, hash_algo) -> dict:
    """Returns a dict mapping each file in the directory to its hash"""
    if not os.path.isdir(dir_path):
        print(f"[-] Cannot find directory at {dir_path}")
        return
    hashes = {}
    for file in os.listdir(dir_path):
        hashes[file] = get_hash_from_file(os.path.join(dir_path, file), hash_algo)
    return hashes

def get_hash_from_file(file_path, hash_algo) -> str:
    """Returns the hash of the file at file_path using hash_algo"""
    if not os.path.isfile(file_path):
        print(f"[-] Cannot open file at {file_path}")
        return
    if hash_algo in hashlib.algorithms_available:
        file_hash = hashlib.new(hash_algo)  # Create the hash object for the requested algorithm
    else:
        print(f'[-] Hash algo "{hash_algo}" does not exist')
        return
    with open(file_path, 'rb') as f:  # Open the file to read its bytes
        fb = f.read(BLOCK_SIZE)  # Read the first block (BLOCK_SIZE bytes)
        while len(fb) > 0:  # While there is still data being read from the file
            file_hash.update(fb)  # Update the hash
            fb = f.read(BLOCK_SIZE)  # Read the next block from the file
...
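The excerpt cuts off inside the read loop, before the digest is returned. As a hedged sketch only (it assumes a BLOCK_SIZE constant and a final return of file_hash.hexdigest(), neither of which appears in the excerpt), the complete block-hashing pattern looks like this:

import hashlib

BLOCK_SIZE = 65536  # assumed value; the original defines BLOCK_SIZE outside the excerpt

def sketch_hash_file(file_path, hash_algo="sha256"):
    """Block-wise file hashing in the same shape as get_hash_from_file above."""
    file_hash = hashlib.new(hash_algo)
    with open(file_path, "rb") as f:
        fb = f.read(BLOCK_SIZE)
        while len(fb) > 0:
            file_hash.update(fb)
            fb = f.read(BLOCK_SIZE)
    return file_hash.hexdigest()

# Usage (hypothetical path):
# print(sketch_hash_file("example.txt", "md5"))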
compare_condensed_with_full.py
Source: compare_condensed_with_full.py
...
        len(input_data_full), FULL_URL_LIST
    )
)

# Generating new dataframe
def get_hash_from_file(filename):
    sha1 = hashlib.sha1()
    with open(filename, "r") as f:
        data = f.read()
        sha1.update(data.encode("utf-8"))
    return sha1.hexdigest()

output_success = []
output_fails = []
counter_success = 0
counter_failed = 0

# Iterate over all of the cleansed dataset
for filename in input_data_cleaned:
    if counter_success % 5000 == 0:
        print("{}/{}".format(counter_success, len(input_data_cleaned)))
    # Get source url
    raw_filename = filename.split("/")[-1]
    # Try to get the file hash; only works with utf-8
    try:
        file_hash = get_hash_from_file(filename)
    except UnicodeDecodeError as e:
        print("Bad type:\n{}".format(e))
        counter_failed += 1
        output_fails.append(raw_filename)
        continue
    # Create an entry for the parent and append it
    parent_dict = {
        "parent_filename": raw_filename,
        "filename": raw_filename,
        "hash": file_hash,
    }
    output_success.append(parent_dict)
    # Now search for all entries in the complete crawl with the same base url
    search = raw_filename.split(".txt")[0]
    for key in input_data_full:
        if key.startswith(search) and key != raw_filename:
            child_dict = {
                "parent_filename": raw_filename,
                "filename": key,
                "hash": input_data_full[key],
            }
            output_success.append(child_dict)
    counter_success += 1
# Pickle output
...
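get_hash_from_file here opens the file in text mode, so non-UTF-8 input raises UnicodeDecodeError and is counted as a failure. A hedged alternative (not the author's approach) is to hash the raw bytes, which never hits the decode step; note that text-mode reading normalizes CRLF line endings, so the two digests can differ on such files:

import hashlib

def get_hash_from_bytes(filename):
    """SHA-1 of the raw file bytes, skipping the UTF-8 decode/encode round trip."""
    sha1 = hashlib.sha1()
    with open(filename, "rb") as f:
        sha1.update(f.read())
    return sha1.hexdigest()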
