How to use the path_matches_pattern method in LocalStack

Best Python code snippet using localstack_python

runner.py

Source:runner.py Github

copy

Full Screen

# NOTE(review): this chunk was recovered from a scraped page -- the original
# source line numbers were fused into the text and all indentation was lost.
# It is reconstructed below as valid Python. Two pieces were cut off at the
# chunk edges and are omitted rather than guessed:
#   * the tail of a method (original lines 94-103) that filtered an invisible
#     `records_for_source` by filename pattern and returned the imported files
#     sorted by "imported_at" -- presumably `imported_paths()`; TODO confirm
#     against the full file;
#   * `run_task` (original lines 308-317), truncated mid-`try` block.
# Names such as `settings`, `nx`, `SourceCollection`, `StorageClient`,
# `TaskLog`, `django_call_command` and `find_files` come from imports outside
# this chunk.


class Task:
    # NOTE(review): the class header is not visible in this chunk; the name
    # `Task` is inferred from the visible subclasses ("class ManualFetchTask(Task)").
    # Only the methods visible in the chunk are reconstructed here.

    def input_paths(self):
        """Return list of paths to input files for task."""
        paths = glob.glob("%s/*/*" % self.source.data_dir)
        return sorted(
            path
            for path in paths
            if path_matches_pattern(path, self.filename_pattern())
        )

    def set_last_imported_path(self, path):
        """Set the path of the most recently imported data for this source."""
        # Truncate to whole seconds so the timestamp round-trips cleanly
        # through the JSON import log.
        now = datetime.datetime.now().replace(microsecond=0).isoformat()
        records = load_import_records()
        records[self.source.name].append({"imported_file": path, "imported_at": now})
        dump_import_records(records)

    def unimported_paths(self):
        """Return list of paths to input files for task that have not been
        imported."""
        imported_paths = [record for record in self.imported_paths()]
        return [path for path in self.input_paths() if path not in imported_paths]


class ManualFetchTask(Task):
    def run(self, year, month):
        """Prompt the operator to fetch data by hand, then report what appeared."""
        print("Running manual fetch task {}".format(self.name))
        instructions = self.manual_fetch_instructions()
        print(instructions)
        paths_before = find_files(self.source.data_dir)
        input("Press return when done, or to skip this step")
        paths_after = find_files(self.source.data_dir)
        new_paths = [path for path in paths_after if path not in paths_before]
        if new_paths:
            print("The following files have been manually fetched:")
            for path in new_paths:
                print(" * {}".format(path))
        else:
            print("No new files were found at {}".format(self.source.data_dir))
        input(
            "Press return to confirm, or Ctrl+C to cancel " "and resolve any problems"
        )

    def manual_fetch_instructions(self):
        """Build the human-readable instructions shown before a manual fetch."""
        source = self.source
        expected_location = os.path.join(
            settings.PIPELINE_DATA_BASEDIR, source.name, "YYYY_MM"
        )
        output = []
        output.append("~" * 80)
        output.append(
            "You should now locate the latest data for %s, if "
            "available" % source.name
        )
        output.append("You should save it at:")
        output.append(" %s" % expected_location)
        if source.index_url:
            output.append("Where to look:")
            output.append(" %s" % source.index_url)
        if source.urls:
            output.append("Previous data has been found at:")
            for k, v in source.urls.items():
                output.append(" %s: %s" % (k, v))
        if source.publication_schedule:
            output.append("Publication frequency:")
            output.append(" %s" % source.publication_schedule)
        if source.notes:
            output.append("Notes:")
            for line in textwrap.wrap(source.notes):
                output.append(" %s" % line)
        output.append("The last imported data can be found at:")
        for task in source.tasks_that_use_raw_source_data():
            paths = task.imported_paths()
            if paths:
                path = paths[-1]
            else:
                path = "<never imported>"
            output.append(" %s" % path)
        return "\n".join(output)


class AutoFetchTask(Task):
    def run(self, year, month):
        """Run the task's fetch command with year/month substituted in."""
        print("Running auto fetch task {}".format(self.name))
        command = self.command.format(year=year, month=month)
        tokens = shlex.split(command)
        call_command(*tokens)


class ConvertTask(Task):
    def run(self, year, month):
        # For now, year and month are ignored
        print("Running convert task {}".format(self.name))
        unimported_paths = self.unimported_paths()
        for path in unimported_paths:
            # The command template contains the filename pattern as a
            # placeholder; substitute the concrete path for each run.
            command = self.command.replace(self.filename_pattern(), path)
            tokens = shlex.split(command)
            call_command(*tokens)
            self.set_last_imported_path(path)


class ImportTask(Task):
    def run(self, year, month):
        # For now, year and month are ignored
        print("Running import task {}".format(self.name))
        unimported_paths = self.unimported_paths()
        for path in unimported_paths:
            command = self.command.replace(self.filename_pattern(), path)
            tokens = shlex.split(command)
            call_command(*tokens)
            self.set_last_imported_path(path)


class PostProcessTask(Task):
    def run(self, year, month, last_imported):
        # For now, year and month are ignored
        command = self.command.format(last_imported=last_imported)
        tokens = shlex.split(command)
        call_command(*tokens)


class TaskCollection(object):
    """A keyed collection of tasks, optionally iterated in dependency order
    and optionally filtered to a single task type."""

    task_type_to_cls = {
        "manual_fetch": ManualFetchTask,
        "manual_fetch_skip_for_now": ManualFetchTask,
        "auto_fetch": AutoFetchTask,
        "convert": ConvertTask,
        "import": ImportTask,
        "post_process": PostProcessTask,
    }

    def __init__(self, task_data=None, ordered=False, task_type=None):
        self._tasks = {}
        if isinstance(task_data, dict):
            # Mapping of task name -> attributes, as loaded from tasks.json.
            for name, attrs in task_data.items():
                cls = self.task_type_to_cls[attrs["type"]]
                task = cls(name, attrs)
                self.add(task)
        elif isinstance(task_data, list):
            # Already-constructed Task instances.
            for task in task_data:
                self.add(task)
        self._ordered = ordered
        self._type = task_type

    def add(self, task):
        self._tasks[task.name] = task

    def __getitem__(self, name):
        return self._tasks[name]

    def __iter__(self):
        if self._ordered:
            # Topological sort so each task comes after its dependencies.
            graph = nx.DiGraph()
            for task in self._tasks.values():
                graph.add_node(task)
                for dependency in task.dependencies:
                    graph.add_node(dependency)
                    graph.add_edge(dependency, task)
            tasks = nx.topological_sort(graph)
        else:
            tasks = [task for _, task in sorted(self._tasks.items())]
        for task in tasks:
            if self._type is None:
                yield task
            else:
                if self._type == task.task_type:
                    yield task

    def __bool__(self):
        if self._type:
            return any(task for task in self if task.task_type == self._type)
        else:
            return bool(self._tasks)

    def by_type(self, task_type):
        return TaskCollection(list(self), ordered=self._ordered, task_type=task_type)

    def ordered(self):
        return TaskCollection(list(self), ordered=True, task_type=self._type)


def load_tasks():
    """Load sources and tasks from JSON metadata and wire them together."""
    metadata_path = settings.PIPELINE_METADATA_DIR
    with open(os.path.join(metadata_path, "sources.json")) as f:
        source_data = json.load(f)
    sources = SourceCollection(source_data)
    with open(os.path.join(metadata_path, "tasks.json")) as f:
        task_data = json.load(f)
    tasks = TaskCollection(task_data)
    for task in tasks:
        if task.source_id is None:
            task.set_source(None)
        else:
            source = sources[task.source_id]
            task.set_source(source)
            source.add_task(task)
        task.resolve_dependencies(tasks)
    return tasks


def load_import_records():
    """Load the import log, defaulting unknown sources to an empty list."""
    with open(settings.PIPELINE_IMPORT_LOG_PATH) as f:
        log_data = json.load(f)
    return defaultdict(list, log_data)


def dump_import_records(records):
    """Write the import log back to disk as pretty-printed JSON."""
    with open(settings.PIPELINE_IMPORT_LOG_PATH, "w") as f:
        json.dump(records, f, indent=2, separators=(",", ": "))


def upload_all_to_storage(tasks):
    """Upload input files of all convert and import tasks to cloud storage."""
    for task in tasks.by_type("convert"):
        upload_task_input_files(task)
    for task in tasks.by_type("import"):
        upload_task_input_files(task)


def upload_task_input_files(task):
    """Upload a task's input files to the storage bucket, skipping existing blobs."""
    storage_client = StorageClient()
    bucket = storage_client.get_bucket()
    for path in task.input_paths():
        assert path[0] == "/"
        assert settings.PIPELINE_DATA_BASEDIR[-1] == "/"
        # Blob name mirrors the on-disk layout under an "hscic" prefix.
        name = "hscic" + path.replace(settings.PIPELINE_DATA_BASEDIR, "/")
        blob = bucket.blob(name)
        if blob.exists():
            print("Skipping %s, already uploaded" % name)
            continue
        print("Uploading %s to %s" % (path, name))
        with open(path, "rb") as f:
            blob.upload_from_file(f)


def path_matches_pattern(path, pattern):
    """Return True if the basename of `path` matches the glob `pattern`."""
    return fnmatch.fnmatch(os.path.basename(path), pattern)


def call_command(*args):
    print("call_command {}".format(args))
    return django_call_command(*args)

# NOTE(review): `run_task` (checks TaskLog for a prior successful run, creates
# a TaskLog row, runs the task inside a try block) is truncated at the end of
# this chunk and has been omitted rather than reconstructed from guesswork.

Full Screen

Full Screen

helpers.py

Source:helpers.py Github

copy

Full Screen

# NOTE(review): recovered from a scraped page; the original line numbers were
# fused into the text and indentation was lost. Only the complete
# `path_matches_pattern` definition is reconstructed here; the surrounding
# fragments (the tail of an ambiguous-API-path resolver, original lines
# 185-192, and the head of `connect_api_gateway_to_sqs`, lines 204-207) are
# cut off at the chunk edges and are omitted rather than guessed.


def path_matches_pattern(path, api_path):
    """Return True if `path` matches the API gateway path template `api_path`.

    Template segments of the form "{name}" act as wildcards; every other
    non-empty segment must match the corresponding path segment exactly.
    Paths with a different number of segments never match, and at least one
    literal segment must match (a template of only placeholders returns False).
    """
    api_paths = api_path.split('/')
    paths = path.split('/')
    # Matches a whole placeholder segment such as "{id}".
    reg_check = re.compile(r'\{(.*)\}')
    results = []
    if len(api_paths) != len(paths):
        return False
    for indx, part in enumerate(api_paths):
        # Only literal, non-empty segments constrain the match; placeholder
        # segments and the empty segment before a leading '/' are skipped.
        if reg_check.match(part) is None and part:
            results.append(part == paths[indx])
    return len(results) > 0 and all(results)

Full Screen

Full Screen

tools.py

Source:tools.py Github

copy

Full Screen

...157TRAILING_DOUBLE_STAR_RE = re.compile(r'\/\.\*\.\*$')158TRAILING_DOUBLE_STAR_REPL = r'/.+'159# no / matches basename160NO_SLASH_RE = r'(.*/)?'161def path_matches_pattern(path: str, pattern: str) -> bool:162 """Return True if path matches the LFS filter pattern.163 See gitignore and gitattributes documentation for more on this.164 """165 # gitignore documentation to the contrary notwithstanding, a pattern ending166 # with '/' matches nothing when used for gitattributes.167 if pattern.endswith('/'): # directory pattern168 return False169 # Git documentation quoted here:170 #171 # Git treats the pattern as a shell glob suitable for consumption by172 # fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will not173 # match a / in the pathname.174 #175 # However, Python's fnmatch does not support FNM_PATHNAME, so use fnmatch to...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. From setting up the prerequisites and running your first automation test, through following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run localstack automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful