Best Python code snippets using localstack_python
sam-translate.py
Source:sam-translate.py  
# NOTE(review): this snippet is truncated on both ends in the source paste —
# the tail of package() before transform_template() and the final `else`
# branch of the __main__ dispatch are not fully visible here.

def transform_template(input_file_path, output_file_path):
    """Transform a SAM template into a plain CloudFormation template.

    Reads the SAM template at *input_file_path*, runs the SAM->CFN
    transform with a managed-policy loader, and writes the prettified
    JSON result to *output_file_path*.  Transform errors are logged,
    not re-raised.
    """
    with open(input_file_path, 'r') as f:
        sam_template = yaml_parse(f)
    try:
        cloud_formation_template = transform(
            sam_template, {}, ManagedPolicyLoader(iam_client))
        cloud_formation_template_prettified = json.dumps(
            cloud_formation_template, indent=2)
        with open(output_file_path, 'w') as f:
            f.write(cloud_formation_template_prettified)
        print('Wrote transformed CloudFormation template to: ' + output_file_path)
    except InvalidDocumentException as e:
        error_message = reduce(
            lambda message, error: message + ' ' + error.message,
            e.causes, e.message)
        LOG.error(error_message)
        # BUG FIX: `map()` is a lazy iterator on Python 3, so the original
        # logged "<map object at 0x...>" instead of the cause messages.
        errors = [cause.message for cause in e.causes]
        LOG.error(errors)


def deploy(template_file):
    """Deploy *template_file* via `aws cloudformation deploy`.

    Reads --capabilities and --stack-name from the global cli_options.
    """
    capabilities = cli_options.get('--capabilities')
    stack_name = cli_options.get('--stack-name')
    args = [
        '--template-file',
        template_file,
        '--capabilities',
        capabilities,
        '--stack-name',
        stack_name,
    ]
    execute_command('deploy', args)
    # NOTE(review): returns the module-level `package_output_template_file`
    # (assigned in __main__), not anything produced here — looks like a
    # copy-paste from package(); confirm callers rely on it before changing.
    return package_output_template_file


if __name__ == '__main__':
    input_file_path, output_file_path = get_input_output_file_paths()
    if cli_options.get('package'):
        package_output_template_file = package(input_file_path, output_file_path)
        transform_template(package_output_template_file, output_file_path)
    elif cli_options.get('deploy'):
        package_output_template_file = package(input_file_path, output_file_path)
        transform_template(package_output_template_file, output_file_path)
        deploy(output_file_path)
    else:
        # NOTE(review): the else branch is truncated in the source paste;
        # transforming without package/deploy is the obvious fallback — confirm
        # against the original script.
        transform_template(input_file_path, output_file_path)
Source:helpers.py  
def get_test_sampler(args):
    """Build the test-time meta-dataset and its DataLoader.

    Support and query transforms are chosen independently: each role uses
    the 'train' (augmenting) transform when the corresponding
    test.augment_* flag is set, otherwise the plain 'test' transform.

    Returns a (testset, test_sampler) pair.
    """
    num_workers = args['data.num_workers']
    root, transform_template = init_dataset(args)
    imgs_path = os.path.join(root, 'images')
    annot_file_template = os.path.join(
        PROJECT_ROOT, 'splits', args['data.dataset'], '{}.csv')

    # Pick augmented ('train') or plain ('test') preprocessing per role.
    support_transform = transform_template(
        mode='train' if args['test.augment_support'] else 'test')
    query_transform = transform_template(
        mode='train' if args['test.augment_query'] else 'test')

    if args['data.lmdb']:
        testset = LMDBMetaDataset(
            os.path.join(root, 'lmdb', args['test.set']),
            args['data.test_shot'], args['data.test_query'],
            args['data.test_way'], args['test.size'],
            transform=support_transform)
    else:
        test_annot = annot_file_template.format(args['test.set'])
        testset = MiniImagenetMeta(
            imgs_path, test_annot, args['data.test_shot'],
            args['data.test_query'], args['data.test_way'],
            n_copy=args['test.n_copy'], length=args['test.size'],
            transform=support_transform,
            test_transform=query_transform)

    test_sampler = DataLoader(testset, batch_size=1, num_workers=num_workers)
    return testset, test_sampler
Source:test_record_selector.py  
#
# Copyright (c) 2022 Airbyte, Inc., all rights reserved.
#
import json

import pytest
import requests
from airbyte_cdk.sources.declarative.decoders.json_decoder import JsonDecoder
from airbyte_cdk.sources.declarative.extractors.jello import JelloExtractor
from airbyte_cdk.sources.declarative.extractors.record_filter import RecordFilter
from airbyte_cdk.sources.declarative.extractors.record_selector import RecordSelector


@pytest.mark.parametrize(
    "test_name, transform_template, filter_template, body, expected_records",
    [
        (
            "test_with_extractor_and_filter",
            "_.data",
            "{{ record['created_at'] > stream_state['created_at'] }}",
            {"data": [{"id": 1, "created_at": "06-06-21"}, {"id": 2, "created_at": "06-07-21"}, {"id": 3, "created_at": "06-08-21"}]},
            [{"id": 2, "created_at": "06-07-21"}, {"id": 3, "created_at": "06-08-21"}],
        ),
        (
            "test_no_record_filter_returns_all_records",
            "_.data",
            None,
            {"data": [{"id": 1, "created_at": "06-06-21"}, {"id": 2, "created_at": "06-07-21"}]},
            [{"id": 1, "created_at": "06-06-21"}, {"id": 2, "created_at": "06-07-21"}],
        ),
    ],
)
def test_record_filter(test_name, transform_template, filter_template, body, expected_records):
    """RecordSelector extracts records with Jello and optionally filters them."""
    config = {"response_override": "stop_if_you_see_me"}
    stream_state = {"created_at": "06-06-21"}
    stream_slice = {"last_seen": "06-10-21"}
    next_page_token = {"last_seen_id": 14}
    response = create_response(body)
    decoder = JsonDecoder()
    extractor = JelloExtractor(transform=transform_template, decoder=decoder, config=config)
    if filter_template is None:
        record_filter = None
    else:
        record_filter = RecordFilter(config=config, condition=filter_template)
    record_selector = RecordSelector(extractor=extractor, record_filter=record_filter)
    actual_records = record_selector.select_records(
        response=response, stream_state=stream_state, stream_slice=stream_slice, next_page_token=next_page_token
    )
    assert actual_records == expected_records


def create_response(body):
    """Build a requests.Response whose content is *body* JSON-encoded."""
    response = requests.Response()
    response._content = json.dumps(body).encode("utf-8")
    # BUG FIX(review): the pasted snippet is truncated at this point; the
    # function must hand the built response back to the caller for the
    # test above to work.
    return response
You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.
Get 100 minutes of automation testing for FREE!
