How to use the mappings method in localstack

Best Python code snippet using localstack_python

elasticsearch.py

Source: elasticsearch.py (GitHub)

...
        basic_auth_password = self.configuration.get("basic_auth_password", None)
        self.auth = None
        if basic_auth_user and basic_auth_password:
            self.auth = HTTPBasicAuth(basic_auth_user, basic_auth_password)

    def _get_mappings(self, url):
        mappings = {}
        error = None
        try:
            r = requests.get(url, auth=self.auth)
            r.raise_for_status()
            mappings = r.json()
        except requests.HTTPError as e:
            logger.exception(e)
            error = "Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text)
            mappings = None
        except requests.exceptions.RequestException as e:
            logger.exception(e)
            error = "Connection refused"
            mappings = None
        return mappings, error

    def _get_query_mappings(self, url):
        mappings_data, error = self._get_mappings(url)
        if error:
            return mappings_data, error
        mappings = {}
        for index_name in mappings_data:
            index_mappings = mappings_data[index_name]
            for m in index_mappings.get("mappings", {}):
                if "properties" not in index_mappings["mappings"][m]:
                    continue
                for property_name in index_mappings["mappings"][m]["properties"]:
                    property_data = index_mappings["mappings"][m]["properties"][property_name]
                    if property_name not in mappings:
                        property_type = property_data.get("type", None)
                        if property_type:
                            if property_type in ELASTICSEARCH_TYPES_MAPPING:
                                mappings[property_name] = ELASTICSEARCH_TYPES_MAPPING[property_type]
                            else:
                                mappings[property_name] = TYPE_STRING
                                # raise Exception("Unknown property type: {0}".format(property_type))
        return mappings, error

    def get_schema(self, *args, **kwargs):
        def parse_doc(doc, path=None):
            '''Recursively parse a doc type dictionary
            '''
            path = path or []
            result = []
            for field, description in doc['properties'].items():
                if 'properties' in description:
                    result.extend(parse_doc(description, path + [field]))
                else:
                    result.append('.'.join(path + [field]))
            return result

        schema = {}
        url = "{0}/_mappings".format(self.server_url)
        mappings, error = self._get_mappings(url)
        if mappings:
            # make a schema for each index
            # the index contains a mappings dict with documents
            # in a hierarchical format
            for name, index in mappings.items():
                columns = []
                schema[name] = {'name': name}
                for doc, items in index['mappings'].items():
                    columns.extend(parse_doc(items))
                # remove duplicates
                # sort alphabetically
                schema[name]['columns'] = sorted(set(columns))
        return schema.values()

    def _parse_results(self, mappings, result_fields, raw_result, result_columns, result_rows):
        def add_column_if_needed(mappings, column_name, friendly_name, result_columns, result_columns_index):
            if friendly_name not in result_columns_index:
                result_columns.append({
                    "name": friendly_name,
                    "friendly_name": friendly_name,
                    "type": mappings.get(column_name, "string")})
                result_columns_index[friendly_name] = result_columns[-1]

        def get_row(rows, row):
            if row is None:
                row = {}
                rows.append(row)
            return row

        def collect_value(mappings, row, key, value, type):
            if result_fields and key not in result_fields_index:
                return
            mappings[key] = type
            add_column_if_needed(mappings, key, key, result_columns, result_columns_index)
            row[key] = value

        def collect_aggregations(mappings, rows, parent_key, data, row, result_columns, result_columns_index):
            if isinstance(data, dict):
                for key, value in data.iteritems():
                    val = collect_aggregations(mappings, rows, parent_key if key == 'buckets' else key, value, row, result_columns, result_columns_index)
                    if val:
                        row = get_row(rows, row)
                        collect_value(mappings, row, key, val, 'long')
                for data_key in ['value', 'doc_count']:
                    if data_key not in data:
                        continue
                    if 'key' in data and len(data.keys()) == 2:
                        key_is_string = 'key_as_string' in data
                        collect_value(mappings, row, data['key'] if not key_is_string else data['key_as_string'], data[data_key], 'long' if not key_is_string else 'string')
                    else:
                        return data[data_key]
            elif isinstance(data, list):
                for value in data:
                    result_row = get_row(rows, row)
                    collect_aggregations(mappings, rows, parent_key, value, result_row, result_columns, result_columns_index)
                    if 'doc_count' in value:
                        collect_value(mappings, result_row, 'doc_count', value['doc_count'], 'integer')
                    if 'key' in value:
                        if 'key_as_string' in value:
                            collect_value(mappings, result_row, parent_key, value['key_as_string'], 'string')
                        else:
                            collect_value(mappings, result_row, parent_key, value['key'], 'string')
            return None

        result_columns_index = {c["name"]: c for c in result_columns}
        result_fields_index = {}
        if result_fields:
            for r in result_fields:
                result_fields_index[r] = None

        if 'error' in raw_result:
            error = raw_result['error']
            if len(error) > 10240:
                error = error[:10240] + '... continues'
            raise Exception(error)
        elif 'aggregations' in raw_result:
            if result_fields:
                for field in result_fields:
                    add_column_if_needed(mappings, field, field, result_columns, result_columns_index)
            for key, data in raw_result["aggregations"].iteritems():
                collect_aggregations(mappings, result_rows, key, data, None, result_columns, result_columns_index)
            logger.debug("result_rows %s", str(result_rows))
            logger.debug("result_columns %s", str(result_columns))
        elif 'hits' in raw_result and 'hits' in raw_result['hits']:
            if result_fields:
                for field in result_fields:
                    add_column_if_needed(mappings, field, field, result_columns, result_columns_index)
            for h in raw_result["hits"]["hits"]:
                row = {}
                column_name = "_source" if "_source" in h else "fields"
                for column in h[column_name]:
                    if result_fields and column not in result_fields_index:
                        continue
                    add_column_if_needed(mappings, column, column, result_columns, result_columns_index)
                    value = h[column_name][column]
                    row[column] = value[0] if isinstance(value, list) and len(value) == 1 else value
                result_rows.append(row)
        else:
            raise Exception("Redash failed to parse the results it got from Elasticsearch.")

    def test_connection(self):
        try:
            r = requests.get("{0}/_cluster/health".format(self.server_url), auth=self.auth)
            r.raise_for_status()
        except requests.HTTPError as e:
            logger.exception(e)
            raise Exception("Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text))
        except requests.exceptions.RequestException as e:
            logger.exception(e)
            raise Exception("Connection refused")


class Kibana(BaseElasticSearch):
    def __init__(self, configuration):
        super(Kibana, self).__init__(configuration)

    @classmethod
    def enabled(cls):
        return True

    @classmethod
    def annotate_query(cls):
        return False

    def _execute_simple_query(self, url, auth, _from, mappings, result_fields, result_columns, result_rows):
        url += "&from={0}".format(_from)
        r = requests.get(url, auth=self.auth)
        r.raise_for_status()
        raw_result = r.json()
        self._parse_results(mappings, result_fields, raw_result, result_columns, result_rows)
        total = raw_result["hits"]["total"]
        result_size = len(raw_result["hits"]["hits"])
        logger.debug("Result Size: {0} Total: {1}".format(result_size, total))
        return raw_result["hits"]["total"]

    def run_query(self, query, user):
        try:
            error = None
            logger.debug(query)
            query_params = json.loads(query)
            index_name = query_params["index"]
            query_data = query_params["query"]
            size = int(query_params.get("size", 500))
            limit = int(query_params.get("limit", 500))
            result_fields = query_params.get("fields", None)
            sort = query_params.get("sort", None)
            if not self.server_url:
                error = "Missing configuration key 'server'"
                return None, error
            url = "{0}/{1}/_search?".format(self.server_url, index_name)
            mapping_url = "{0}/{1}/_mapping".format(self.server_url, index_name)
            mappings, error = self._get_query_mappings(mapping_url)
            if error:
                return None, error
            # logger.debug(json.dumps(mappings, indent=4))
            if sort:
                url += "&sort={0}".format(urllib.quote_plus(sort))
            url += "&q={0}".format(urllib.quote_plus(query_data))
            logger.debug("Using URL: {0}".format(url))
            logger.debug("Using Query: {0}".format(query_data))
            result_columns = []
            result_rows = []
            if isinstance(query_data, str) or isinstance(query_data, unicode):
                _from = 0
                while True:
                    query_size = size if limit >= (_from + size) else (limit - _from)
                    total = self._execute_simple_query(url + "&size={0}".format(query_size), self.auth, _from, mappings, result_fields, result_columns, result_rows)
                    _from += size
                    if _from >= limit:
                        break
            else:
                # TODO: Handle complete ElasticSearch queries (JSON based sent over HTTP POST)
                raise Exception("Advanced queries are not supported")
            json_data = json.dumps({
                "columns": result_columns,
                "rows": result_rows
            })
        except KeyboardInterrupt:
            error = "Query cancelled by user."
            json_data = None
        except requests.HTTPError as e:
            logger.exception(e)
            error = "Failed to execute query. Return Code: {0} Reason: {1}".format(r.status_code, r.text)
            json_data = None
        except requests.exceptions.RequestException as e:
            logger.exception(e)
            error = "Connection refused"
            json_data = None
        except Exception as e:
            logger.exception(e)
            raise sys.exc_info()[1], None, sys.exc_info()[2]
        return json_data, error


class ElasticSearch(BaseElasticSearch):
    def __init__(self, configuration):
        super(ElasticSearch, self).__init__(configuration)

    @classmethod
    def enabled(cls):
        return True

    @classmethod
    def annotate_query(cls):
        return False

    @classmethod
    def name(cls):
        return 'Elasticsearch'

    def run_query(self, query, user):
        try:
            error = None
            logger.debug(query)
            query_dict = json.loads(query)
            index_name = query_dict.pop("index", "")
            result_fields = query_dict.pop("result_fields", None)
            if not self.server_url:
                error = "Missing configuration key 'server'"
                return None, error
            url = "{0}/{1}/_search".format(self.server_url, index_name)
            mapping_url = "{0}/{1}/_mapping".format(self.server_url, index_name)
            mappings, error = self._get_query_mappings(mapping_url)
            if error:
                return None, error
            params = {"source": json.dumps(query_dict), "source_content_type": "application/json"}
            logger.debug("Using URL: %s", url)
            logger.debug("Using params : %s", params)
            r = requests.get(url, params=params, auth=self.auth)
            r.raise_for_status()
            logger.debug("Result: %s", r.json())
            result_columns = []
            result_rows = []
            self._parse_results(mappings, result_fields, r.json(), result_columns, result_rows)
            json_data = json.dumps({
                "columns": result_columns,
                "rows": result_rows
...
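To see the core pattern in isolation, here is a minimal, self-contained sketch of the same idea as _get_query_mappings above: fetch an index's mapping over HTTP and flatten each property to its declared type. The endpoint URL and index name below are assumptions for illustration (point them at your own Elasticsearch-compatible cluster, for example one provisioned through LocalStack); the response walk mirrors the pre-7.x mapping format with document types that the snippet uses.

# A minimal sketch of the mapping-fetching idea used by _get_query_mappings.
# SERVER_URL and INDEX are hypothetical -- adjust them to your own setup.
import requests

SERVER_URL = "http://localhost:9200"  # hypothetical endpoint
INDEX = "my-index"                    # hypothetical index name

def get_field_types(server_url, index):
    """Fetch an index's mapping and flatten properties to declared types."""
    r = requests.get("{0}/{1}/_mapping".format(server_url, index))
    r.raise_for_status()
    data = r.json()
    field_types = {}
    # Walk each index and (pre-7.x style) document type in the response,
    # recording every property's declared type; default to "string" when
    # no type is given, as the snippet above does.
    for index_name, index_mappings in data.items():
        for doc_type, doc in index_mappings.get("mappings", {}).items():
            for name, prop in doc.get("properties", {}).items():
                field_types.setdefault(name, prop.get("type", "string"))
    return field_types

if __name__ == "__main__":
    print(get_field_types(SERVER_URL, INDEX))

The flattened name-to-type dictionary this returns plays the same role as the mappings argument threaded through _parse_results above: it lets result columns be typed from the index metadata rather than guessed from the data.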

make_intl_data.py

Source: make_intl_data.py (GitHub)

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

""" Usage: make_intl_data.py [language-subtag-registry.txt]

    This script extracts information about mappings between deprecated and
    current BCP 47 language tags from the IANA Language Subtag Registry and
    converts it to JavaScript object definitions in IntlData.js. The definitions
    are used in Intl.js.

    The IANA Language Subtag Registry is imported from
    http://www.iana.org/assignments/language-subtag-registry
    and uses the syntax specified in
    http://tools.ietf.org/html/rfc5646#section-3
"""

def readRegistryRecord(registry):
    """ Yields the records of the IANA Language Subtag Registry as dictionaries. """
    record = {}
    for line in registry:
        line = line.strip()
        if line == "":
            continue
        if line == "%%":
            yield record
            record = {}
        else:
            if ":" in line:
                key, value = line.split(":", 1)
                key, value = key.strip(), value.strip()
                record[key] = value
            else:
                # continuation line
                record[key] += " " + line
    if record:
        yield record
    return


def readRegistry(registry):
    """ Reads IANA Language Subtag Registry and extracts information for Intl.js.

        Information extracted:
        - langTagMappings: mappings from complete language tags to preferred
          complete language tags
        - langSubtagMappings: mappings from subtags to preferred subtags
        - extlangMappings: mappings from extlang subtags to preferred subtags,
          with prefix to be removed

        Returns these three mappings as dictionaries, along with the registry's
        file date.

        We also check that mappings for language subtags don't affect extlang
        subtags and vice versa, so that CanonicalizeLanguageTag doesn't have
        to separate them for processing. Region codes are separated by case,
        and script codes by length, so they're unproblematic.
    """
    langTagMappings = {}
    langSubtagMappings = {}
    extlangMappings = {}
    languageSubtags = set()
    extlangSubtags = set()

    for record in readRegistryRecord(registry):
        if "File-Date" in record:
            fileDate = record["File-Date"]
            continue
        if record["Type"] == "grandfathered":
            # Grandfathered tags don't use standard syntax, so
            # CanonicalizeLanguageTag expects the mapping table to provide
            # the final form for all.
            # For langTagMappings, keys must be in lower case; values in
            # the case used in the registry.
            tag = record["Tag"]
            if "Preferred-Value" in record:
                langTagMappings[tag.lower()] = record["Preferred-Value"]
            else:
                langTagMappings[tag.lower()] = tag
        elif record["Type"] == "redundant":
            # For langTagMappings, keys must be in lower case; values in
            # the case used in the registry.
            if "Preferred-Value" in record:
                langTagMappings[record["Tag"].lower()] = record["Preferred-Value"]
        elif record["Type"] in ("language", "script", "region", "variant"):
            # For langSubtagMappings, keys and values must be in the case used
            # in the registry.
            subtag = record["Subtag"]
            if record["Type"] == "language":
                languageSubtags.add(subtag)
            if "Preferred-Value" in record:
                if subtag == "heploc":
                    # The entry for heploc is unique in its complexity; handle
                    # it as special case below.
                    continue
                if "Prefix" in record:
                    # This might indicate another heploc-like complex case.
                    raise Exception("Please evaluate: subtag mapping with prefix value.")
                langSubtagMappings[subtag] = record["Preferred-Value"]
        elif record["Type"] == "extlang":
            # For extlangMappings, keys must be in the case used in the
            # registry; values are records with the preferred value and the
            # prefix to be removed.
            subtag = record["Subtag"]
            extlangSubtags.add(subtag)
            if "Preferred-Value" in record:
                preferred = record["Preferred-Value"]
                prefix = record["Prefix"]
                extlangMappings[subtag] = {"preferred": preferred, "prefix": prefix}
        else:
            # No other types are allowed by
            # http://tools.ietf.org/html/rfc5646#section-3.1.3
            assert False, "Unrecognized Type: {0}".format(record["Type"])

    # Check that mappings for language subtags and extlang subtags don't affect
    # each other.
    for lang in languageSubtags:
        if lang in extlangMappings and extlangMappings[lang]["preferred"] != lang:
            raise Exception("Conflict: lang with extlang mapping: " + lang)
    for extlang in extlangSubtags:
        if extlang in langSubtagMappings:
            raise Exception("Conflict: extlang with lang mapping: " + extlang)

    # Special case for heploc.
    langTagMappings["ja-latn-hepburn-heploc"] = "ja-Latn-alalc97"

    return {"fileDate": fileDate,
            "langTagMappings": langTagMappings,
            "langSubtagMappings": langSubtagMappings,
            "extlangMappings": extlangMappings}


def writeMappingsVar(intlData, dict, name, description, fileDate, url):
    """ Writes a variable definition with a mapping table to file intlData.

        Writes the contents of dictionary dict to file intlData with the given
        variable name and a comment with description, fileDate, and URL.
    """
    intlData.write("\n")
    intlData.write("// {0}.\n".format(description))
    intlData.write("// Derived from IANA Language Subtag Registry, file date {0}.\n".format(fileDate))
    intlData.write("// {0}\n".format(url))
    intlData.write("var {0} = {{\n".format(name))
    keys = sorted(dict)
    for key in keys:
        if isinstance(dict[key], basestring):
            value = '"{0}"'.format(dict[key])
        else:
            preferred = dict[key]["preferred"]
            prefix = dict[key]["prefix"]
            value = '{{preferred: "{0}", prefix: "{1}"}}'.format(preferred, prefix)
        intlData.write('    "{0}": {1},\n'.format(key, value))
    intlData.write("};\n")


def writeLanguageTagData(intlData, fileDate, url, langTagMappings, langSubtagMappings, extlangMappings):
    """ Writes the language tag data to the Intl data file. """
    writeMappingsVar(intlData, langTagMappings, "langTagMappings",
                     "Mappings from complete tags to preferred values", fileDate, url)
    writeMappingsVar(intlData, langSubtagMappings, "langSubtagMappings",
                     "Mappings from non-extlang subtags to preferred values", fileDate, url)
    writeMappingsVar(intlData, extlangMappings, "extlangMappings",
                     "Mappings from extlang subtags to preferred values", fileDate, url)


if __name__ == '__main__':
    import codecs
    import sys
    import urllib2

    url = "http://www.iana.org/assignments/language-subtag-registry"
    if len(sys.argv) > 1:
        print("Always make sure you have the newest language-subtag-registry.txt!")
        registry = codecs.open(sys.argv[1], "r", encoding="utf-8")
    else:
        print("Downloading IANA Language Subtag Registry...")
        reader = urllib2.urlopen(url)
        text = reader.read().decode("utf-8")
        reader.close()
        registry = codecs.open("language-subtag-registry.txt", "w+", encoding="utf-8")
        registry.write(text)
        registry.seek(0)

    print("Processing IANA Language Subtag Registry...")
    data = readRegistry(registry)
    fileDate = data["fileDate"]
    langTagMappings = data["langTagMappings"]
    langSubtagMappings = data["langSubtagMappings"]
    extlangMappings = data["extlangMappings"]
    registry.close()

    print("Writing Intl data...")
    intlData = codecs.open("IntlData.js", "w", encoding="utf-8")
    intlData.write("// Generated by make_intl_data.py. DO NOT EDIT.\n")
    writeLanguageTagData(intlData, fileDate, url, langTagMappings, langSubtagMappings, extlangMappings)
...
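As a quick illustration of the record format readRegistryRecord consumes, the sketch below feeds it a small fabricated excerpt (records separated by "%%", one "Key: value" pair per line). It assumes readRegistryRecord from the listing above is in scope; the excerpt itself is made up for demonstration, though iw -> he is a real deprecated language subtag mapping from the registry.

# Feed readRegistryRecord a fabricated two-record excerpt of the
# registry format and print the dictionaries it yields.
import io

SAMPLE = u"""File-Date: 2014-03-27
%%
Type: language
Subtag: iw
Description: Hebrew
Preferred-Value: he
"""

for record in readRegistryRecord(io.StringIO(SAMPLE)):
    print(record)
# Yields {'File-Date': '2014-03-27'} first, then the 'iw' record whose
# Preferred-Value 'he' would land in langSubtagMappings.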

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites for your first automation test to following best practices and diving into advanced test scenarios. The LambdaTest Learning Hubs compile step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run localstack automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest now!

Get 100 minutes of automation testing free!
