How to use the _cache_response_times method in locust

Best Python code snippet using locust

stats.py

Source: stats.py (GitHub)

copy

Full Screen

...207 self.num_reqs_per_sec = {}208 self.total_content_length = 0209 if self.use_response_times_cache:210 self.response_times_cache = OrderedDict()211 self._cache_response_times(int(time.time()))212 213 def log(self, response_time, content_length):214 # get the time215 t = int(time.time())216 217 if self.use_response_times_cache and self.last_request_timestamp and t > self.last_request_timestamp:218 # see if we shall make a copy of the respone_times dict and store in the cache219 self._cache_response_times(t-1)220 221 self.num_requests += 1222 self._log_time_of_request(t)223 self._log_response_time(response_time)224 # increase total content-length225 self.total_content_length += content_length226 def _log_time_of_request(self, t):227 self.num_reqs_per_sec[t] = self.num_reqs_per_sec.setdefault(t, 0) + 1228 self.last_request_timestamp = t229 def _log_response_time(self, response_time):230 self.total_response_time += response_time231 if self.min_response_time is None:232 self.min_response_time = response_time233 self.min_response_time = min(self.min_response_time, response_time)234 self.max_response_time = max(self.max_response_time, response_time)235 # to avoid to much data that has to be transfered to the master node when236 # running in distributed mode, we save the response time rounded in a dict237 # so that 147 becomes 150, 3432 becomes 3400 and 58760 becomes 59000238 if response_time < 100:239 rounded_response_time = response_time240 elif response_time < 1000:241 rounded_response_time = int(round(response_time, -1))242 elif response_time < 10000:243 rounded_response_time = int(round(response_time, -2))244 else:245 rounded_response_time = int(round(response_time, -3))246 # increase request count for the rounded key in response time dict247 self.response_times.setdefault(rounded_response_time, 0)248 self.response_times[rounded_response_time] += 1249 # extra logic for data export250 self.response_timestamps.append(time.time())251 
self.response_times_precise.append(response_time)252 def log_error(self, error):253 self.num_failures += 1254 @property255 def fail_ratio(self):256 try:257 return float(self.num_failures) / (self.num_requests + self.num_failures)258 except ZeroDivisionError:259 if self.num_failures > 0:260 return 1.0261 else:262 return 0.0263 @property264 def avg_response_time(self):265 try:266 return float(self.total_response_time) / self.num_requests267 except ZeroDivisionError:268 return 0269 @property270 def median_response_time(self):271 if not self.response_times:272 return 0273 return median_from_dict(self.num_requests, self.response_times)274 @property275 def current_rps(self):276 if self.stats.last_request_timestamp is None:277 return 0278 slice_start_time = max(self.stats.last_request_timestamp - 12, int(self.stats.start_time or 0))279 reqs = [self.num_reqs_per_sec.get(t, 0) for t in range(slice_start_time, self.stats.last_request_timestamp-2)]280 return avg(reqs)281 @property282 def total_rps(self):283 if not self.stats.last_request_timestamp or not self.stats.start_time:284 return 0.0285 return self.num_requests / max(self.stats.last_request_timestamp - self.stats.start_time, 1)286 @property287 def avg_content_length(self):288 try:289 return self.total_content_length / self.num_requests290 except ZeroDivisionError:291 return 0292 293 def extend(self, other):294 """295 Extend the data from the current StatsEntry with the stats from another296 StatsEntry instance. 
297 """298 self.last_request_timestamp = max(self.last_request_timestamp, other.last_request_timestamp)299 self.start_time = min(self.start_time, other.start_time)300 self.num_requests = self.num_requests + other.num_requests301 self.num_failures = self.num_failures + other.num_failures302 self.total_response_time = self.total_response_time + other.total_response_time303 self.max_response_time = max(self.max_response_time, other.max_response_time)304 self.min_response_time = min(self.min_response_time or 0, other.min_response_time or 0) or other.min_response_time305 self.total_content_length = self.total_content_length + other.total_content_length306 for key in other.response_times:307 self.response_times[key] = self.response_times.get(key, 0) + other.response_times[key]308 for key in other.num_reqs_per_sec:309 self.num_reqs_per_sec[key] = self.num_reqs_per_sec.get(key, 0) + other.num_reqs_per_sec[key]310 311 def serialize(self):312 return {313 "name": self.name,314 "method": self.method,315 "last_request_timestamp": self.last_request_timestamp,316 "start_time": self.start_time,317 "num_requests": self.num_requests,318 "num_failures": self.num_failures,319 "total_response_time": self.total_response_time,320 "max_response_time": self.max_response_time,321 "min_response_time": self.min_response_time,322 "total_content_length": self.total_content_length,323 "response_times": self.response_times,324 "num_reqs_per_sec": self.num_reqs_per_sec,325 }326 327 @classmethod328 def unserialize(cls, data):329 obj = cls(None, data["name"], data["method"])330 for key in [331 "last_request_timestamp",332 "start_time",333 "num_requests",334 "num_failures",335 "total_response_time",336 "max_response_time",337 "min_response_time",338 "total_content_length",339 "response_times",340 "num_reqs_per_sec",341 ]:342 setattr(obj, key, data[key])343 return obj344 345 def get_stripped_report(self):346 """347 Return the serialized version of this StatsEntry, and then clear the current stats.348 
"""349 report = self.serialize()350 self.reset()351 return report352 def __str__(self):353 try:354 fail_percent = (self.num_failures/float(self.num_requests + self.num_failures))*100355 except ZeroDivisionError:356 fail_percent = 0357 358 return (" %-" + str(STATS_NAME_WIDTH) + "s %7d %12s %7d %7d %7d | %7.2f %7.2f") % (359 (self.method and self.method + " " or "") + self.name,360 self.num_requests,361 "%d(%.2f%%)" % (self.num_failures, fail_percent),362 self.avg_response_time,363 self.min_response_time or 0,364 self.max_response_time,365 self.median_response_time or 0,366 self.current_rps or 0367 )368 369 def get_response_time_percentile(self, percent):370 """371 Get the response time that a certain number of percent of the requests372 finished within.373 374 Percent specified in range: 0.0 - 1.0375 """376 return calculate_response_time_percentile(self.response_times, self.num_requests, percent)377 378 def get_current_response_time_percentile(self, percent):379 """380 Calculate the *current* response time for a certain percentile. We use a sliding 381 window of (approximately) the last 10 seconds (specified by CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW) 382 when calculating this.383 """384 if not self.use_response_times_cache:385 raise ValueError("StatsEntry.use_response_times_cache must be set to True if we should be able to calculate the _current_ response time percentile")386 # First, we want to determine which of the cached response_times dicts we should 387 # use to get response_times for approximately 10 seconds ago. 388 t = int(time.time())389 # Since we can't be sure that the cache contains an entry for every second. 390 # We'll construct a list of timestamps which we consider acceptable keys to be used 391 # when trying to fetch the cached response_times. 
We construct this list in such a way 392 # that it's ordered by preference by starting to add t-10, then t-11, t-9, t-12, t-8, 393 # and so on394 acceptable_timestamps = []395 for i in xrange(9):396 acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-i)397 acceptable_timestamps.append(t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW+i)398 399 cached = None400 for ts in acceptable_timestamps:401 if ts in self.response_times_cache:402 cached = self.response_times_cache[ts]403 break404 405 if cached:406 # If we fond an acceptable cached response times, we'll calculate a new response 407 # times dict of the last 10 seconds (approximately) by diffing it with the current 408 # total response times. Then we'll use that to calculate a response time percentile 409 # for that timeframe410 return calculate_response_time_percentile(411 diff_response_time_dicts(self.response_times, cached.response_times), 412 self.num_requests - cached.num_requests, 413 percent,414 )415 416 def percentile(self, tpl=" %-" + str(STATS_NAME_WIDTH) + "s %8d %6d %6d %6d %6d %6d %6d %6d %6d %6d"):417 if not self.num_requests:418 raise ValueError("Can't calculate percentile on url with no successful requests")419 420 return tpl % (421 (self.method and self.method + " " or "") + self.name,422 self.num_requests,423 self.get_response_time_percentile(0.5),424 self.get_response_time_percentile(0.66),425 self.get_response_time_percentile(0.75),426 self.get_response_time_percentile(0.80),427 self.get_response_time_percentile(0.90),428 self.get_response_time_percentile(0.95),429 self.get_response_time_percentile(0.98),430 self.get_response_time_percentile(0.99),431 self.get_response_time_percentile(1.00)432 )433 434 def _cache_response_times(self, t):435 self.response_times_cache[t] = CachedResponseTimes(436 response_times=copy(self.response_times),437 num_requests=self.num_requests,438 )439 440 441 # We'll use a cache size of CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10 since - in the extreme 
case -442 # we might still use response times (from the cache) for t-CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW-10 443 # to calculate the current response time percentile, if we're missing cached values for the subsequent 444 # 20 seconds445 cache_size = CURRENT_RESPONSE_TIME_PERCENTILE_WINDOW + 10446 447 if len(self.response_times_cache) > cache_size:448 # only keep the latest 20 response_times dicts449 for i in xrange(len(self.response_times_cache) - cache_size):450 self.response_times_cache.popitem(last=False)451class StatsError(object):452 def __init__(self, method, name, error, occurences=0):453 self.method = method454 self.name = name455 self.error = error456 self.occurences = occurences457 @classmethod458 def parse_error(cls, error):459 string_error = repr(error)460 target = "object at 0x"461 target_index = string_error.find(target)462 if target_index < 0:463 return string_error464 start = target_index + len(target) - 2465 end = string_error.find(">", start)466 if end < 0:467 return string_error468 hex_address = string_error[start:end]469 return string_error.replace(hex_address, "0x....")470 @classmethod471 def create_key(cls, method, name, error):472 key = "%s.%s.%r" % (method, name, StatsError.parse_error(error))473 return hashlib.md5(key.encode('utf-8')).hexdigest()474 def occured(self):475 self.occurences += 1476 def to_name(self):477 return "%s %s: %r" % (self.method, 478 self.name, repr(self.error))479 def to_dict(self):480 return {481 "method": self.method,482 "name": self.name,483 "error": StatsError.parse_error(self.error),484 "occurences": self.occurences485 }486 @classmethod487 def from_dict(cls, data):488 return cls(489 data["method"], 490 data["name"], 491 data["error"], 492 data["occurences"]493 )494def avg(values):495 return sum(values, 0.0) / max(len(values), 1)496def median_from_dict(total, count):497 """498 total is the number of requests made499 count is a dict {response_time: count}500 """501 pos = (total - 1) / 2502 for k in 
sorted(six.iterkeys(count)):503 if pos < count[k]:504 return k505 pos -= count[k]506global_stats = RequestStats()507"""508A global instance for holding the statistics. Should be removed eventually.509"""510def on_request_success(request_type, name, response_time, response_length):511 global_stats.log_request(request_type, name, response_time, response_length)512def on_request_failure(request_type, name, response_time, exception):513 global_stats.log_error(request_type, name, exception)514def on_report_to_master(client_id, data):515 data["stats"] = global_stats.serialize_stats()516 data["stats_total"] = global_stats.total.get_stripped_report()517 data["errors"] = global_stats.serialize_errors()518 global_stats.errors = {}519def on_slave_report(client_id, data):520 for stats_data in data["stats"]:521 entry = StatsEntry.unserialize(stats_data)522 request_key = (entry.name, entry.method)523 if not request_key in global_stats.entries:524 global_stats.entries[request_key] = StatsEntry(global_stats, entry.name, entry.method)525 global_stats.entries[request_key].extend(entry)526 for error_key, error in six.iteritems(data["errors"]):527 if error_key not in global_stats.errors:528 global_stats.errors[error_key] = StatsError.from_dict(error)529 else:530 global_stats.errors[error_key].occurences += error["occurences"]531 532 # save the old last_request_timestamp, to see if we should store a new copy533 # of the response times in the response times cache534 old_last_request_timestamp = global_stats.total.last_request_timestamp535 # update the total StatsEntry536 global_stats.total.extend(StatsEntry.unserialize(data["stats_total"]))537 if global_stats.total.last_request_timestamp > old_last_request_timestamp:538 # If we've entered a new second, we'll cache the response times. Note that there 539 # might still be reports from other slave nodes - that contains requests for the same 540 # time periods - that hasn't been received/accounted for yet. 
This will cause the cache to 541 # lag behind a second or two, but since StatsEntry.current_response_time_percentile() 542 # (which is what the response times cache is used for) uses an approximation of the 543 # last 10 seconds anyway, it should be fine to ignore this. 544 global_stats.total._cache_response_times(global_stats.total.last_request_timestamp)545 546events.request_success += on_request_success547events.request_failure += on_request_failure548events.report_to_master += on_report_to_master549events.slave_report += on_slave_report550def print_stats(stats):551 console_logger.info((" %-" + str(STATS_NAME_WIDTH) + "s %7s %12s %7s %7s %7s | %7s %7s") % ('Name', '# reqs', '# fails', 'Avg', 'Min', 'Max', 'Median', 'req/s'))552 console_logger.info("-" * (80 + STATS_NAME_WIDTH))553 total_rps = 0554 total_reqs = 0555 total_failures = 0556 for key in sorted(six.iterkeys(stats)):557 r = stats[key]558 total_rps += r.current_rps...

Full Screen

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. Right from setting up the prerequisites to running your first automation test, to following best practices and diving deeper into advanced test scenarios, the LambdaTest Learning Hubs compile a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run locust automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful