How to use _fast_users_on_workers_copy method in locust

Best Python code snippet using locust

dispatch.py

Source: dispatch.py (GitHub)
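The snippet below comes from Locust's users dispatcher, which tracks how many users of each class run on each worker as a nested mapping of the form {worker_node_id: {user_class_name: count}}. The `_fast_users_on_workers_copy` method exists to snapshot that mapping cheaply; its docstring (near the end of the snippet) notes that `copy.deepcopy` was too slow for this. As a quick orientation, here is a minimal sketch of that kind of two-level copy. The worker ids, user class names and the `fast_copy` helper are invented for illustration and are not part of Locust's API.

# Hypothetical dispatch state: {worker_node_id: {user_class_name: user_count}}.
# The worker ids and user class names are made up for this example.
users_on_workers = {
    "worker-1": {"WebsiteUser": 10, "MobileUser": 5},
    "worker-2": {"WebsiteUser": 9, "MobileUser": 6},
}

def fast_copy(users_on_workers):
    # Rebuild the outer dict and every inner dict; the int counts are immutable,
    # so a two-level copy is enough and avoids deepcopy's per-object overhead.
    return {worker_id: dict(counts) for worker_id, counts in users_on_workers.items()}

snapshot = fast_copy(users_on_workers)
snapshot["worker-1"]["WebsiteUser"] += 1
assert users_on_workers["worker-1"]["WebsiteUser"] == 10  # the original is left untouched

In the source below, the dispatcher calls `_fast_users_on_workers_copy` for the same reason in `__next__` and `new_dispatch`, so the dictionaries it hands out can be mutated without corrupting its internal bookkeeping.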

...
        self._initial_users_on_workers = {
            worker_node.id: {user_class.__name__: 0 for user_class in self._user_classes}
            for worker_node in worker_nodes
        }
        self._users_on_workers = self._fast_users_on_workers_copy(self._initial_users_on_workers)
        self._current_user_count = sum(map(sum, map(dict.values, self._users_on_workers.values())))
        self._dispatcher_generator = None
        self._user_generator = self._user_gen()
        self._worker_node_generator = itertools.cycle(self._worker_nodes)
        # To keep track of how long it takes for each dispatch iteration to compute
        self._dispatch_iteration_durations = []
        self._active_users = []
        # TODO: Test that attribute is set when dispatching and unset when done dispatching
        self._dispatch_in_progress = False
        self._rebalance = False

    @property
    def dispatch_in_progress(self):
        return self._dispatch_in_progress

    @property
    def dispatch_iteration_durations(self) -> List[float]:
        return self._dispatch_iteration_durations

    def __next__(self) -> Dict[str, Dict[str, int]]:
        users_on_workers = next(self._dispatcher_generator)
        # TODO: Is this necessary to copy the users_on_workers if we know
        # it won't be mutated by external code?
        return self._fast_users_on_workers_copy(users_on_workers)

    def _dispatcher(self) -> Generator[Dict[str, Dict[str, int]], None, None]:
        self._dispatch_in_progress = True
        if self._rebalance:
            self._rebalance = False
            yield self._users_on_workers
            if self._current_user_count == self._target_user_count:
                return
        if self._current_user_count == self._target_user_count:
            yield self._initial_users_on_workers
            self._dispatch_in_progress = False
            return
        while self._current_user_count < self._target_user_count:
            with self._wait_between_dispatch_iteration_context():
                yield self._add_users_on_workers()
                if self._rebalance:
                    self._rebalance = False
                    yield self._users_on_workers
        while self._current_user_count > self._target_user_count:
            with self._wait_between_dispatch_iteration_context():
                yield self._remove_users_from_workers()
                if self._rebalance:
                    self._rebalance = False
                    yield self._users_on_workers
        self._dispatch_in_progress = False

    def new_dispatch(self, target_user_count: int, spawn_rate: float) -> None:
        """
        Initialize a new dispatch cycle.
        :param target_user_count: The desired user count at the end of the dispatch cycle
        :param spawn_rate: The spawn rate
        """
        self._target_user_count = target_user_count
        self._spawn_rate = spawn_rate
        self._user_count_per_dispatch_iteration = max(1, math.floor(self._spawn_rate))
        self._wait_between_dispatch = self._user_count_per_dispatch_iteration / self._spawn_rate
        self._initial_users_on_workers = self._users_on_workers
        self._users_on_workers = self._fast_users_on_workers_copy(self._initial_users_on_workers)
        self._current_user_count = sum(map(sum, map(dict.values, self._users_on_workers.values())))
        self._dispatcher_generator = self._dispatcher()
        self._dispatch_iteration_durations.clear()

    def add_worker(self, worker_node: "WorkerNode") -> None:
        """
        This method is to be called when a new worker connects to the master. When
        a new worker is added, the users dispatcher will flag that a rebalance is required
        and ensure that the next dispatch iteration will be made to redistribute the users
        on the new pool of workers.
        :param worker_node: The worker node to add.
        """
        self._worker_nodes.append(worker_node)
        self._worker_nodes = sorted(self._worker_nodes, key=lambda w: w.id)
        self._prepare_rebalance()

    def remove_worker(self, worker_node: "WorkerNode") -> None:
        """
        This method is similar to the above `add_worker`. When a worker disconnects
        (because of e.g. network failure, worker failure, etc.), this method will ensure that the next
        dispatch iteration redistributes the users on the remaining workers.
        :param worker_node: The worker node to remove.
        """
        self._worker_nodes = [w for w in self._worker_nodes if w.id != worker_node.id]
        if len(self._worker_nodes) == 0:
            # TODO: Test this
            return
        self._prepare_rebalance()

    def _prepare_rebalance(self) -> None:
        """
        When a rebalance is required because of added and/or removed workers, we compute the desired state as if
        we started from 0 user. So, if we were currently running 500 users, then the `_distribute_users` will
        perform a fake ramp-up without any waiting and return the final distribution.
        """
        users_on_workers, user_gen, worker_gen, active_users = self._distribute_users(self._current_user_count)
        self._users_on_workers = users_on_workers
        self._active_users = active_users
        # It's important to reset the generators by using the ones from `_distribute_users`
        # so that the next iterations are smooth and continuous.
        self._user_generator = user_gen
        self._worker_node_generator = worker_gen
        self._rebalance = True

    @contextlib.contextmanager
    def _wait_between_dispatch_iteration_context(self) -> None:
        t0_rel = time.perf_counter()
        # We don't use `try: ... finally: ...` because we don't want to sleep
        # if there's an exception within the context.
        yield
        delta = time.perf_counter() - t0_rel
        self._dispatch_iteration_durations.append(delta)
        # print("Dispatch cycle took {:.3f}ms".format(delta * 1000))
        if self._current_user_count == self._target_user_count:
            # No sleep when this is the last dispatch iteration
            return
        sleep_duration = max(0.0, self._wait_between_dispatch - delta)
        gevent.sleep(sleep_duration)

    def _add_users_on_workers(self) -> Dict[str, Dict[str, int]]:
        """Add users on the workers until the target number of users is reached for the current dispatch iteration
        :return: The users that we want to run on the workers
        """
        current_user_count_target = min(
            self._current_user_count + self._user_count_per_dispatch_iteration, self._target_user_count
        )
        for user in self._user_generator:
            worker_node = next(self._worker_node_generator)
            self._users_on_workers[worker_node.id][user] += 1
            self._current_user_count += 1
            self._active_users.append((worker_node, user))
            if self._current_user_count >= current_user_count_target:
                return self._users_on_workers

    def _remove_users_from_workers(self) -> Dict[str, Dict[str, int]]:
        """Remove users from the workers until the target number of users is reached for the current dispatch iteration
        :return: The users that we want to run on the workers
        """
        current_user_count_target = max(
            self._current_user_count - self._user_count_per_dispatch_iteration, self._target_user_count
        )
        while True:
            try:
                worker_node, user = self._active_users.pop()
            except IndexError:
                return self._users_on_workers
            self._users_on_workers[worker_node.id][user] -= 1
            self._current_user_count -= 1
            if self._current_user_count == 0 or self._current_user_count <= current_user_count_target:
                return self._users_on_workers

    def _distribute_users(
        self, target_user_count: int
    ) -> Tuple[dict, Generator[str, None, None], typing.Iterator["WorkerNode"], List[Tuple["WorkerNode", str]]]:
        """
        This function might take some time to complete if the `target_user_count` is a big number. A big number
        is typically > 50 000. However, this function is only called if a worker is added or removed while a test
        is running. Such a situation should be quite rare.
        """
        user_gen = self._user_gen()
        worker_gen = itertools.cycle(self._worker_nodes)
        users_on_workers = {
            worker_node.id: {user_class.__name__: 0 for user_class in self._user_classes}
            for worker_node in self._worker_nodes
        }
        active_users = []
        user_count = 0
        while user_count < target_user_count:
            user = next(user_gen)
            worker_node = next(worker_gen)
            users_on_workers[worker_node.id][user] += 1
            user_count += 1
            active_users.append((worker_node, user))
        return users_on_workers, user_gen, worker_gen, active_users

    def _user_gen(self) -> Generator[str, None, None]:
        """
        This method generates users according to their weights using
        a smooth weighted round-robin algorithm implemented by https://github.com/linnik/roundrobin.
        For example, given users A, B with weights 5 and 1 respectively, this algorithm
        will yield AAABAAAAABAA. The smooth aspect of this algorithm is what makes it possible
        to keep the distribution during ramp-up and ramp-down. If we were to use a normal
        weighted round-robin algorithm, we'd get AAAAABAAAAAB which would make the distribution
        less accurate during ramp-up/down.
        """
        # Normalize the weights so that the smallest weight will be equal to "target_min_weight".
        # The value "2" was experimentally determined because it gave a better distribution especially
        # when dealing with weights which are close to each others, e.g. 1.5, 2, 2.4, etc.
        target_min_weight = 2
        min_weight = min(u.weight for u in self._user_classes)
        normalized_weights = [
            (user_class.__name__, round(target_min_weight * user_class.weight / min_weight))
            for user_class in self._user_classes
        ]
        gen = smooth(normalized_weights)
        # Instead of calling `gen()` for each user, we cycle through a generator of fixed-length
        # `generation_length_to_get_proper_distribution`. Doing so greatly improves performance because
        # we only ever need to call `gen()` a relatively small number of times. The length of this generator
        # is chosen as the sum of the normalized weights. So, for users A, B, C of weights 2, 5, 6, the length is
        # 2 + 5 + 6 = 13 which would yield the distribution `CBACBCBCBCABC` that gets repeated over and over
        # until the target user count is reached.
        generation_length_to_get_proper_distribution = sum(
            normalized_weight[1] for normalized_weight in normalized_weights
        )
        yield from itertools.cycle(gen() for _ in range(generation_length_to_get_proper_distribution))

    @staticmethod
    def _fast_users_on_workers_copy(users_on_workers: Dict[str, Dict[str, int]]) -> Dict[str, Dict[str, int]]:
        """deepcopy is too slow, so we use this custom copy function.
        The implementation was profiled and compared to other implementations such as dict-comprehensions
        and the one below is the most efficient.
        """
...
