How to use the inner method in ATX

Best Python code snippets using ATX
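None of the snippets below call an ATX-specific inner API; what they share is the plain-Python pattern of an inner method, i.e. a nested function (closure) defined inside an outer function or test method. As a minimal, framework-agnostic sketch (the device object and its tap method are hypothetical stand-ins, not part of ATX's documented API), an inner method typically captures state from its enclosing scope:

def make_retrying_tap(device, retries=3):
    # Inner method: captures `device` and `retries` from the enclosing scope.
    def inner(x, y):
        for _ in range(retries):
            if device.tap(x, y):  # hypothetical driver call
                return True
        return False
    return inner

# Usage (given some connected device object `d`): tap = make_retrying_tap(d); tap(100, 200)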

meta.py

Source: meta.py (GitHub)


# global
import ivy
from ivy.functional.ivy.core.gradients import gradient_descent_update


# Private #
# --------#

def _compute_cost_and_update_grads(cost_fn, order, batch, variables, outer_v, keep_outer_v,
                                   average_across_steps_or_final, all_grads, unique_outer, batched, num_tasks):
    if order == 1:
        cost, inner_grads = ivy.execute_with_gradients(
            lambda v: cost_fn(batch, v=variables.set_at_key_chains(v) if unique_outer else v),
            variables.at_key_chains(outer_v, ignore_none=True) if keep_outer_v else
            variables.prune_key_chains(outer_v, ignore_none=True), retain_grads=False)
        if batched:
            inner_grads = inner_grads * num_tasks
        if average_across_steps_or_final:
            all_grads.append(inner_grads)
    else:
        cost = cost_fn(batch, v=variables)
    return cost


def _train_task(inner_batch, outer_batch, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
                inner_learning_rate, inner_optimization_step, order, average_across_steps, inner_v, keep_innver_v,
                outer_v, keep_outer_v, batched, num_tasks, stop_gradients):
    # init
    total_cost = 0
    all_grads = list()
    # inner and outer
    unique_inner = inner_v is not None
    unique_outer = outer_v is not None
    # iterate through inner loop training steps
    for i in range(inner_grad_steps):
        # compute inner gradient for updating the inner variables
        cost, inner_update_grads = ivy.execute_with_gradients(
            lambda v: inner_cost_fn(inner_batch, v=variables.set_at_key_chains(v) if unique_inner else v),
            variables.at_key_chains(inner_v, ignore_none=True) if keep_innver_v else
            variables.prune_key_chains(inner_v, ignore_none=True), retain_grads=order > 1)
        if batched:
            inner_update_grads = inner_update_grads * num_tasks
        # compute the cost to be optimized, and update all_grads if first order method
        if outer_cost_fn is None and not unique_inner and not unique_outer:
            all_grads.append(inner_update_grads)
        else:
            cost = _compute_cost_and_update_grads(
                inner_cost_fn if outer_cost_fn is None else outer_cost_fn, order, outer_batch, variables, outer_v,
                keep_outer_v, average_across_steps, all_grads, unique_outer, batched, num_tasks)
        # update cost and update parameters
        total_cost = total_cost + cost
        if unique_inner:
            variables = variables.set_at_key_chains(
                inner_optimization_step(variables.at_key_chains(inner_v) if keep_innver_v else
                                        variables.prune_key_chains(inner_v), inner_update_grads,
                                        inner_learning_rate, inplace=False, stop_gradients=stop_gradients))
        else:
            variables = inner_optimization_step(variables, inner_update_grads, inner_learning_rate, inplace=False,
                                                stop_gradients=stop_gradients)
    # once training is finished, compute the final cost, and update all_grads if first order method
    final_cost = _compute_cost_and_update_grads(
        inner_cost_fn if outer_cost_fn is None else outer_cost_fn, order, outer_batch, variables, outer_v,
        keep_outer_v, True, all_grads, unique_outer, batched, num_tasks)
    # update variables
    if stop_gradients:
        variables = variables.stop_gradients()
    if not batched:
        variables = variables.expand_dims(0)
    # average the cost or gradients across all timesteps if this option is chosen
    if average_across_steps:
        total_cost = total_cost + final_cost
        if order == 1:
            all_grads = sum(all_grads) / max(len(all_grads), 1)
        return total_cost / (inner_grad_steps + 1), variables, all_grads
    # else return only the final values
    if order == 1:
        all_grads = all_grads[-1]
    return final_cost, variables, all_grads


def _train_tasks_batched(batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables,
                         inner_grad_steps, inner_learning_rate, inner_optimization_step, order, average_across_steps,
                         inner_v, keep_innver_v, outer_v, keep_outer_v, return_inner_v, num_tasks, stop_gradients):
    inner_batch = batch
    outer_batch = batch
    if inner_batch_fn is not None:
        inner_batch = inner_batch_fn(inner_batch)
    if outer_batch_fn is not None:
        outer_batch = outer_batch_fn(outer_batch)
    cost, updated_ivs, grads = _train_task(inner_batch, outer_batch, inner_cost_fn, outer_cost_fn, variables,
                                           inner_grad_steps, inner_learning_rate, inner_optimization_step, order,
                                           average_across_steps, inner_v, keep_innver_v, outer_v, keep_outer_v, True,
                                           num_tasks, stop_gradients)
    grads = grads.reduce_mean(0) if isinstance(grads, ivy.Container) else grads
    if order == 1:
        if return_inner_v in ['all', True]:
            return cost, grads, updated_ivs
        elif return_inner_v == 'first':
            return cost, grads, updated_ivs[0:1]
        return cost, grads
    if return_inner_v in ['all', True]:
        return cost, updated_ivs
    elif return_inner_v == 'first':
        return cost, updated_ivs[0:1]
    return cost


def _train_tasks_with_for_loop(batch, inner_sub_batch_fn, outer_sub_batch_fn, inner_cost_fn, outer_cost_fn, variables,
                               inner_grad_steps, inner_learning_rate, inner_optimization_step, order,
                               average_across_steps, inner_v, keep_innver_v, outer_v, keep_outer_v, return_inner_v,
                               num_tasks, stop_gradients):
    total_cost = 0
    updated_ivs_to_return = list()
    all_grads = list()
    if isinstance(inner_v, (list, tuple)) and isinstance(inner_v[0], (list, tuple, dict, type(None))):
        inner_v_seq = True
    else:
        inner_v_seq = False
    if isinstance(outer_v, (list, tuple)) and isinstance(outer_v[0], (list, tuple, dict, type(None))):
        outer_v_seq = True
    else:
        outer_v_seq = False
    for i, sub_batch in enumerate(batch.unstack(0, True, num_tasks)):
        if inner_sub_batch_fn is not None:
            inner_sub_batch = inner_sub_batch_fn(sub_batch)
        else:
            inner_sub_batch = sub_batch
        if outer_sub_batch_fn is not None:
            outer_sub_batch = outer_sub_batch_fn(sub_batch)
        else:
            outer_sub_batch = sub_batch
        iv = inner_v[i] if inner_v_seq else inner_v
        ov = outer_v[i] if outer_v_seq else outer_v
        cost, updated_iv, grads = _train_task(inner_sub_batch, outer_sub_batch, inner_cost_fn, outer_cost_fn,
                                              variables, inner_grad_steps, inner_learning_rate,
                                              inner_optimization_step, order, average_across_steps, iv,
                                              keep_innver_v, ov, keep_outer_v, False, num_tasks, stop_gradients)
        if (return_inner_v == 'first' and i == 0) or return_inner_v in ['all', True]:
            updated_ivs_to_return.append(updated_iv)
        total_cost = total_cost + cost
        all_grads.append(grads)
    if order == 1:
        if return_inner_v:
            return total_cost / num_tasks, sum(all_grads) / num_tasks, ivy.Container.concat(updated_ivs_to_return, 0)
        return total_cost / num_tasks, sum(all_grads) / num_tasks
    if return_inner_v:
        return total_cost / num_tasks, ivy.Container.concat(updated_ivs_to_return, 0)
    return total_cost / num_tasks


def _train_tasks(batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
                 inner_learning_rate, inner_optimization_step, order, average_across_steps, batched, inner_v,
                 keep_innver_v, outer_v, keep_outer_v, return_inner_v, num_tasks, stop_gradients):
    if batched:
        return _train_tasks_batched(
            batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
            inner_learning_rate, inner_optimization_step, order, average_across_steps, inner_v, keep_innver_v,
            outer_v, keep_outer_v, return_inner_v, num_tasks, stop_gradients)
    return _train_tasks_with_for_loop(
        batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
        inner_learning_rate, inner_optimization_step, order, average_across_steps, inner_v, keep_innver_v, outer_v,
        keep_outer_v, return_inner_v, num_tasks, stop_gradients)


# Public #
# -------#

# First Order

def fomaml_step(batch, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps, inner_learning_rate,
                inner_optimization_step=gradient_descent_update, inner_batch_fn=None, outer_batch_fn=None,
                average_across_steps=False, batched=True, inner_v=None, keep_inner_v=True, outer_v=None,
                keep_outer_v=True, return_inner_v=False, num_tasks=None, stop_gradients=True):
    """
    Perform a step of first-order MAML.
    :param batch: The input batch
    :type batch: ivy.Container
    :param inner_cost_fn: callable for the inner loop cost function, receiving the task-specific sub-batch,
                          inner vars and outer vars
    :type inner_cost_fn: callable
    :param outer_cost_fn: callable for the outer loop cost function, receiving the task-specific sub-batch,
                          inner vars and outer vars. If None, the cost from the inner loop will also be
                          optimized in the outer loop.
    :type outer_cost_fn: callable, optional
    :param variables: Variables to be optimized during the meta step
    :type variables: ivy.Container
    :param inner_grad_steps: Number of gradient steps to perform during the inner loop.
    :type inner_grad_steps: int
    :param inner_learning_rate: The learning rate of the inner loop.
    :type inner_learning_rate: float
    :param inner_optimization_step: The function used for the inner loop optimization.
                                    Default is ivy.gradient_descent_update.
    :type inner_optimization_step: callable, optional
    :param inner_batch_fn: Function to apply to the task sub-batch, before passing to the inner_cost_fn.
                           Default is None.
    :type inner_batch_fn: callable, optional
    :param outer_batch_fn: Function to apply to the task sub-batch, before passing to the outer_cost_fn.
                           Default is None.
    :type outer_batch_fn: callable, optional
    :param average_across_steps: Whether to average the inner loop steps for the outer loop update. Default is False.
    :type average_across_steps: bool, optional
    :param batched: Whether to batch along the time dimension, and run the meta steps in batch. Default is True.
    :type batched: bool, optional
    :param inner_v: Nested variable keys to be optimized during the inner loop, with same keys and boolean values.
    :type inner_v: dict str or list, optional
    :param keep_inner_v: If True, the key chains in inner_v will be kept, otherwise they will be removed.
                         Default is True.
    :type keep_inner_v: bool, optional
    :param outer_v: Nested variable keys to be optimized during the outer loop, with same keys and boolean values.
    :type outer_v: dict str or list, optional
    :param keep_outer_v: If True, the key chains in outer_v will be kept, otherwise they will be removed.
                         Default is True.
    :type keep_outer_v: bool, optional
    :param return_inner_v: Either 'first', 'all', or False. 'first' means the variables for the first task inner loop
                           will also be returned; variables for all tasks will be returned with 'all'.
                           Default is False.
    :type return_inner_v: str, optional
    :param num_tasks: Number of unique tasks to inner-loop optimize for the meta step.
                      Determined from the batch by default.
    :type num_tasks: int, optional
    :param stop_gradients: Whether to stop the gradients of the cost. Default is True.
    :type stop_gradients: bool, optional
    :return: The cost and the gradients with respect to the outer loop variables.
    """
    if num_tasks is None:
        num_tasks = batch.shape[0]
    rets = _train_tasks(
        batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps,
        inner_learning_rate, inner_optimization_step, 1, average_across_steps, batched, inner_v, keep_inner_v,
        outer_v, keep_outer_v, return_inner_v, num_tasks, stop_gradients)
    cost = rets[0]
    if stop_gradients:
        cost = ivy.stop_gradient(cost, preserve_type=False)
    grads = rets[1]
    if return_inner_v:
        return cost, grads, rets[2]
    return cost, grads


def reptile_step(batch, cost_fn, variables, inner_grad_steps, inner_learning_rate,
                 inner_optimization_step=gradient_descent_update, batched=True, return_inner_v=False,
                 num_tasks=None, stop_gradients=True):
    """
    Perform a step of Reptile.
    :param batch: The input batch
    :type batch: ivy.Container
    :param cost_fn: callable for the cost function, receiving the task-specific sub-batch and variables
    :type cost_fn: callable
    :param variables: Variables to be optimized
    :type variables: ivy.Container
    :param inner_grad_steps: Number of gradient steps to perform during the inner loop.
    :type inner_grad_steps: int
    :param inner_learning_rate: The learning rate of the inner loop.
    :type inner_learning_rate: float
    :param inner_optimization_step: The function used for the inner loop optimization.
                                    Default is ivy.gradient_descent_update.
    :type inner_optimization_step: callable, optional
    :param batched: Whether to batch along the time dimension, and run the meta steps in batch. Default is True.
    :type batched: bool, optional
    :param return_inner_v: Either 'first', 'all', or False. 'first' means the variables for the first task inner loop
                           will also be returned; variables for all tasks will be returned with 'all'.
                           Default is False.
    :type return_inner_v: str, optional
    :param num_tasks: Number of unique tasks to inner-loop optimize for the meta step.
                      Determined from the batch by default.
    :type num_tasks: int, optional
    :param stop_gradients: Whether to stop the gradients of the cost. Default is True.
    :type stop_gradients: bool, optional
    :return: The cost and the gradients with respect to the outer loop variables.
    """
    if num_tasks is None:
        num_tasks = batch.shape[0]
    # noinspection PyTypeChecker
    rets = _train_tasks(
        batch, None, None, cost_fn, None, variables, inner_grad_steps, inner_learning_rate, inner_optimization_step,
        1, True, batched, None, True, None, True, return_inner_v, num_tasks, stop_gradients)
    cost = rets[0]
    if stop_gradients:
        cost = ivy.stop_gradient(cost, preserve_type=False)
    grads = rets[1] / inner_learning_rate
    if return_inner_v:
        return cost, grads, rets[2]
    return cost, grads


# Second Order

def maml_step(batch, inner_cost_fn, outer_cost_fn, variables, inner_grad_steps, inner_learning_rate,
              inner_optimization_step=gradient_descent_update, inner_batch_fn=None, outer_batch_fn=None,
              average_across_steps=False, batched=True, inner_v=None, keep_inner_v=True, outer_v=None,
              keep_outer_v=True, return_inner_v=False, num_tasks=None, stop_gradients=True):
    """
    Perform a step of vanilla second-order MAML.
    :param batch: The input batch
    :type batch: ivy.Container
    :param inner_cost_fn: callable for the inner loop cost function, receiving the sub-batch, inner vars and
                          outer vars
    :type inner_cost_fn: callable
    :param outer_cost_fn: callable for the outer loop cost function, receiving the task-specific sub-batch,
                          inner vars and outer vars. If None, the cost from the inner loop will also be
                          optimized in the outer loop.
    :type outer_cost_fn: callable, optional
    :param variables: Variables to be optimized during the meta step
    :type variables: ivy.Container
    :param inner_grad_steps: Number of gradient steps to perform during the inner loop.
    :type inner_grad_steps: int
    :param inner_learning_rate: The learning rate of the inner loop.
    :type inner_learning_rate: float
    :param inner_optimization_step: The function used for the inner loop optimization.
                                    Default is ivy.gradient_descent_update.
    :type inner_optimization_step: callable, optional
    :param inner_batch_fn: Function to apply to the task sub-batch, before passing to the inner_cost_fn.
                           Default is None.
    :type inner_batch_fn: callable, optional
    :param outer_batch_fn: Function to apply to the task sub-batch, before passing to the outer_cost_fn.
                           Default is None.
    :type outer_batch_fn: callable, optional
    :param average_across_steps: Whether to average the inner loop steps for the outer loop update. Default is False.
    :type average_across_steps: bool, optional
    :param batched: Whether to batch along the time dimension, and run the meta steps in batch. Default is True.
    :type batched: bool, optional
    :param inner_v: Nested variable keys to be optimized during the inner loop, with same keys and boolean values.
    :type inner_v: dict str or list, optional
    :param keep_inner_v: If True, the key chains in inner_v will be kept, otherwise they will be removed.
                         Default is True.
    :type keep_inner_v: bool, optional
    :param outer_v: Nested variable keys to be optimized during the outer loop, with same keys and boolean values.
    :type outer_v: dict str or list, optional
    :param keep_outer_v: If True, the key chains in outer_v will be kept, otherwise they will be removed.
                         Default is True.
    :type keep_outer_v: bool, optional
    :param return_inner_v: Either 'first', 'all', or False. 'first' means the variables for the first task inner loop
                           will also be returned; variables for all tasks will be returned with 'all'.
                           Default is False.
    :type return_inner_v: str, optional
    :param num_tasks: Number of unique tasks to inner-loop optimize for the meta step.
                      Determined from the batch by default.
    :type num_tasks: int, optional
    :param stop_gradients: Whether to stop the gradients of the cost. Default is True.
    :type stop_gradients: bool, optional
    :return: The cost and the gradients with respect to the outer loop variables.
    """
    if num_tasks is None:
        num_tasks = batch.shape[0]
    unique_outer = outer_v is not None
    cost, grads, *rets = ivy.execute_with_gradients(lambda v: _train_tasks(
        batch, inner_batch_fn, outer_batch_fn, inner_cost_fn, outer_cost_fn,
        variables.set_at_key_chains(v) if unique_outer else v, inner_grad_steps, inner_learning_rate,
        inner_optimization_step, 2, average_across_steps, batched, inner_v, keep_inner_v, outer_v, keep_outer_v,
        return_inner_v, num_tasks, False),
        variables.at_key_chains(outer_v, ignore_none=True)
        if keep_outer_v else variables.prune_key_chains(outer_v, ignore_none=True))
    if stop_gradients:
        cost = ivy.stop_gradient(cost, preserve_type=False)
    # noinspection PyRedundantParentheses
    ...
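The functions above all follow the same inner/outer structure: each task runs an inner loop of gradient steps on its own sub-batch, and the resulting costs or gradients are averaged across tasks for a single outer (meta) update. The sketch below shows only that control flow for first-order MAML on a toy quadratic loss per task; it is a simplified illustration of the structure, not a substitute for ivy's fomaml_step.

import numpy as np

def fomaml_like_step(task_targets, w, inner_grad_steps=1, inner_learning_rate=0.1):
    # Toy per-task loss: 0.5 * (w - target) ** 2, so the gradient is (w - target).
    outer_grads = []
    for target in task_targets:
        w_task = w
        for _ in range(inner_grad_steps):           # inner loop: adapt to the task
            w_task -= inner_learning_rate * (w_task - target)
        outer_grads.append(w_task - target)         # first order: gradient at the adapted weights
    return float(np.mean(outer_grads))              # averaged across tasks, like _train_tasks

w = 0.0
for _ in range(100):                                # outer loop: meta updates
    w -= 0.01 * fomaml_like_step([1.0, -2.0, 0.5], w)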


ragged_factory_ops.py

Source: ragged_factory_ops.py (GitHub)


# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Operations for constructing RaggedTensors."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.util.tf_export import tf_export


#===============================================================================
# Op to construct a constant RaggedTensor from a nested Python list.
#===============================================================================
@tf_export("ragged.constant")
def constant(pylist, dtype=None, ragged_rank=None, inner_shape=None,
             name=None, row_splits_dtype=dtypes.int64):
  """Constructs a constant RaggedTensor from a nested Python list.
  Example:
  >>> tf.ragged.constant([[1, 2], [3], [4, 5, 6]])
  <tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
  All scalar values in `pylist` must have the same nesting depth `K`, and the
  returned `RaggedTensor` will have rank `K`. If `pylist` contains no scalar
  values, then `K` is one greater than the maximum depth of empty lists in
  `pylist`. All scalar values in `pylist` must be compatible with `dtype`.
  Args:
    pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that
      is not a `list`, `tuple` or `np.ndarray` must be a scalar value
      compatible with `dtype`.
    dtype: The type of elements for the returned `RaggedTensor`. If not
      specified, then a default is chosen based on the scalar values in
      `pylist`.
    ragged_rank: An integer specifying the ragged rank of the returned
      `RaggedTensor`. Must be nonnegative and less than `K`. Defaults to
      `max(0, K - 1)` if `inner_shape` is not specified. Defaults to
      `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified.
    inner_shape: A tuple of integers specifying the shape for individual inner
      values in the returned `RaggedTensor`. Defaults to `()` if `ragged_rank`
      is not specified. If `ragged_rank` is specified, then a default is chosen
      based on the contents of `pylist`.
    name: A name prefix for the returned tensor (optional).
    row_splits_dtype: data type for the constructed `RaggedTensor`'s row_splits.
      One of `tf.int32` or `tf.int64`.
  Returns:
    A potentially ragged tensor with rank `K` and the specified `ragged_rank`,
    containing the values from `pylist`.
  Raises:
    ValueError: If the scalar values in `pylist` have inconsistent nesting
      depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
  """
  def ragged_factory(values, row_splits):
    row_splits = constant_op.constant(row_splits, dtype=row_splits_dtype)
    return ragged_tensor.RaggedTensor.from_row_splits(values, row_splits,
                                                      validate=False)

  with ops.name_scope(name, "RaggedConstant"):
    return _constant_value(ragged_factory, constant_op.constant, pylist, dtype,
                           ragged_rank, inner_shape)


@tf_export(v1=["ragged.constant_value"])
def constant_value(pylist, dtype=None, ragged_rank=None, inner_shape=None,
                   row_splits_dtype="int64"):
  """Constructs a RaggedTensorValue from a nested Python list.
  Warning: This function returns a `RaggedTensorValue`, not a `RaggedTensor`.
  If you wish to construct a constant `RaggedTensor`, use
  [`ragged.constant(...)`](constant.md) instead.
  Example:
  >>> tf.compat.v1.ragged.constant_value([[1, 2], [3], [4, 5, 6]])
  tf.RaggedTensorValue(values=array([1, 2, 3, 4, 5, 6]),
                       row_splits=array([0, 2, 3, 6]))
  All scalar values in `pylist` must have the same nesting depth `K`, and the
  returned `RaggedTensorValue` will have rank `K`. If `pylist` contains no
  scalar values, then `K` is one greater than the maximum depth of empty lists
  in `pylist`. All scalar values in `pylist` must be compatible with `dtype`.
  Args:
    pylist: A nested `list`, `tuple` or `np.ndarray`. Any nested element that
      is not a `list` or `tuple` must be a scalar value compatible with `dtype`.
    dtype: `numpy.dtype`. The type of elements for the returned `RaggedTensor`.
      If not specified, then a default is chosen based on the scalar values in
      `pylist`.
    ragged_rank: An integer specifying the ragged rank of the returned
      `RaggedTensorValue`. Must be nonnegative and less than `K`. Defaults to
      `max(0, K - 1)` if `inner_shape` is not specified. Defaults to
      `max(0, K - 1 - len(inner_shape))` if `inner_shape` is specified.
    inner_shape: A tuple of integers specifying the shape for individual inner
      values in the returned `RaggedTensorValue`. Defaults to `()` if
      `ragged_rank` is not specified. If `ragged_rank` is specified, then a
      default is chosen based on the contents of `pylist`.
    row_splits_dtype: data type for the constructed `RaggedTensorValue`'s
      row_splits. One of `numpy.int32` or `numpy.int64`.
  Returns:
    A `tf.RaggedTensorValue` or `numpy.array` with rank `K` and the specified
    `ragged_rank`, containing the values from `pylist`.
  Raises:
    ValueError: If the scalar values in `pylist` have inconsistent nesting
      depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
  """
  if dtype is not None and isinstance(dtype, dtypes.DType):
    dtype = dtype.as_numpy_dtype
  row_splits_dtype = dtypes.as_dtype(row_splits_dtype).as_numpy_dtype

  def _ragged_factory(values, row_splits):
    row_splits = np.array(row_splits, dtype=row_splits_dtype)
    return ragged_tensor_value.RaggedTensorValue(values, row_splits)

  def _inner_factory(pylist, dtype, shape, name=None):  # pylint: disable=unused-argument
    return np.reshape(np.array(pylist, dtype=dtype), shape)

  return _constant_value(_ragged_factory, _inner_factory, pylist, dtype,
                         ragged_rank, inner_shape)


def _constant_value(ragged_factory, inner_factory, pylist, dtype, ragged_rank,
                    inner_shape):
  """Constructs a constant RaggedTensor or RaggedTensorValue.
  Args:
    ragged_factory: A factory function with the signature:
      `ragged_factory(values, row_splits)`
    inner_factory: A factory function with the signature: `inner_factory(pylist,
      dtype, shape, name)`
    pylist: A nested `list`, `tuple` or `np.ndarray`.
    dtype: Data type for returned value.
    ragged_rank: Ragged rank for returned value.
    inner_shape: Inner value shape for returned value.
  Returns:
    A value returned by `ragged_factory` or `inner_factory`.
  Raises:
    ValueError: If the scalar values in `pylist` have inconsistent nesting
      depth; or if ragged_rank or inner_shape are incompatible with `pylist`.
  """
  if ragged_tensor.is_ragged(pylist):
    raise TypeError("pylist may not be a RaggedTensor or RaggedTensorValue.")
  # np.ndim builds an array, so we short-circuit lists and tuples.
  if not isinstance(pylist, (list, tuple)) and np.ndim(pylist) == 0:
    # Scalar value
    if ragged_rank is not None and ragged_rank != 0:
      raise ValueError("Invalid pylist=%r: incompatible with ragged_rank=%d" %
                       (pylist, ragged_rank))
    if inner_shape is not None and inner_shape:
      raise ValueError(
          "Invalid pylist=%r: incompatible with dim(inner_shape)=%d" %
          (pylist, len(inner_shape)))
    return inner_factory(pylist, dtype, ())
  if ragged_rank is not None and ragged_rank < 0:
    raise ValueError(
        "Invalid ragged_rank=%r: must be nonnegative" % ragged_rank)
  # Find the depth of scalar values in `pylist`.
  scalar_depth, max_depth = _find_scalar_and_max_depth(pylist)
  if scalar_depth is not None:
    if max_depth > scalar_depth:
      raise ValueError("Invalid pylist=%r: empty list nesting is greater "
                       "than scalar value nesting" % pylist)
  # If both inner_shape and ragged_rank were specified, then check that
  # they are compatible with pylist.
  if inner_shape is not None and ragged_rank is not None:
    expected_depth = ragged_rank + len(inner_shape) + 1
    if ((scalar_depth is not None and expected_depth != scalar_depth) or
        (scalar_depth is None and expected_depth < max_depth)):
      raise ValueError(
          "Invalid pylist=%r: incompatible with ragged_rank=%d "
          "and dim(inner_shape)=%d" % (pylist, ragged_rank, len(inner_shape)))
  # Check if the result is a `Tensor`.
  if (ragged_rank == 0 or
      (ragged_rank is None and
       ((max_depth < 2) or
        (inner_shape is not None and max_depth - len(inner_shape) < 2)))):
    return inner_factory(pylist, dtype, inner_shape)
  # Compute default value for inner_shape.
  if inner_shape is None:
    if ragged_rank is None:
      inner_shape = ()
    else:
      inner_shape = _default_inner_shape_for_pylist(pylist, ragged_rank)
  # Compute default value for ragged_rank.
  if ragged_rank is None:
    if scalar_depth is None:
      ragged_rank = max(1, max_depth - 1)
    else:
      ragged_rank = max(1, scalar_depth - 1 - len(inner_shape))
  # Build the splits for each ragged rank, and concatenate the inner values
  # into a single list.
  nested_splits = []
  values = pylist
  for dim in range(ragged_rank):
    nested_splits.append([0])
    concatenated_values = []
    for row in values:
      nested_splits[dim].append(nested_splits[dim][-1] + len(row))
      concatenated_values.extend(row)
    values = concatenated_values
  values = inner_factory(
      values, dtype=dtype, shape=(len(values),) + inner_shape, name="values")
  for row_splits in reversed(nested_splits):
    values = ragged_factory(values, row_splits)
  return values


def _find_scalar_and_max_depth(pylist):
  """Finds nesting depth of scalar values in pylist.
  Args:
    pylist: A nested python `list` or `tuple`.
  Returns:
    A tuple `(scalar_depth, max_depth)`. `scalar_depth` is the nesting
    depth of scalar values in `pylist`, or `None` if `pylist` contains no
    scalars. `max_depth` is the maximum depth of `pylist` (including
    empty lists).
  Raises:
    ValueError: If pylist has inconsistent nesting depths for scalars.
  """
  # Check if pylist is not scalar. np.ndim builds an array, so we
  # short-circuit lists and tuples.
  if isinstance(pylist, (list, tuple)) or np.ndim(pylist) != 0:
    scalar_depth = None
    max_depth = 1
    for child in pylist:
      child_scalar_depth, child_max_depth = _find_scalar_and_max_depth(child)
      if child_scalar_depth is not None:
        if scalar_depth is not None and scalar_depth != child_scalar_depth + 1:
          raise ValueError("all scalar values must have the same nesting depth")
        scalar_depth = child_scalar_depth + 1
      max_depth = max(max_depth, child_max_depth + 1)
    return (scalar_depth, max_depth)
  return (0, 0)


def _default_inner_shape_for_pylist(pylist, ragged_rank):
  """Computes a default inner shape for the given python list."""

  def get_inner_shape(item):
    """Returns the inner shape for a python list `item`."""
    if not isinstance(item, (list, tuple)) and np.ndim(item) == 0:
      return ()
    elif item:
      return (len(item),) + get_inner_shape(item[0])
    return (0,)

  def check_inner_shape(item, shape):
    """Checks that `item` has a consistent shape matching `shape`."""
    is_nested = isinstance(item, (list, tuple)) or np.ndim(item) != 0
    if is_nested != bool(shape):
      raise ValueError("inner values have inconsistent shape")
    if is_nested:
      if shape[0] != len(item):
        raise ValueError("inner values have inconsistent shape")
      for child in item:
        check_inner_shape(child, shape[1:])

  # Collapse the ragged layers to get the list of inner values.
  flat_values = pylist
  for dim in range(ragged_rank):
    if not all(
        isinstance(v, (list, tuple)) or np.ndim(v) != 0 for v in flat_values):
      raise ValueError("pylist has scalar values depth %d, but ragged_rank=%d "
                       "requires scalar value depth greater than %d" %
                       (dim + 1, ragged_rank, ragged_rank))
    flat_values = sum((list(v) for v in flat_values), [])
  # Compute the inner shape looking only at the leftmost elements; and then
  # use check_inner_shape to verify that other elements have the same shape.
  inner_shape = get_inner_shape(flat_values)
  check_inner_shape(flat_values, inner_shape)
  return inner_shape[1:]


@tf_export(v1=["ragged.placeholder"])
def placeholder(dtype, ragged_rank, value_shape=None, name=None):
  """Creates a placeholder for a `tf.RaggedTensor` that will always be fed.
  **Important**: This ragged tensor will produce an error if evaluated.
  Its value must be fed using the `feed_dict` optional argument to
  `Session.run()`, `Tensor.eval()`, or `Operation.run()`.
  @compatibility{eager} Placeholders are not compatible with eager execution.
  Args:
    dtype: The data type for the `RaggedTensor`.
    ragged_rank: The ragged rank for the `RaggedTensor`
    value_shape: The shape for individual flat values in the `RaggedTensor`.
    name: A name for the operation (optional).
  Returns:
    A `RaggedTensor` that may be used as a handle for feeding a value, but
    not evaluated directly.
  Raises:
    RuntimeError: if eager execution is enabled
  """
  if ragged_rank == 0:
    return array_ops.placeholder(dtype, value_shape, name)
  with ops.name_scope(name, "RaggedPlaceholder", []):
    flat_shape = tensor_shape.TensorShape([None]).concatenate(value_shape)
    result = array_ops.placeholder(dtype, flat_shape, "flat_values")
    for i in reversed(range(ragged_rank)):
      row_splits = array_ops.placeholder(dtypes.int64, [None],
                                         "row_splits_%d" % i)
      result = ragged_tensor.RaggedTensor.from_row_splits(result, row_splits)
    ...
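The inner_shape and ragged_rank defaults described in the docstrings above can be exercised directly through the public tf.ragged.constant API. The short example below follows the documented behaviour; the shapes shown in the comments assume a standard TensorFlow 2.x install.

import tensorflow as tf

# Depth-3 input with ragged_rank=1: the innermost dimension stays uniform,
# so the default inner_shape is inferred as (2,).
rt = tf.ragged.constant([[[1, 2], [3, 4]], [[5, 6]]], ragged_rank=1)
print(rt.shape)       # (2, None, 2)
print(rt.row_splits)  # tf.Tensor([0 2 3], shape=(3,), dtype=int64)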


test_closure.py

Source: test_closure.py (GitHub)


...
        self.run_jit_inner_function(nopython=True)

    @testing.allow_interpreter_mode
    def test_return_closure(self):
        def outer(x):
            def inner():
                return x + 1
            return inner
        cfunc = jit(outer)
        self.assertEqual(cfunc(10)(), outer(10)())


class TestInlinedClosure(TestCase):
    """
    Tests for (partial) closure support in njit. The support is partial
    because it only works for closures that can be successfully inlined
    at compile time.
    """

    @tag('important')
    def test_inner_function(self):
        def outer(x):
            def inner(x):
                return x * x
            return inner(x) + inner(x)
        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))

    @tag('important')
    def test_inner_function_with_closure(self):
        def outer(x):
            y = x + 1
            def inner(x):
                return x * x + y
            return inner(x) + inner(x)
        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))

    @tag('important')
    def test_inner_function_with_closure_2(self):
        def outer(x):
            y = x + 1
            def inner(x):
                return x * y
            y = inner(x)
            return y + inner(x)
        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))

    @unittest.skipIf(utils.PYVERSION < (3, 0), "needs Python 3")
    def test_inner_function_with_closure_3(self):
        code = """
            def outer(x):
                y = x + 1
                z = 0
                def inner(x):
                    nonlocal z
                    z += x * x
                    return z + y
                return inner(x) + inner(x) + z
            """
        ns = {}
        exec(code.strip(), ns)
        cfunc = njit(ns['outer'])
        self.assertEqual(cfunc(10), ns['outer'](10))

    @tag('important')
    def test_inner_function_nested(self):
        def outer(x):
            def inner(y):
                def innermost(z):
                    return x + y + z
                s = 0
                for i in range(y):
                    s += innermost(i)
                return s
            return inner(x * x)
        cfunc = njit(outer)
        self.assertEqual(cfunc(10), outer(10))

    @tag('important')
    def test_bulk_use_cases(self):
        """ Tests the large number of use cases defined below """
        # jitted function used in some tests
        @njit
        def fib3(n):
            if n < 2:
                return n
            return fib3(n - 1) + fib3(n - 2)

        def outer1(x):
            """ Test calling recursive function from inner """
            def inner(x):
                return fib3(x)
            return inner(x)

        def outer2(x):
            """ Test calling recursive function from closure """
            z = x + 1
            def inner(x):
                return x + fib3(z)
            return inner(x)

        def outer3(x):
            """ Test recursive inner """
            def inner(x):
                if x + y < 2:
                    return 10
                else:
                    inner(x - 1)
            return inner(x)

        def outer4(x):
            """ Test recursive closure """
            y = x + 1
            def inner(x):
                if x + y < 2:
                    return 10
                else:
                    inner(x - 1)
            return inner(x)

        def outer5(x):
            """ Test nested closure """
            y = x + 1
            def inner1(x):
                z = y + x + 2
                def inner2(x):
                    return x + z
                return inner2(x) + y
            return inner1(x)

        def outer6(x):
            """ Test closure with list comprehension in body """
            y = x + 1
            def inner1(x):
                z = y + x + 2
                return [t for t in range(z)]
            return inner1(x)

        _OUTER_SCOPE_VAR = 9
        def outer7(x):
            """ Test use of outer scope var, no closure """
            z = x + 1
            return x + z + _OUTER_SCOPE_VAR

        _OUTER_SCOPE_VAR = 9
        def outer8(x):
            """ Test use of outer scope var, with closure """
            z = x + 1
            def inner(x):
                return x + z + _OUTER_SCOPE_VAR
            return inner(x)

        def outer9(x):
            """ Test closure assignment """
            z = x + 1
            def inner(x):
                return x + z
            f = inner
            return f(x)

        def outer10(x):
            """ Test two inner, one calls other """
            z = x + 1
            def inner(x):
                return x + z
            def inner2(x):
                return inner(x)
            return inner2(x)

        def outer11(x):
            """ return the closure """
            z = x + 1
            def inner(x):
                return x + z
            return inner

        def outer12(x):
            """ closure with kwarg """
            z = x + 1
            def inner(x, kw=7):
                return x + z + kw
            return inner(x)

        def outer13(x, kw=7):
            """ outer with kwarg no closure """
            z = x + 1 + kw
            return z

        def outer14(x, kw=7):
            """ outer with kwarg used in closure """
            z = x + 1
            def inner(x):
                return x + z + kw
            return inner(x)

        def outer15(x, kw=7):
            """ outer with kwarg as arg to closure """
            z = x + 1
            def inner(x, kw):
                return x + z + kw
            return inner(x, kw)

        def outer16(x):
            """ closure is generator, consumed locally """
            z = x + 1
            def inner(x):
                yield x + z
            return list(inner(x))

        def outer17(x):
            """ closure is generator, returned """
            z = x + 1
            def inner(x):
                yield x + z
            return inner(x)

        def outer18(x):
            """ closure is generator, consumed in loop """
            z = x + 1
            def inner(x):
                yield x + z
            for i in inner(x):
                t = i
            return t

        def outer19(x):
            """ closure as arg to another closure """
            z1 = x + 1
            z2 = x + 2
            def inner(x):
                return x + z1
            def inner2(f, x):
                return f(x) + z2
            return inner2(inner, x)

        def outer20(x):
            # """ Test calling numpy in closure """
            z = x + 1
            def inner(x):
                return x + numpy.cos(z)
            return inner(x)

        def outer21(x):
            # """ Test calling numpy import as in closure """
            z = x + 1
            def inner(x):
                return x + np.cos(z)
            return inner(x)

        # functions to test that are expected to pass
        f = [outer1, outer2, outer5, outer6, outer7, outer8,
             outer9, outer10, outer12, outer13, outer14,
             outer15, outer19, outer20, outer21]
        for ref in f:
            cfunc = njit(ref)
            var = 10
            self.assertEqual(cfunc(var), ref(var))
        # test functions that are expected to fail
        with self.assertRaises(NotImplementedError) as raises:
            cfunc = jit(nopython=True)(outer3)
            cfunc(var)
        msg = "Unsupported use of op_LOAD_CLOSURE encountered"
        self.assertIn(msg, str(raises.exception))
...
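Outside of the test harness, the pattern these tests exercise looks like the standalone sketch below: an inner function that closes over a local variable and is inlined by Numba when the outer function is compiled with njit (assuming a recent Numba release).

from numba import njit

@njit
def outer(x):
    y = x + 1
    def inner(z):              # closure over y, inlined at compile time
        return z * z + y
    return inner(x) + inner(x + 1)

print(outer(10))  # 243, identical to the pure-Python result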


Project2_Classification.py

Source: Project2_Classification.py (GitHub)


# -*- coding: utf-8 -*-
"""
Created on Fri Nov 12 19:11:45 2021

@author: Usuario
"""

from matplotlib.pyplot import figure, plot, xlabel, ylabel, legend, show
import sklearn.linear_model as lm
import numpy as np
import pandas as pd
from matplotlib.pylab import (figure, semilogx, loglog, xlabel, ylabel, legend,
                              title, subplot, show, grid)
import numpy as np
from scipy.io import loadmat
from scipy import stats
import sklearn.linear_model as lm
from sklearn import model_selection
from toolbox_02450 import rlr_validate
import torch
from toolbox_02450 import train_neural_net, draw_neural_net, visualize_decision_boundary
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split
import statistics as st
import array

# Dataset declaration and data cleansing

df = pd.read_csv('heart.csv')
df.shape
df.info()
df.isnull().sum()
df.info()

# Creation of matrix X

X = df.values

# Outlier removal

z_scores = stats.zscore(df)
abs_z_scores = np.abs(z_scores)
filtered_entries = (abs_z_scores < 3).all(axis=1)
new_df = df[filtered_entries]

X_new = new_df
y = X_new['target']
X_new = X_new.drop('target', axis=1)

# ---------Getting dummies for the categorical features--------
X = pd.get_dummies(X_new, columns=['cp', 'restecg', 'slope', 'ca', 'thal'], drop_first=False)

X = (X - X.mean()) / X.std()

attributeNames = np.asarray(X.columns)

N, M = np.shape(X)

X = np.asarray(X)
y = np.asarray(y)

# CLASSIFICATION-------------------------

# Cross validation parameters for inner and outer fold.
K_Outer = 5
CV_Outer = model_selection.KFold(K_Outer, shuffle=True)

K_Inner = 5
CV_Inner = model_selection.KFold(K_Inner, shuffle=True)

# Neural network parameters
h = [1, 3, 5, 7, 9]
max_iter = 10000

# --MODEL ERRORS
Error_test_LR = np.empty((K_Inner, 1))
opt_lambda_idx = np.empty((K_Inner, 1))
opt_lambda = np.empty((K_Inner, 1))

Error_train_bl_in = np.empty((K_Inner, 1))
Error_test_bl_in = np.empty((K_Inner, 1))
Error_train_bl_out = np.empty((K_Outer, 1))
Error_test_bl_out = np.empty((K_Outer, 1))

Error_ANN_h = np.empty((K_Inner, 1))
error_in = []
error_out = []
Best_h = np.empty((K_Outer, 1))
Min_Error_h = np.empty((K_Inner, 1))
Error_ANN_out = []


## ----OUTER CROSS VALIDATION FOLD

k_out = 0
for train_index, test_index in CV_Outer.split(X, y):
    print('Outer cross validation fold {0}/{1}:'.format(k_out + 1, K_Outer))

    # Extract training and test set for the outer cross validation fold
    X_train_outer = X[train_index]
    y_train_outer = y[train_index]
    X_test_outer = X[test_index]
    y_test_outer = y[test_index]

    # Fit regularized logistic regression model to training data to predict
    lambda_interval = np.logspace(-8, 2, 50)
    optim_lambdas = np.empty(K_Outer)
    train_error_rate = np.zeros(len(lambda_interval))
    test_error_rate = np.zeros(len(lambda_interval))
    coefficient_norm = np.zeros(len(lambda_interval))

    ## -----INNER CROSS VALIDATION FOLD

    k_in = 0
    for train_index2, test_index2 in CV_Inner.split(X_train_outer, y_train_outer):
        h = [1, 3, 5, 7, 9]
        print('Inner cross validation fold {0}/{1}:'.format(k_in + 1, K_Inner))

        # Extract inner training and test set for current CV fold
        X_train_inner, X_test_inner, y_train_inner, y_test_inner = train_test_split(X_train_outer, y_train_outer,
                                                                                    test_size=.80)

        # ----BASELINE MODEL
        Error_train_bl_in[k_in] = np.sum(y_train_inner != np.argmax(np.bincount(y_train_inner))) / len(y_train_inner)
        Error_test_bl_in[k_in] = np.sum(y_test_inner != np.argmax(np.bincount(y_test_inner))) / len(y_test_inner)

        # vector = np.vectorize(np.int)
        # vector(y_test_inner.numpy())

        # ----LOGISTIC REGRESSION CLASSIFICATION

        # Selection of the best lambda for the inner cross validation fold
        for k in range(0, len(lambda_interval)):

            # Creation of the logistic regression model
            mdl = LogisticRegression(penalty='l2', C=1 / lambda_interval[k])

            # Training of the model with the inner partition of the CV
            mdl.fit(X_train_inner, y_train_inner)

            # Prediction of the model on the inner test partitions
            y_train_est = mdl.predict(X_train_inner).T
            y_test_est = mdl.predict(X_test_inner).T  # y_predict

            # Compute the model error for each lambda
            train_error_rate[k] = np.sum(y_train_est != y_train_inner) / len(y_train_inner)
            test_error_rate[k] = np.sum(y_test_est != y_test_inner) / len(y_test_inner)

            w_est = mdl.coef_[0]
            coefficient_norm[k] = np.sqrt(np.sum(w_est**2))

        # ----ARTIFICIAL NEURAL NETWORK FOR CLASSIFICATION
        X_train_inner = torch.Tensor(X_train_outer[train_index2, :])
        y_train_inner = torch.Tensor(y_train_outer[train_index2])
        X_test_inner = torch.Tensor(X_train_outer[test_index2, :])
        y_test_inner = torch.Tensor(y_train_outer[test_index2])

        y_train_inner = y_train_inner.unsqueeze(1)
        error_in = []

        for i, j in enumerate(h):

            # Create a model for each h
            inner_ann = lambda: torch.nn.Sequential(
                torch.nn.Linear(M, h[i]),  # M features to H hidden units
                # 1st transfer function, either Tanh or ReLU:
                torch.nn.Tanh(),
                torch.nn.Linear(h[i], 1),  # H hidden units to 1 output neuron
                torch.nn.Sigmoid()  # Final transfer function
            )
            loss_fn = torch.nn.BCELoss()
            print('\nTesting h: {0}'.format(j))

            # Train the new model
            net, final_loss_in, learning_curve = train_neural_net(inner_ann,
                                                                  loss_fn,
                                                                  X=X_train_inner,
                                                                  y=y_train_inner,
                                                                  n_replicates=1,
                                                                  max_iter=max_iter)

            print('\n\tBest loss: {}\n'.format(final_loss_in))

            # Determine estimated class labels for the test set
            y_sigmoid_in = net(X_test_inner)  # activation of final node, i.e. prediction of the network
            y_test_est_in = (y_sigmoid_in > .5).type(dtype=torch.uint8)  # threshold output of sigmoidal function
            y_test_in = y_test_inner.type(dtype=torch.uint8)
            # Determine errors and error rate
            e_in = (y_test_est_in != y_test_in)
            error_rate_in = (sum(e_in).type(torch.float) / len(y_test_inner)).data.numpy()
            error_in.append(error_rate_in)  # store error rate for current CV fold
            Error_ANN_h[i] = round(np.mean(error_in), 4)
            # Determine errors and error rate
            # InnerErrors_h[i] = final_loss_in / y_test_inner.shape[0]
            if (Error_ANN_h[i] < Error_ANN_h[i - 1]):
                Besth = j
            else:
                Besth = h[0]

        # Choose the minimum error for given h
        Min_Error_h[k_in] = min(Error_ANN_h)

        # Best h for each inner fold
        Best_h[k_out] = Besth

        k_in += 1

    # COMPUTE THE ERRORS OF THE BEST MODEL FOR THE OUTER FOLD

    # Baseline model
    Error_train_bl_out[k_out] = min(Error_train_bl_in)
    Error_test_bl_out[k_out] = min(Error_test_bl_in)

    p = range(len(y_test_outer))
    y_predict_bl = array.array('i', [])
    for i in p:
        y_predict_bl.append(np.argmax(np.bincount(y_test_outer)))
    len(y_predict_bl)

    # Logistic regression
    Error_test_LR[k_out] = np.min(test_error_rate)
    opt_lambda_idx[k_out] = np.argmin(test_error_rate)
    opt_lambda[k_out] = lambda_interval[int(opt_lambda_idx[k_out])]

    LR = LogisticRegression(penalty='l2', C=1 / opt_lambda[k_out].item())

    LR.fit(X_train_outer, y_train_outer)

    y_predict_LR = LR.predict(X_test_outer).T

    # Neural network for the outer fold
    # - Create outer ANN model
    outer_ann = lambda: torch.nn.Sequential(
        torch.nn.Linear(M, int(np.asarray(Best_h[k_out]))),  # M features to H hidden units
        # 1st transfer function, either Tanh or ReLU:
        torch.nn.Tanh(),
        torch.nn.Linear(int(np.asarray(Best_h[k_out])), 1),  # H hidden units to 1 output neuron
        torch.nn.Sigmoid()  # Final transfer function
    )
    loss_fn = torch.nn.BCELoss()

    # - Training data to pytorch
    X_train_out = torch.Tensor(X[train_index, :])
    y_train_out = torch.Tensor(y[train_index])
    X_test_out = torch.Tensor(X[test_index, :])
    y_test_out = torch.Tensor(y[test_index])

    # - Train the net with the outer data folds
    y_train_out = y_train_out.unsqueeze(1)
    net, final_loss_out, learning_curve = train_neural_net(outer_ann,
                                                           loss_fn,
                                                           X=X_train_out,
                                                           y=y_train_out,
                                                           n_replicates=1,
                                                           max_iter=max_iter)

    # - Compute the errors of the ANN
    # -- Determine estimated class labels for the test set
    y_sigmoid_out = net(X_test_out)  # activation of final node, i.e. prediction of the network

    y_test_est_out = (y_sigmoid_out > .5).type(dtype=torch.uint8)  # threshold output of sigmoidal function

    y_predict_ANN = np.concatenate(y_test_est_out.numpy())

    y_test_out = y_test_out.type(dtype=torch.uint8)

    # -- Determine errors and error rate
    e_out = (y_test_est_out != y_test_out)
    error_rate_out = (sum(e_out).type(torch.float) / len(y_test_out)).data.numpy()
    Error_ANN_out.append(error_rate_out)  # store error rate for current CV fold
    Error_ANN_out[k_out] = round(np.mean(error_in), 4)

    k_out += 1
...
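The script above interleaves model selection and error estimation; stripped of the models, its two-level cross-validation skeleton reduces to the index bookkeeping below (a sketch using scikit-learn's KFold, with hypothetical variable names).

from sklearn import model_selection

def nested_cv_indices(X, y, k_outer=5, k_inner=5):
    # Outer folds estimate generalisation error; inner folds pick the model/hyperparameters.
    cv_outer = model_selection.KFold(k_outer, shuffle=True, random_state=0)
    for outer_train, outer_test in cv_outer.split(X, y):
        cv_inner = model_selection.KFold(k_inner, shuffle=True, random_state=0)
        for inner_train, inner_val in cv_inner.split(outer_train):
            # Map inner positions back to indices of the full dataset.
            yield outer_train[inner_train], outer_train[inner_val], outer_test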


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, right from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g. Selenium, Cypress, and TestNG.


YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run ATX automation tests on the LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!

Get 100 minutes of automation testing FREE!

Next-Gen App & Browser Testing Cloud
