How to use the norm_rows method in Testify

Best Python code snippets using Testify_python
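In the snippets below, norm_rows is a keyword argument that toggles row normalization of a proximity or similarity matrix, so that each row can be compared on a relative scale. As a rough, hedged illustration of the idea only (the helper row_norm_sum below is hypothetical and not taken from any of the quoted projects), row normalization can be sketched like this:

import numpy as np

def row_norm_sum(mat, eps=1e-12):
    # Hypothetical helper: scale each row of a proximity matrix so it sums to 1.
    row_sums = mat.sum(axis=1, keepdims=True)
    return mat / np.maximum(row_sums, eps)  # guard against all-zero rows

proximity = np.array([[0.0, 2.0, 2.0],
                      [1.0, 0.0, 3.0],
                      [4.0, 4.0, 0.0]])
print(row_norm_sum(proximity))  # each row of the output sums to 1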

test_np_distances.py

Source: test_np_distances.py (GitHub)


from gehm.utils.np_distances import *
from tests.test_data import create_test_data

import pytest
import numpy as np
import torch
from numpy import cos, sin


@pytest.mark.distances
def test_nx_second_order_proximity(create_test_data):

    G, G_undir = create_test_data
    nodes = np.array(G.nodes)

    # Test 1: Ordering when subset proximity
    sel1 = nodes[[0, 1, 2, 3]]
    sel2 = nodes[[0, 3, 2, 1]]
    prox1 = nx_second_order_proximity(G, sel1, False)
    prox2 = nx_second_order_proximity(G, sel2, False)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[0, 1] == prox2[0, 3]), "Ordering problem, {} != {}".format(prox1[0, 1], prox2[0, 3])
    assert (prox1[1, 2] == prox2[3, 2]), "Ordering problem, {} != {}".format(prox1[1, 2], prox2[3, 2])

    # Test 2: Ordering when whole network proximity
    prox1 = nx_second_order_proximity(G, sel1, True)
    prox2 = nx_second_order_proximity(G, sel2, True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 3] == prox2[3, 3]), "Ordering problem, {} != {}".format(prox1[1, 3], prox2[3, 3])
    assert (prox1[3, 1] == prox2[1, 1]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 1])

    # Test 3+4: Without row normalization
    prox1 = nx_second_order_proximity(G, sel1, False, norm_rows=False)
    prox2 = nx_second_order_proximity(G, sel2, False, norm_rows=False)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[0, 1] == prox2[0, 3]), "Ordering problem, {} != {}".format(prox1[0, 1], prox2[0, 3])
    assert (prox1[1, 2] == prox2[3, 2]), "Ordering problem, {} != {}".format(prox1[1, 2], prox2[3, 2])

    prox1 = nx_second_order_proximity(G, sel1, True, norm_rows=False)
    prox2 = nx_second_order_proximity(G, sel2, True, norm_rows=False)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 3] == prox2[3, 3]), "Ordering problem, {} != {}".format(prox1[1, 3], prox2[3, 3])
    assert (prox1[3, 1] == prox2[1, 1]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 1])

    # Test 5+6: Without row normalization, but with batch normalization
    prox1 = nx_second_order_proximity(G, sel1, False, norm_rows=False, norm_rows_in_sample=True)
    prox2 = nx_second_order_proximity(G, sel2, False, norm_rows=False, norm_rows_in_sample=True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[0, 1] == prox2[0, 3]), "Ordering problem, {} != {}".format(prox1[0, 1], prox2[0, 3])
    assert (prox1[1, 2] == prox2[3, 2]), "Ordering problem, {} != {}".format(prox1[1, 2], prox2[3, 2])

    prox1 = nx_second_order_proximity(G, sel1, True, norm_rows=False, norm_rows_in_sample=True)
    prox2 = nx_second_order_proximity(G, sel2, True, norm_rows=False, norm_rows_in_sample=True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 3] == prox2[3, 3]), "Ordering problem, {} != {}".format(prox1[1, 3], prox2[3, 3])
    assert (prox1[3, 1] == prox2[1, 1]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 1])

    # Test 7: Whole network, but return batch order
    prox1 = nx_second_order_proximity(G, sel1, True, norm_rows=True, to_batch=True)
    prox2 = nx_second_order_proximity(G, sel2, True, norm_rows=True, to_batch=True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 0] == prox2[3, 0]), "Ordering problem, {} != {}".format(prox1[1, 0], prox2[3, 0])
    assert (prox1[3, 1] == prox2[1, 3]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 3])

    # Test 8: Whole network, but return batch order, no norm
    prox1 = nx_second_order_proximity(G, sel1, True, norm_rows=False, to_batch=True)
    prox2 = nx_second_order_proximity(G, sel2, True, norm_rows=False, to_batch=True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 0] == prox2[3, 0]), "Ordering problem, {} != {}".format(prox1[1, 0], prox2[3, 0])
    assert (prox1[3, 1] == prox2[1, 3]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 3])

    # Now repeat everything with an undirected graph:

    G = G_undir
    nodes = np.array(G.nodes)

    # Test 1: Ordering when subset proximity
    sel1 = nodes[[0, 1, 2, 3]]
    sel2 = nodes[[0, 3, 2, 1]]
    prox1 = nx_second_order_proximity(G, sel1, False)
    prox2 = nx_second_order_proximity(G, sel2, False)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[0, 1] == prox2[0, 3]), "Ordering problem, {} != {}".format(prox1[0, 1], prox2[0, 3])
    assert (prox1[1, 2] == prox2[3, 2]), "Ordering problem, {} != {}".format(prox1[1, 2], prox2[3, 2])

    # Test 2: Ordering when whole network proximity
    prox1 = nx_second_order_proximity(G, sel1, True)
    prox2 = nx_second_order_proximity(G, sel2, True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 3] == prox2[3, 3]), "Ordering problem, {} != {}".format(prox1[1, 3], prox2[3, 3])
    assert (prox1[3, 1] == prox2[1, 1]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 1])

    # Test 3+4: Without row normalization
    prox1 = nx_second_order_proximity(G, sel1, False, norm_rows=False)
    prox2 = nx_second_order_proximity(G, sel2, False, norm_rows=False)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[0, 1] == prox2[0, 3]), "Ordering problem, {} != {}".format(prox1[0, 1], prox2[0, 3])
    assert (prox1[1, 2] == prox2[3, 2]), "Ordering problem, {} != {}".format(prox1[1, 2], prox2[3, 2])

    prox1 = nx_second_order_proximity(G, sel1, True, norm_rows=False)
    prox2 = nx_second_order_proximity(G, sel2, True, norm_rows=False)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 3] == prox2[3, 3]), "Ordering problem, {} != {}".format(prox1[1, 3], prox2[3, 3])
    assert (prox1[3, 1] == prox2[1, 1]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 1])

    # Test 5+6: Without row normalization, but with batch normalization
    prox1 = nx_second_order_proximity(G, sel1, False, norm_rows=False, norm_rows_in_sample=True)
    prox2 = nx_second_order_proximity(G, sel2, False, norm_rows=False, norm_rows_in_sample=True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[0, 1] == prox2[0, 3]), "Ordering problem, {} != {}".format(prox1[0, 1], prox2[0, 3])
    assert (prox1[1, 2] == prox2[3, 2]), "Ordering problem, {} != {}".format(prox1[1, 2], prox2[3, 2])

    prox1 = nx_second_order_proximity(G, sel1, True, norm_rows=False, norm_rows_in_sample=True)
    prox2 = nx_second_order_proximity(G, sel2, True, norm_rows=False, norm_rows_in_sample=True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 3] == prox2[3, 3]), "Ordering problem, {} != {}".format(prox1[1, 3], prox2[3, 3])
    assert (prox1[3, 1] == prox2[1, 1]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 1])

    # Test 7: Whole network, but return batch order
    prox1 = nx_second_order_proximity(G, sel1, True, norm_rows=True, to_batch=True)
    prox2 = nx_second_order_proximity(G, sel2, True, norm_rows=True, to_batch=True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 0] == prox2[3, 0]), "Ordering problem, {} != {}".format(prox1[1, 0], prox2[3, 0])
    assert (prox1[3, 1] == prox2[1, 3]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 3])

    # Test 8: Whole network, but return batch order, no norm
    prox1 = nx_second_order_proximity(G, sel1, True, norm_rows=False, to_batch=True)
    prox2 = nx_second_order_proximity(G, sel2, True, norm_rows=False, to_batch=True)
    prox1 = np.round(prox1, 5)
    prox2 = np.round(prox2, 5)
    assert (prox1[1, 0] == prox2[3, 0]), "Ordering problem, {} != {}".format(prox1[1, 0], prox2[3, 0])
    assert (prox1[3, 1] == prox2[1, 3]), "Ordering problem, {} != {}".format(prox1[3, 1], prox2[1, 3])
    ...
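The test above toggles norm_rows (and the related norm_rows_in_sample and to_batch flags) on nx_second_order_proximity. A minimal usage sketch with the same call pattern is shown below; the karate-club graph is only a stand-in for the fixture graph built by create_test_data, so the printed values are illustrative rather than the test's expected numbers.

import networkx as nx
import numpy as np
from gehm.utils.np_distances import nx_second_order_proximity

G = nx.karate_club_graph()              # stand-in for the fixture graph from create_test_data
sel = np.array(G.nodes)[[0, 1, 2, 3]]   # a small node subset, as in the test

prox_normed = nx_second_order_proximity(G, sel, False)                 # rows normalized (default)
prox_raw = nx_second_order_proximity(G, sel, False, norm_rows=False)   # raw, unnormalized rows

print(np.round(prox_normed, 5))
print(np.round(prox_raw, 5))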


cosine.py

Source: cosine.py (GitHub)


from ..utils import _XXT, rowSum, reflect, setDiagonal
from numpy import zeros, newaxis
from numba import njit, prange
from ..sparse.csr import div_1, mult_1, _XXT as _XXT_sparse, rowSum as rowSum_sparse
from ..sparse.tcsr import _XXT as _XXT_triangular


def cosine_sim_triangular(N, D):
    """
    [Added 21/10/2018]
    Quickly performs X / norm_rows / norm_rows.T on the TCSR matrix.
    """
    n = len(N)
    move = 0

    # loop *-2 and adds S[:, newaxis]
    for i in prange(n-1):
        i1 = i+1

        left = i*i1 // 2
        s = N[i1]
        for j in range(left, left+i1):
            # div N[:, newaxis]
            D[j] /= s

    # loop div N[newaxis, :]
    for a in prange(n-1):
        s = N[a]
        for b in range(a, n-1):
            # div N[newaxis, :] or N
            c = b*(b+1) // 2 + a
            D[c] /= s
    return D

cosine_sim_triangular_single = njit(cosine_sim_triangular, fastmath = True, nogil = True, cache = True)
cosine_sim_triangular_parallel = njit(cosine_sim_triangular, fastmath = True, nogil = True, parallel = True)


@njit(fastmath = True, nogil = True, cache = True)
def cosine_dis(XXT):
    """
    [Added 22/10/2018]
    Performs XXT*-1 + 1 quickly on the lower triangular part.
    """
    n = len(XXT)
    for i in range(n):
        for j in range(i):
            XXT[i, j] *= -1
            XXT[i, j] += 1
    return XXT


@njit(fastmath = True, nogil = True, cache = True)
def cosine_dis_triangular(D):
    """
    [Added 22/10/2018]
    Performs XXT*-1 + 1 quickly on the TCSR.
    """
    D *= -1
    D += 1
    return D


def cosine_similarity(X, Y = None, triangular = False, n_jobs = 1, copy = False):
    """
    [Added 20/10/2018] [Edited 22/201/2018]
    [Edited 22/10/2018 Added Y option]
    Note: when using Y, speed improvement is approx 5% only from Sklearn.
    Cosine similarity is approx the same speed as Sklearn, but uses approx 10%
    less memory. One clear advantage is if you set triangular to TRUE, then it's faster.
    """
    norm_rows = rowSum(X, norm = True)
    if Y is X:
        # Force algo to be triangular cosine rather than normal CS.
        Y = None
    if Y is None:
        if copy:
            XXT = _XXT(X.T)
            XXT /= norm_rows[:, newaxis]
            XXT /= norm_rows  # [newaxis, :]
        else:
            XXT = _XXT( (X/norm_rows[:, newaxis]).T )
        if not triangular:
            XXT = reflect(XXT, n_jobs)
            # diagonal is set to 1
            setDiagonal(XXT, 1)
        return XXT
    else:
        D = X @ Y.T
        D /= norm_rows[:, newaxis]
        D /= rowSum(Y, norm = True)
        return D


def cosine_similarity_sparse(val, colPointer, rowIndices, n, p, triangular = False, dense_output = True,
                             n_jobs = 1, copy = True):
    """
    [Added 20/10/2018] [Edited 21/10/2018]
    Slightly faster than Sklearn's Cosine Similarity implementation.
    If dense_output is set to FALSE, then a TCSR Matrix (Triangular CSR Matrix) is
    provided and not a CSR matrix. This has the advantage of using only 1/2n^2 - n
    memory and not n^2 memory.
    """
    norm_rows = rowSum_sparse(val, colPointer, rowIndices, norm = True)
    if dense_output:
        if copy:
            XXT = _XXT_sparse(val, colPointer, rowIndices, n, p, n_jobs)
            XXT /= norm_rows[:, newaxis]
            XXT /= norm_rows  # [newaxis, :]
        else:
            val = div_1(val, colPointer, rowIndices, norm_rows, n, p, copy = False)
            XXT = _XXT_sparse(val, colPointer, rowIndices, n, p, n_jobs)
            val = mult_1(val, colPointer, rowIndices, norm_rows, n, p, copy = False)
        if not triangular:
            XXT = reflect(XXT, n_jobs)
            # diagonal is set to 1
            setDiagonal(XXT, 1)
    else:
        XXT = _XXT_triangular(val, colPointer, rowIndices, n, p, n_jobs)
        XXT = cosine_triangular_parallel(norm_rows, XXT) if n_jobs != 1 else \
              cosine_triangular_single(norm_rows, XXT)
    return XXT


def cosine_distances(X, Y = None, triangular = False, n_jobs = 1, copy = False):
    """
    [Added 15/10/2018] [Edited 18/10/2018]
    [Edited 22/10/2018 Added Y option]
    Note: when using Y, speed improvement is approx 5-10% only from Sklearn.
    Slightly faster than Sklearn's Cosine Distances implementation.
    If you set triangular to TRUE, the result is much much faster.
    (Approx 50% faster than Sklearn)
    """
    norm_rows = rowSum(X, norm = True)
    if Y is X:
        # Force algo to be triangular cosine rather than normal CS.
        Y = None
    if Y is None:
        if copy:
            XXT = _XXT(X.T)
            XXT /= norm_rows[:, newaxis]
            XXT /= norm_rows  # [newaxis, :]
        else:
            XXT = _XXT( (X/norm_rows[:, newaxis]).T )
        # XXT*-1 + 1
        XXT = cosine_dis(XXT)
        if not triangular:
            XXT = reflect(XXT, n_jobs)
            # diagonal is set to 0 as zero distance between row i and i
            setDiagonal(XXT, 0)
        return XXT
    else:
        D = X @ Y.T
        D /= norm_rows[:, newaxis]
        D /= rowSum(Y, norm = True)
        D *= -1
        D += 1
        return D


def cosine_distances_sparse(val, colPointer, rowIndices, n, p, triangular = False, dense_output = True,
                            n_jobs = 1, copy = True):
    """
    [Added 22/10/2018]
    Slightly faster than Sklearn's Cosine Distances implementation.
    If dense_output is set to FALSE, then a TCSR Matrix (Triangular CSR Matrix) is
    provided and not a CSR matrix. This has the advantage of using only 1/2n^2 - n
    memory and not n^2 memory.
    """
    norm_rows = rowSum_sparse(val, colPointer, rowIndices, norm = True)
    if dense_output:
        if copy:
            XXT = _XXT_sparse(val, colPointer, rowIndices, n, p, n_jobs)
            XXT /= norm_rows[:, newaxis]
            XXT /= norm_rows  # [newaxis, :]
        else:
            val = div_1(val, colPointer, rowIndices, norm_rows, n, p, copy = False)
            XXT = _XXT_sparse(val, colPointer, rowIndices, n, p, n_jobs)
            val = mult_1(val, colPointer, rowIndices, norm_rows, n, p, copy = False)
        # XXT*-1 + 1
        XXT = cosine_dis(XXT)
        if not triangular:
            XXT = reflect(XXT, n_jobs)
            # diagonal is set to 0 as zero distance between row i and i
            setDiagonal(XXT, 0)
    else:
        XXT = _XXT_triangular(val, colPointer, rowIndices, n, p, n_jobs)
        # XXT*-1 + 1
        XXT = cosine_dis_triangular(XXT)
        XXT = cosine_triangular_parallel(norm_rows, XXT) if n_jobs != 1 else \
              cosine_triangular_single(norm_rows, XXT)
    ...
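In cosine.py, norm_rows holds the Euclidean norm of each row (via rowSum(X, norm=True)), and the Gram matrix X @ X.T is divided by it along both axes. The same pattern can be sketched in plain NumPy, without the project's _XXT, rowSum, or Numba helpers; the function name below is my own and chosen only for illustration.

import numpy as np

def cosine_similarity_dense(X):
    # Sketch of the X @ X.T / norm_rows / norm_rows.T pattern used above.
    norm_rows = np.linalg.norm(X, axis=1)   # Euclidean norm of each row
    XXT = X @ X.T                           # Gram matrix
    XXT /= norm_rows[:, np.newaxis]         # divide each row
    XXT /= norm_rows[np.newaxis, :]         # divide each column
    np.fill_diagonal(XXT, 1.0)              # self-similarity is exactly 1
    return XXT

X = np.random.rand(5, 3)
S = cosine_similarity_dense(X)
D = 1.0 - S   # the corresponding cosine distance, as in cosine_distances above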


distances.py

Source: distances.py (GitHub)


import networkx as nx
from typing import Union
from torch import Tensor
import networkx as nx
import torch
from torch.nn.functional import cosine_similarity
from torch import cdist
from gehm.utils.funcs import row_norm


def embedding_first_order_proximity(
    positions: Tensor, norm_rows: bool = True,
) -> Tensor:
    """
    A simple application of euclidian distance to the positions vector in the embedding space.
    Includes row normalization to get relative distances.

    Parameters
    ----------
    positions : Tensor
        Input positions, usually nx2
    norm_rows : bool, optional
        If True, rows will be normed to 1, by default True

    Returns
    -------
    Tensor
        Similarity Matrix
    """

    assert isinstance(positions, Tensor)

    similarity_matrix = cdist(positions, positions, p=2).to(positions.device)
    if norm_rows:
        similarity_matrix = row_norm(similarity_matrix)
    similarity_matrix = 1 - similarity_matrix

    return similarity_matrix - torch.eye(similarity_matrix.shape[0]).to(positions.device)


def embedding_second_order_proximity(
    positions: Tensor, norm_rows: bool = True,
) -> Tensor:
    """
    Derives first the pairwise distances, then compares these distance vectors
    between positions to derive proximities of second order.

    Parameters
    ----------
    positions : Union[Tensor,ndarray]
        Input positions, usually nx2
    norm_rows : bool, optional
        If True, rows will be normed to 1, by default True

    Returns
    -------
    Tensor
        Similarity Matrix
    """
    similarity_matrix = embedding_first_order_proximity(positions, norm_rows=norm_rows)
    similarity_matrix = matrix_cosine(similarity_matrix)

    if norm_rows:
        similarity_matrix = row_norm(similarity_matrix)
    return similarity_matrix.to(positions.device)


def matrix_cosine(mat: Tensor) -> Tensor:
    """
    Applies cosine similarity to a matrix, since pdist or cdist in PyTorch
    only computes Minkowski metrices.

    Parameters
    ----------
    mat : Tensor
        NxN Input

    Returns
    -------
    Tensor
        Cosine Similarity Matrix
    """
    return cosine_similarity(
        mat[..., None, :, :], mat[..., :, None, :], dim=-1
    ...
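In distances.py, norm_rows=True routes the proximity matrices through row_norm before they are returned, so rows become directly comparable. A short usage sketch follows; the 2-D positions are made up, and the module path gehm.utils.distances is assumed from the file name and the package's other imports.

import torch
from gehm.utils.distances import (
    embedding_first_order_proximity,
    embedding_second_order_proximity,
)

positions = torch.tensor([[0.0, 0.0],
                          [1.0, 0.0],
                          [0.0, 2.0],
                          [3.0, 3.0]])

first_normed = embedding_first_order_proximity(positions, norm_rows=True)   # rows normed before 1 - d
first_raw = embedding_first_order_proximity(positions, norm_rows=False)     # plain 1 - distance
second = embedding_second_order_proximity(positions, norm_rows=True)        # cosine over the distance rows

print(first_normed.shape, first_raw.shape, second.shape)  # all torch.Size([4, 4])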


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Testify automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
