How to use invariant_2 method in hypothesis

Best Python code snippets using hypothesis

statics2.py

Source: statics2.py (GitHub)

# Created by YongHua | 12 April 2021 23:10
# Email: yht1e20@soton.ac.uk
import numpy as np
from math import pi, sin, cos, sqrt

def stress_transformation(exx, eyy, exy, theta):
    """Stress transformation for a 2-D stress tensor matrix.
    Takes four input parameters:
    `exx` >>> the exx stress component.
    `eyy` >>> the eyy stress component.
    `exy` >>> the exy stress component.
    `theta` >>> (in degrees) which is the angle of the stress element to be rotated.
    Returns `exx'`, `eyy'` and `exy'`
    * You can choose to include or omit prefixes e.g. MPa which is 1E6, as always take
    care of units and magnitudes!
    """
    # Convert to radians for the sin and cos functions
    thetar = theta / 180 * pi
    rotation_matrix = np.array([
        [cos(thetar)**2, sin(thetar)**2, sin(2*thetar)],
        [sin(thetar)**2, cos(thetar)**2, -sin(2*thetar)],
        [-cos(thetar)*sin(thetar), cos(thetar)*sin(thetar), cos(2*thetar)]
    ])
    old_stress = np.array([exx, eyy, exy])
    new_stress = np.dot(rotation_matrix, old_stress)
    return new_stress

def find_principle(exx, eyy, exy):
    """Finds the principle values `eI` and `eII` of a 2-D stress element.
    Takes three input parameters: `exx`, `eyy` and `exy`. Returns a list of
    principal stresses"""
    invariant_1 = exx + eyy  # trace
    invariant_2 = exx * eyy - exy ** 2  # determinant
    eI = 0.5 * (invariant_1 + sqrt(invariant_1 ** 2 - 4 * invariant_2))
    eII = 0.5 * (invariant_1 - sqrt(invariant_1 ** 2 - 4 * invariant_2))
    return [eI, eII]  # returns a list

def find_maxshear(exx, eyy, exy):
    """Find the maximum shear stress `exy` in 2-D stress element subject to
    planar rotational transformation. Numerical implementation of Mohr's circle.
    Takes three input parameters: `exx`, `eyy` and `exy`. Returns a list of
    coordinates of maximum shear stresses, in the form [centre, ±radius]"""
    eI, eII = find_principle(exx, eyy, exy)
    centre = (eI + eII) / 2  # centre of Mohr's circle, 0.5tr(e)
    radius = abs(eI - centre)  # radius of Mohr's circle
    assert abs(eI - centre) == abs(eII - centre)
    maxshear = [[centre, radius], [centre, -radius]]
    return maxshear

if __name__ == '__main__':
    # example
    help(stress_transformation)
    print(stress_transformation(40, 860, 375, 30))
    help(find_principle)
    print(find_principle(-25, 75, -56))
    help(find_maxshear)...
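In find_principle, invariant_1 (the trace) and invariant_2 (the determinant) are the two rotation invariants of the 2-D stress tensor, and the principal values eI and eII are the roots of the characteristic equation λ² − invariant_1·λ + invariant_2 = 0. That relation lends itself to a property-based test with hypothesis. The sketch below is not part of the original module: it assumes statics2.py is importable as statics2 and restricts inputs to small integers so the discriminant stays exact and non-negative.

# test_statics2.py -- hedged sketch; the module name `statics2` and the bounds are assumed
import math
from hypothesis import given, strategies as st
from statics2 import find_principle

# Integer stress components keep invariant_1**2 - 4*invariant_2 exact and
# non-negative, so the sqrt inside find_principle cannot fail from rounding.
component = st.integers(min_value=-1000, max_value=1000)

@given(exx=component, eyy=component, exy=component)
def test_principal_values_reproduce_invariants(exx, eyy, exy):
    eI, eII = find_principle(exx, eyy, exy)
    invariant_1 = exx + eyy              # trace
    invariant_2 = exx * eyy - exy ** 2   # determinant
    # Sum of principal values recovers the trace.
    assert math.isclose(eI + eII, invariant_1, abs_tol=1e-6)
    # Product of principal values recovers the determinant.
    assert math.isclose(eI * eII, invariant_2, rel_tol=1e-9, abs_tol=1e-6)
    # The list is ordered with the larger principal value first.
    assert eI >= eII

Running this with pytest exercises the invariant relationship across many randomly generated stress states instead of the single example in the __main__ block.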

test.py

Source: test.py (GitHub)

import torch
import numpy as np
import pickle as pkl
'''
train_mdata_, bb, invariant_1 = pkl.load(open('snapshot.pkl', 'rb'))
print(invariant.shape, invariant.dtype, type(invariant))
tnsr_features_ = train_mdata_._getTensorFeatures().cuda()
basis = bb.getBasisTensor()
bias = bb.getBiasTensor()
sub_basis = basis[:, :, invariant]
sub_bias = bias[:, invariant]
'''
np.random.seed(0)
invariant_2 = np.random.permutation(6452)
#dice = np.random.rand(6452)
#invariant_2[dice < 0.01] = 1
#invariant_2[invariant_1] = True
print(torch.__version__)
print(np.sum(invariant_2))
invariant0 = invariant_2[:10]
invariant1 = invariant_2[:129]
tnsr_features_ = torch.rand(10808, 784).cuda()
basis = (torch.rand(1, 784, 6452) - 0.5).cuda()
bias = (torch.rand(1, 6452) - 0.5).cuda()
sub_basis = basis[:, :, invariant0]
sub_bias = bias[:, invariant0]
sub_basis_ = basis[:, :, invariant1]
sub_bias_ = bias[:, invariant1]
print('=============================')
print(tnsr_features_.dtype, tnsr_features_.shape)
print(basis.dtype, basis.shape)
print(bias.dtype, bias.shape)
print(sub_basis.dtype, sub_basis.shape)
print(sub_bias.dtype, sub_bias.shape)
hashval = torch.matmul(tnsr_features_, basis)
sub_hashval = hashval[:, :, invariant0]
sub_hashval_ = hashval[:, :, invariant1]
sub_config = np.zeros(sub_hashval.shape)
sub_config[sub_hashval.cpu().numpy() > 0] = 1
sub_basis = basis[:, :, invariant0]
sub_basis_ = basis[:, :, invariant1]
sub_bias = bias[:, invariant0]
sub_bias_ = bias[:, invariant1]
new_sub_hashval = torch.matmul(tnsr_features_, sub_basis)
new_sub_hashval_ = torch.matmul(tnsr_features_, sub_basis_)
error = np.sum(np.abs(new_sub_hashval[:,:,:2].cpu().numpy() - new_sub_hashval_[:,:,:2].cpu().numpy()))
print("=================")
print('error:', error)
print("==================")
new_sub_config = np.zeros(new_sub_hashval.shape)
new_sub_config_ = np.zeros(new_sub_hashval_.shape)
new_sub_config[new_sub_hashval.cpu().numpy() > 0] = 1
new_sub_config_[new_sub_hashval_.cpu().numpy() > 0] = 1
diff1 = np.sum(np.abs(sub_hashval.cpu().numpy() - new_sub_hashval.cpu().numpy()))
print('diff1: ', diff1)
diff2 = np.sum(np.abs(sub_config - new_sub_config))
print('diff2: ', diff2)
diff_mat = np.abs(sub_hashval.cpu().numpy() - new_sub_hashval.cpu().numpy())
print(np.amax(diff_mat))
'''
print(np.amin(sub_hashval.numpy()), np.amax(sub_hashval.numpy()))
print(np.amin(new_sub_hashval.numpy()), np.amax(new_sub_hashval.numpy()))...
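Here the random permutation invariant_2 is sliced into index sets (invariant0, invariant1), and the script checks that selecting columns before the projection, torch.matmul(tnsr_features_, basis[:, :, idx]), matches selecting the same columns from the full product, torch.matmul(tnsr_features_, basis)[:, :, idx]. The two are algebraically identical; the printed error, diff1, diff2 and diff_mat quantify the small floating-point discrepancies that different summation orders in the backend can introduce. Below is a minimal CPU-only sketch of the same comparison; the sizes, seed and tolerance are assumptions, chosen so it runs without a GPU:

import torch

torch.manual_seed(0)
features = torch.rand(128, 784)            # stand-in for tnsr_features_
basis = torch.rand(1, 784, 6452) - 0.5     # stand-in for the projection basis
idx = torch.randperm(6452)[:10]            # analogue of invariant0

slice_after = torch.matmul(features, basis)[:, :, idx]
slice_before = torch.matmul(features, basis[:, :, idx])

# Agreement is only up to rounding; exact equality is not guaranteed.
print('max abs diff:', torch.max(torch.abs(slice_after - slice_before)).item())
assert torch.allclose(slice_after, slice_before, atol=1e-3)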

test_prec_np.py

Source: test_prec_np.py (GitHub)

import numpy as np
import pickle as pkl
train_mdata_, bb, invariant = pkl.load(open('snapshot.pkl', 'rb'))
tnsr_features_ = train_mdata_._getTensorFeatures().squeeze().cpu().numpy()
basis = bb.getBasisTensor().squeeze().cpu().numpy()
bias = bb.getBiasTensor().squeeze().cpu().numpy()
sub_basis = basis[:, invariant]
sub_bias = bias[invariant]
print('num boundaries: ', sub_bias.shape)
'''
invariant_2 = np.zeros(6452, dtype='bool')
dice = np.random.rand(6452)
invariant_2[dice < 0.8] = True
#invariant_2[invariant_1] = True
print(np.sum(invariant_2))
invariant = invariant_2
tnsr_features_ = np.random.rand(10808, 784)
basis = np.random.rand(784, 6452) - 0.5
bias = np.random.rand(6452) - 0.5
sub_basis = basis[:, invariant]
sub_bias = bias[invariant]
'''
print('=============================')
print(tnsr_features_.dtype, tnsr_features_.shape)
print(basis.dtype, basis.shape)
print(bias.dtype, bias.shape)
print(sub_basis.dtype, sub_basis.shape)
print(sub_bias.dtype, sub_bias.shape)
hashval = np.matmul(tnsr_features_, basis)
sub_hashval = hashval[:, invariant]
sub_config = np.zeros(sub_hashval.shape)
sub_config[sub_hashval > 0] = 1
sub_basis = basis[:, invariant]
sub_bias = bias[invariant]
new_sub_hashval = np.matmul(tnsr_features_, sub_basis)
new_sub_config = np.zeros(new_sub_hashval.shape)
new_sub_config[new_sub_hashval > 0] = 1
diff1 = np.sum(np.abs(sub_hashval - new_sub_hashval))
print('diff1: ', diff1)
diff2 = np.sum(np.abs(sub_config - new_sub_config))
print('diff2: ', diff2)
diff_mat = np.abs(sub_hashval - new_sub_hashval)
print(np.amax(diff_mat))
'''
print(np.amin(sub_hashval.numpy()), np.amax(sub_hashval.numpy()))
print(np.amin(new_sub_hashval.numpy()), np.amax(new_sub_hashval.numpy()))...
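test_prec_np.py repeats the same comparison in plain NumPy: hashval[:, invariant] (slice after the matmul) versus np.matmul(tnsr_features_, basis[:, invariant]) (slice before it), both for the raw values (diff1) and for the induced sign pattern sub_config (diff2). If you want hypothesis to drive that check over many random column subsets, a sketch along these lines works; the matrix sizes, the seeding scheme and the tolerance are assumptions, not anything taken from the original script:

import numpy as np
from hypothesis import given, settings, strategies as st

N_FEATURES, N_BASIS = 64, 256   # deliberately smaller than the 784 x 6452 case above

@settings(deadline=None)        # matmul timing varies across machines
@given(
    seed=st.integers(min_value=0, max_value=2**32 - 1),
    n_cols=st.integers(min_value=1, max_value=N_BASIS),
)
def test_slice_before_or_after_matmul(seed, n_cols):
    rng = np.random.default_rng(seed)
    features = rng.random((32, N_FEATURES))
    basis = rng.random((N_FEATURES, N_BASIS)) - 0.5
    idx = rng.permutation(N_BASIS)[:n_cols]

    slice_after = np.matmul(features, basis)[:, idx]
    slice_before = np.matmul(features, basis[:, idx])

    # Values agree up to BLAS rounding differences; the sign pattern
    # (sub_config in the script above) can only differ where a projection
    # lands within rounding distance of zero.
    assert np.allclose(slice_after, slice_before, atol=1e-9)

Keeping the tolerance explicit mirrors what diff1 and diff_mat measure in the original script: the differences are tiny but generally not exactly zero.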
