How to use the is_up method in autotest

Best Python code snippets using autotest_python

The snippets below show two distinct uses of is_up: in d3_fr_unet.py and fr_unet.py it is a constructor flag that tells a U-Net building block to also emit an upsampled output, while in test_servicegroup.py it is the boolean returned by a service-liveness check.

d3_fr_unet.py

Source: d3_fr_unet.py (GitHub)


import torch
import torch.nn as nn
from models.utils import InitWeights


class conv(nn.Module):
    # Residual double-conv block. The first Conv3d takes out_c input channels,
    # so callers must pass in_c == out_c (block below always does).
    def __init__(self, in_c, out_c, dp=0):
        super(conv, self).__init__()
        self.in_c = in_c
        self.out_c = out_c
        self.conv = nn.Sequential(
            nn.Conv3d(out_c, out_c, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm3d(out_c),
            nn.Dropout3d(dp),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv3d(out_c, out_c, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm3d(out_c),
            nn.Dropout3d(dp),
            nn.LeakyReLU(0.1, inplace=True))
        self.relu = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        res = x
        x = self.conv(x)
        out = x + res
        out = self.relu(out)
        return out  # the scraped source returned x here, discarding the residual sum


class feature_fuse(nn.Module):
    def __init__(self, in_c, out_c):
        super(feature_fuse, self).__init__()
        self.conv11 = nn.Conv3d(
            in_c, out_c, kernel_size=1, padding=0, bias=False)
        self.conv33 = nn.Conv3d(
            in_c, out_c, kernel_size=3, padding=1, bias=False)
        self.conv33_di = nn.Conv3d(
            in_c, out_c, kernel_size=3, padding=2, bias=False, dilation=2)
        self.norm = nn.BatchNorm3d(out_c)

    def forward(self, x):
        x1 = self.conv11(x)
        x2 = self.conv33(x)
        x3 = self.conv33_di(x)
        out = self.norm(x1 + x2 + x3)
        return out


class up(nn.Module):
    def __init__(self, in_c, out_c, stride=[1, 2, 2]):
        super(up, self).__init__()
        self.up = nn.Sequential(
            nn.ConvTranspose3d(in_c, out_c, kernel_size=stride,
                               padding=0, stride=stride, bias=False),
            nn.BatchNorm3d(out_c),
            nn.LeakyReLU(0.1, inplace=False))

    def forward(self, x):
        x = self.up(x)
        return x


class down(nn.Module):
    def __init__(self, in_c, out_c, stride=[1, 2, 2]):
        super(down, self).__init__()
        self.down = nn.Sequential(
            nn.Conv3d(in_c, out_c, kernel_size=stride,
                      padding=0, stride=stride, bias=False),
            nn.BatchNorm3d(out_c),
            nn.LeakyReLU(0.1, inplace=True))

    def forward(self, x):
        x = self.down(x)
        return x


class block(nn.Module):
    def __init__(self, in_c, out_c, dp=0, is_up=False, is_down=False, fuse=False):
        super(block, self).__init__()
        self.in_c = in_c
        self.out_c = out_c
        if fuse:
            self.fuse = feature_fuse(in_c, out_c)
        else:
            # 1x1x1 projection; the scraped source used nn.Conv2d here, which
            # cannot consume the 5-D tensors of this 3-D variant
            self.fuse = nn.Conv3d(in_c, out_c, kernel_size=1, stride=1)
        self.is_up = is_up
        self.is_down = is_down
        self.conv = conv(out_c, out_c, dp=dp)
        if self.is_up:
            self.up = up(out_c, out_c // 2)
        if self.is_down:
            self.down = down(out_c, out_c * 2)

    def forward(self, x):
        if self.in_c != self.out_c:
            x = self.fuse(x)
        x = self.conv(x)
        if not self.is_up and not self.is_down:
            return x
        elif self.is_up and not self.is_down:
            x_up = self.up(x)
            return x, x_up
        elif not self.is_up and self.is_down:
            x_down = self.down(x)
            return x, x_down
        else:
            x_up = self.up(x)
            x_down = self.down(x)
            return x, x_up, x_down


class final_conv(nn.Module):
    def __init__(self, in_c, out_c) -> None:
        super().__init__()
        self.conv1 = nn.Conv3d(in_c, 1, kernel_size=1)
        # assumes the depth axis holds 8 slices, which become channels below
        self.conv2 = nn.Conv2d(8, out_c, kernel_size=1)

    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x.squeeze(1))
        return x


class D3_FR_UNet(nn.Module):
    def __init__(self, num_classes=1, num_channels=1, feature_scale=2,
                 dropout=0.2, fuse=True, out_ave=True):
        super(D3_FR_UNet, self).__init__()
        self.out_ave = out_ave
        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / feature_scale) for x in filters]
        self.block1_3 = block(
            num_channels, filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block1_2 = block(
            filters[0], filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block1_1 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block10 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block11 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block12 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=False, fuse=fuse)
        self.block13 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=False, fuse=fuse)
        self.block2_2 = block(
            filters[1], filters[1], dp=dropout, is_up=True, is_down=True, fuse=fuse)
        self.block2_1 = block(
            filters[1]*2, filters[1], dp=dropout, is_up=True, is_down=True, fuse=fuse)
        self.block20 = block(
            filters[1]*3, filters[1], dp=dropout, is_up=True, is_down=True, fuse=fuse)
        self.block21 = block(
            filters[1]*3, filters[1], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.block22 = block(
            filters[1]*3, filters[1], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.block3_1 = block(
            filters[2], filters[2], dp=dropout, is_up=True, is_down=True, fuse=fuse)
        self.block30 = block(
            filters[2]*2, filters[2], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.block31 = block(
            filters[2]*3, filters[2], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.block40 = block(
            filters[3], filters[3], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.final1 = final_conv(filters[0], num_classes)
        self.final2 = final_conv(filters[0], num_classes)
        self.final3 = final_conv(filters[0], num_classes)
        self.final4 = final_conv(filters[0], num_classes)
        self.final5 = final_conv(filters[0], num_classes)
        self.fuse = nn.Conv3d(5, num_classes, kernel_size=1, padding=0, bias=True)
        self.apply(InitWeights)

    def forward(self, x):
        x1_3, x_down1_3 = self.block1_3(x)
        x1_2, x_down1_2 = self.block1_2(x1_3)
        x2_2, x_up2_2, x_down2_2 = self.block2_2(x_down1_3)
        x1_1, x_down1_1 = self.block1_1(torch.cat([x1_2, x_up2_2], dim=1))
        x2_1, x_up2_1, x_down2_1 = self.block2_1(
            torch.cat([x_down1_2, x2_2], dim=1))
        x3_1, x_up3_1, x_down3_1 = self.block3_1(x_down2_2)
        x10, x_down10 = self.block10(torch.cat([x1_1, x_up2_1], dim=1))
        x20, x_up20, x_down20 = self.block20(
            torch.cat([x_down1_1, x2_1, x_up3_1], dim=1))
        x30, x_up30 = self.block30(torch.cat([x_down2_1, x3_1], dim=1))
        _, x_up40 = self.block40(x_down3_1)
        x11, x_down11 = self.block11(torch.cat([x10, x_up20], dim=1))
        x21, x_up21 = self.block21(torch.cat([x_down10, x20, x_up30], dim=1))
        _, x_up31 = self.block31(torch.cat([x_down20, x30, x_up40], dim=1))
        x12 = self.block12(torch.cat([x11, x_up21], dim=1))
        _, x_up22 = self.block22(torch.cat([x_down11, x21, x_up31], dim=1))
        x13 = self.block13(torch.cat([x12, x_up22], dim=1))
        if self.out_ave:
            output = (self.final1(x1_1) + self.final2(x10) +
                      self.final3(x11) + self.final4(x12) + self.final5(x13)) / 5
        else:
            output = self.final5(x13)
        return output  # the snippet was truncated here; return added to complete forward
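
The is_up flag controls whether a block returns an extra upsampled tensor. Here is a minimal sketch of the calling convention, assuming the d3_fr_unet.py definitions above are importable (the input shape is purely illustrative):

import torch

x = torch.randn(2, 1, 8, 64, 64)  # batch, channels, depth, height, width

# is_down=True: the block returns its feature map plus a downsampled copy.
b = block(in_c=1, out_c=32, dp=0.2, is_up=False, is_down=True, fuse=True)
feat, feat_down = b(x)
print(feat.shape)       # torch.Size([2, 32, 8, 64, 64])
print(feat_down.shape)  # torch.Size([2, 64, 8, 32, 32]); stride [1, 2, 2] keeps depth

# is_up=True as well: a third, upsampled tensor is returned.
b2 = block(in_c=32, out_c=32, dp=0.2, is_up=True, is_down=True, fuse=True)
feat2, feat2_up, feat2_down = b2(feat)
print(feat2_up.shape)   # torch.Size([2, 16, 8, 128, 128])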


fr_unet.py

Source: fr_unet.py (GitHub)


import torch
import torch.nn as nn
from models.utils import InitWeights


class conv(nn.Module):
    # Residual double-conv block; as in the 3-D variant, callers must pass
    # in_c == out_c because the first Conv2d takes out_c input channels.
    def __init__(self, in_c, out_c, dp=0):
        super(conv, self).__init__()
        self.in_c = in_c
        self.out_c = out_c
        self.conv = nn.Sequential(
            nn.Conv2d(out_c, out_c, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_c),
            nn.Dropout2d(dp),
            nn.LeakyReLU(0.1, inplace=True),
            nn.Conv2d(out_c, out_c, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_c),
            nn.Dropout2d(dp),
            nn.LeakyReLU(0.1, inplace=True))
        self.relu = nn.LeakyReLU(0.1, inplace=True)

    def forward(self, x):
        res = x
        x = self.conv(x)
        out = x + res
        out = self.relu(out)
        return out  # the scraped source returned x here, discarding the residual sum


class feature_fuse(nn.Module):
    def __init__(self, in_c, out_c):
        super(feature_fuse, self).__init__()
        self.conv11 = nn.Conv2d(
            in_c, out_c, kernel_size=1, padding=0, bias=False)
        self.conv33 = nn.Conv2d(
            in_c, out_c, kernel_size=3, padding=1, bias=False)
        self.conv33_di = nn.Conv2d(
            in_c, out_c, kernel_size=3, padding=2, bias=False, dilation=2)
        self.norm = nn.BatchNorm2d(out_c)

    def forward(self, x):
        x1 = self.conv11(x)
        x2 = self.conv33(x)
        x3 = self.conv33_di(x)
        out = self.norm(x1 + x2 + x3)
        return out


class up(nn.Module):
    def __init__(self, in_c, out_c, dp=0):
        super(up, self).__init__()
        self.up = nn.Sequential(
            nn.ConvTranspose2d(in_c, out_c, kernel_size=2,
                               padding=0, stride=2, bias=False),
            nn.BatchNorm2d(out_c),
            nn.LeakyReLU(0.1, inplace=False))

    def forward(self, x):
        x = self.up(x)
        return x


class down(nn.Module):
    def __init__(self, in_c, out_c, dp=0):
        super(down, self).__init__()
        self.down = nn.Sequential(
            nn.Conv2d(in_c, out_c, kernel_size=2,
                      padding=0, stride=2, bias=False),
            nn.BatchNorm2d(out_c),
            nn.LeakyReLU(0.1, inplace=True))

    def forward(self, x):
        x = self.down(x)
        return x


class block(nn.Module):
    def __init__(self, in_c, out_c, dp=0, is_up=False, is_down=False, fuse=False):
        super(block, self).__init__()
        self.in_c = in_c
        self.out_c = out_c
        if fuse:
            self.fuse = feature_fuse(in_c, out_c)
        else:
            self.fuse = nn.Conv2d(in_c, out_c, kernel_size=1, stride=1)
        self.is_up = is_up
        self.is_down = is_down
        self.conv = conv(out_c, out_c, dp=dp)
        if self.is_up:
            self.up = up(out_c, out_c // 2)
        if self.is_down:
            self.down = down(out_c, out_c * 2)

    def forward(self, x):
        if self.in_c != self.out_c:
            x = self.fuse(x)
        x = self.conv(x)
        if not self.is_up and not self.is_down:
            return x
        elif self.is_up and not self.is_down:
            x_up = self.up(x)
            return x, x_up
        elif not self.is_up and self.is_down:
            x_down = self.down(x)
            return x, x_down
        else:
            x_up = self.up(x)
            x_down = self.down(x)
            return x, x_up, x_down


class FR_UNet(nn.Module):
    def __init__(self, num_classes=1, num_channels=1, feature_scale=2,
                 dropout=0.1, fuse=True, out_ave=True):
        super(FR_UNet, self).__init__()
        self.out_ave = out_ave
        filters = [64, 128, 256, 512, 1024]
        filters = [int(x / feature_scale) for x in filters]
        self.block1_3 = block(
            num_channels, filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block1_2 = block(
            filters[0], filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block1_1 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block10 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block11 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=True, fuse=fuse)
        self.block12 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=False, fuse=fuse)
        self.block13 = block(
            filters[0]*2, filters[0], dp=dropout, is_up=False, is_down=False, fuse=fuse)
        self.block2_2 = block(
            filters[1], filters[1], dp=dropout, is_up=True, is_down=True, fuse=fuse)
        self.block2_1 = block(
            filters[1]*2, filters[1], dp=dropout, is_up=True, is_down=True, fuse=fuse)
        self.block20 = block(
            filters[1]*3, filters[1], dp=dropout, is_up=True, is_down=True, fuse=fuse)
        self.block21 = block(
            filters[1]*3, filters[1], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.block22 = block(
            filters[1]*3, filters[1], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.block3_1 = block(
            filters[2], filters[2], dp=dropout, is_up=True, is_down=True, fuse=fuse)
        self.block30 = block(
            filters[2]*2, filters[2], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.block31 = block(
            filters[2]*3, filters[2], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.block40 = block(
            filters[3], filters[3], dp=dropout, is_up=True, is_down=False, fuse=fuse)
        self.final1 = nn.Conv2d(
            filters[0], num_classes, kernel_size=1, padding=0, bias=True)
        self.final2 = nn.Conv2d(
            filters[0], num_classes, kernel_size=1, padding=0, bias=True)
        self.final3 = nn.Conv2d(
            filters[0], num_classes, kernel_size=1, padding=0, bias=True)
        self.final4 = nn.Conv2d(
            filters[0], num_classes, kernel_size=1, padding=0, bias=True)
        self.final5 = nn.Conv2d(
            filters[0], num_classes, kernel_size=1, padding=0, bias=True)
        self.fuse = nn.Conv2d(
            5, num_classes, kernel_size=1, padding=0, bias=True)
        self.apply(InitWeights)

    def forward(self, x):
        x1_3, x_down1_3 = self.block1_3(x)
        x1_2, x_down1_2 = self.block1_2(x1_3)
        x2_2, x_up2_2, x_down2_2 = self.block2_2(x_down1_3)
        x1_1, x_down1_1 = self.block1_1(torch.cat([x1_2, x_up2_2], dim=1))
        x2_1, x_up2_1, x_down2_1 = self.block2_1(
            torch.cat([x_down1_2, x2_2], dim=1))
        x3_1, x_up3_1, x_down3_1 = self.block3_1(x_down2_2)
        x10, x_down10 = self.block10(torch.cat([x1_1, x_up2_1], dim=1))
        x20, x_up20, x_down20 = self.block20(
            torch.cat([x_down1_1, x2_1, x_up3_1], dim=1))
        x30, x_up30 = self.block30(torch.cat([x_down2_1, x3_1], dim=1))
        _, x_up40 = self.block40(x_down3_1)
        x11, x_down11 = self.block11(torch.cat([x10, x_up20], dim=1))
        x21, x_up21 = self.block21(torch.cat([x_down10, x20, x_up30], dim=1))
        _, x_up31 = self.block31(torch.cat([x_down20, x30, x_up40], dim=1))
        x12 = self.block12(torch.cat([x11, x_up21], dim=1))
        _, x_up22 = self.block22(torch.cat([x_down11, x21, x_up31], dim=1))
        x13 = self.block13(torch.cat([x12, x_up22], dim=1))
        if self.out_ave:
            output = (self.final1(x1_1) + self.final2(x10) +
                      self.final3(x11) + self.final4(x12) + self.final5(x13)) / 5
        else:
            output = self.final5(x13)
        return output  # the snippet was truncated here; return added to complete forward
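
A quick smoke test of the 2-D model, assuming models.utils.InitWeights from the same repository is on the import path; the spatial size is arbitrary but must be divisible by 8, since the network downsamples three times:

import torch

model = FR_UNet(num_classes=1, num_channels=1)
model.eval()
x = torch.randn(1, 1, 64, 64)
with torch.no_grad():
    out = model(x)
print(out.shape)  # torch.Size([1, 1, 64, 64]); predictions stay at full resolution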


test_servicegroup.py

Source: test_servicegroup.py (GitHub)


# ... (snippet truncated in the source; imports and test-class header elided)

    def test_service_is_up_forced_down(self):
        kwarg = {'forced_down': True}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertFalse(is_up)

    def test_service_is_up_alive(self):
        kwarg = {'last_seen_up': timeutils.utcnow(True)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertTrue(is_up)

    def test_service_is_up_alive_with_created(self):
        kwarg = {'created_at': timeutils.utcnow(True)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertTrue(is_up)

    def test_service_is_up_alive_with_updated(self):
        kwarg = {'updated_at': timeutils.utcnow(True)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertTrue(is_up)

    def test_service_is_up_alive_with_all_three(self):
        kwarg = {'created_at': timeutils.utcnow(True),
                 'updated_at': timeutils.utcnow(True),
                 'last_seen_up': timeutils.utcnow(True)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertTrue(is_up)

    def test_service_is_up_alive_with_latest_update(self):
        kwarg = {'created_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC),
                 'updated_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC),
                 'last_seen_up': timeutils.utcnow(True)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertTrue(is_up)

    def test_service_is_up_down(self):
        kwarg = {'last_seen_up': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertFalse(is_up)

    def test_service_is_up_down_with_create(self):
        kwarg = {'created_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertFalse(is_up)

    def test_service_is_up_down_with_update(self):
        kwarg = {'updated_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertFalse(is_up)

    def test_service_is_up_down_with_all_three(self):
        kwarg = {'last_seen_up': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC),
                 'created_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC),
                 'updated_at': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        self.assertFalse(is_up)

    def test_service_is_up_down_with_old_update(self):
        kwarg = {'last_seen_up': datetime.datetime(1970, 1, 1, tzinfo=pytz.UTC),
                 'created_at': timeutils.utcnow(True),
                 'updated_at': timeutils.utcnow(True)}
        magnum_object = obj_util.get_test_magnum_service_object(
            self.context, **kwarg)
        is_up = self.servicegroup_api.service_is_up(magnum_object)
        # ... (snippet truncated in the source)
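
These tests exercise magnum's servicegroup API rather than reading an is_up attribute off the object directly. As a rough, illustrative sketch of the behaviour the assertions imply (not magnum's actual source), service_is_up reports a service as down when it is forced down or when its freshest timestamp is older than a configured threshold; SERVICE_DOWN_TIME and the timestamp fallback order are assumptions inferred from the test expectations:

from oslo_utils import timeutils

SERVICE_DOWN_TIME = 180  # seconds; assumed -- magnum reads this from configuration


def service_is_up(member):
    """Illustrative re-implementation of what the tests above assert."""
    if member.forced_down:
        return False
    # Prefer the explicit heartbeat, then fall back to updated_at/created_at,
    # matching the priority the "old_update" test implies.
    last_heartbeat = (member.last_seen_up or
                      member.updated_at or
                      member.created_at)
    elapsed = timeutils.delta_seconds(last_heartbeat, timeutils.utcnow(True))
    return abs(elapsed) <= SERVICE_DOWN_TIME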


