How to use the mult method in Nose

Best Python code snippets using nose
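The snippets below are real-world test and model files collected from GitHub. As a warm-up, here is a minimal, self-contained sketch of the pattern nose expects; the mult() helper is hypothetical (it is not defined in any snippet below), and nose collects any function whose name starts with test:

# A minimal sketch of a nose-style test. The mult() helper is
# hypothetical, used only to illustrate the test pattern; nose
# discovers functions named test_* and runs their plain asserts.
def mult(a, b):
    return a * b

def test_mult():
    assert mult(3, 4) == 12
    assert mult(-2, 5) == -10

Run it with nosetests <file>.py; pytest collects the same tests if you are migrating off the (now unmaintained) nose.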

test_indexing.py

Source: test_indexing.py (GitHub)


from datetime import timedelta

import numpy as np
import pytest

from pandas.errors import InvalidIndexError

import pandas as pd
from pandas import Categorical, Index, MultiIndex, date_range
import pandas._testing as tm


class TestSliceLocs:
    def test_slice_locs_partial(self, idx):
        sorted_idx, _ = idx.sortlevel(0)
        result = sorted_idx.slice_locs(("foo", "two"), ("qux", "one"))
        assert result == (1, 5)
        result = sorted_idx.slice_locs(None, ("qux", "one"))
        assert result == (0, 5)
        result = sorted_idx.slice_locs(("foo", "two"), None)
        assert result == (1, len(sorted_idx))
        result = sorted_idx.slice_locs("bar", "baz")
        assert result == (2, 4)

    def test_slice_locs(self):
        df = tm.makeTimeDataFrame()
        stacked = df.stack()
        idx = stacked.index
        slob = slice(*idx.slice_locs(df.index[5], df.index[15]))
        sliced = stacked[slob]
        expected = df[5:16].stack()
        tm.assert_almost_equal(sliced.values, expected.values)
        slob = slice(
            *idx.slice_locs(
                df.index[5] + timedelta(seconds=30),
                df.index[15] - timedelta(seconds=30),
            )
        )
        sliced = stacked[slob]
        expected = df[6:15].stack()
        tm.assert_almost_equal(sliced.values, expected.values)

    def test_slice_locs_with_type_mismatch(self):
        df = tm.makeTimeDataFrame()
        stacked = df.stack()
        idx = stacked.index
        with pytest.raises(TypeError, match="^Level type mismatch"):
            idx.slice_locs((1, 3))
        with pytest.raises(TypeError, match="^Level type mismatch"):
            idx.slice_locs(df.index[5] + timedelta(seconds=30), (5, 2))
        df = tm.makeCustomDataframe(5, 5)
        stacked = df.stack()
        idx = stacked.index
        with pytest.raises(TypeError, match="^Level type mismatch"):
            idx.slice_locs(timedelta(seconds=30))
        # TODO: Try creating a UnicodeDecodeError in exception message
        with pytest.raises(TypeError, match="^Level type mismatch"):
            idx.slice_locs(df.index[1], (16, "a"))

    def test_slice_locs_not_sorted(self):
        index = MultiIndex(
            levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],
            codes=[
                np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                np.array([1, 0, 1, 1, 0, 0, 1, 0]),
            ],
        )
        msg = "[Kk]ey length.*greater than MultiIndex lexsort depth"
        with pytest.raises(KeyError, match=msg):
            index.slice_locs((1, 0, 1), (2, 1, 0))
        # works
        sorted_index, _ = index.sortlevel(0)
        # should there be a test case here???
        sorted_index.slice_locs((1, 0, 1), (2, 1, 0))

    def test_slice_locs_not_contained(self):
        # some searchsorted action
        index = MultiIndex(
            levels=[[0, 2, 4, 6], [0, 2, 4]],
            codes=[[0, 0, 0, 1, 1, 2, 3, 3, 3], [0, 1, 2, 1, 2, 2, 0, 1, 2]],
        )
        result = index.slice_locs((1, 0), (5, 2))
        assert result == (3, 6)
        result = index.slice_locs(1, 5)
        assert result == (3, 6)
        result = index.slice_locs((2, 2), (5, 2))
        assert result == (3, 6)
        result = index.slice_locs(2, 5)
        assert result == (3, 6)
        result = index.slice_locs((1, 0), (6, 3))
        assert result == (3, 8)
        result = index.slice_locs(-1, 10)
        assert result == (0, len(index))

    @pytest.mark.parametrize(
        "index_arr,expected,start_idx,end_idx",
        [
            ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, None),
            ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, "b"),
            ([[np.nan, "a", "b"], ["c", "d", "e"]], (0, 3), np.nan, ("b", "e")),
            ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), None),
            ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), "c"),
            ([["a", "b", "c"], ["d", np.nan, "e"]], (1, 3), ("b", np.nan), ("c", "e")),
        ],
    )
    def test_slice_locs_with_missing_value(
        self, index_arr, expected, start_idx, end_idx
    ):
        # issue 19132
        idx = MultiIndex.from_arrays(index_arr)
        result = idx.slice_locs(start=start_idx, end=end_idx)
        assert result == expected


def test_putmask_with_wrong_mask(idx):
    # GH18368
    msg = "putmask: mask and data must be the same size"
    with pytest.raises(ValueError, match=msg):
        idx.putmask(np.ones(len(idx) + 1, np.bool_), 1)
    with pytest.raises(ValueError, match=msg):
        idx.putmask(np.ones(len(idx) - 1, np.bool_), 1)
    with pytest.raises(ValueError, match=msg):
        idx.putmask("foo", 1)


class TestGetIndexer:
    def test_get_indexer(self):
        major_axis = Index(np.arange(4))
        minor_axis = Index(np.arange(2))
        major_codes = np.array([0, 0, 1, 2, 2, 3, 3], dtype=np.intp)
        minor_codes = np.array([0, 1, 0, 0, 1, 0, 1], dtype=np.intp)
        index = MultiIndex(
            levels=[major_axis, minor_axis], codes=[major_codes, minor_codes]
        )
        idx1 = index[:5]
        idx2 = index[[1, 3, 5]]
        r1 = idx1.get_indexer(idx2)
        tm.assert_almost_equal(r1, np.array([1, 3, -1], dtype=np.intp))
        r1 = idx2.get_indexer(idx1, method="pad")
        e1 = np.array([-1, 0, 0, 1, 1], dtype=np.intp)
        tm.assert_almost_equal(r1, e1)
        r2 = idx2.get_indexer(idx1[::-1], method="pad")
        tm.assert_almost_equal(r2, e1[::-1])
        rffill1 = idx2.get_indexer(idx1, method="ffill")
        tm.assert_almost_equal(r1, rffill1)
        r1 = idx2.get_indexer(idx1, method="backfill")
        e1 = np.array([0, 0, 1, 1, 2], dtype=np.intp)
        tm.assert_almost_equal(r1, e1)
        r2 = idx2.get_indexer(idx1[::-1], method="backfill")
        tm.assert_almost_equal(r2, e1[::-1])
        rbfill1 = idx2.get_indexer(idx1, method="bfill")
        tm.assert_almost_equal(r1, rbfill1)
        # pass non-MultiIndex
        r1 = idx1.get_indexer(idx2.values)
        rexp1 = idx1.get_indexer(idx2)
        tm.assert_almost_equal(r1, rexp1)
        r1 = idx1.get_indexer([1, 2, 3])
        assert (r1 == [-1, -1, -1]).all()
        # create index with duplicates
        idx1 = Index(list(range(10)) + list(range(10)))
        idx2 = Index(list(range(20)))
        msg = "Reindexing only valid with uniquely valued Index objects"
        with pytest.raises(InvalidIndexError, match=msg):
            idx1.get_indexer(idx2)

    def test_get_indexer_nearest(self):
        midx = MultiIndex.from_tuples([("a", 1), ("b", 2)])
        msg = (
            "method='nearest' not implemented yet for MultiIndex; "
            "see GitHub issue 9365"
        )
        with pytest.raises(NotImplementedError, match=msg):
            midx.get_indexer(["a"], method="nearest")
        msg = "tolerance not implemented yet for MultiIndex"
        with pytest.raises(NotImplementedError, match=msg):
            midx.get_indexer(["a"], method="pad", tolerance=2)

    def test_get_indexer_categorical_time(self):
        # https://github.com/pandas-dev/pandas/issues/21390
        midx = MultiIndex.from_product(
            [
                Categorical(["a", "b", "c"]),
                Categorical(date_range("2012-01-01", periods=3, freq="H")),
            ]
        )
        result = midx.get_indexer(midx)
        tm.assert_numpy_array_equal(result, np.arange(9, dtype=np.intp))

    @pytest.mark.parametrize(
        "index_arr,labels,expected",
        [
            (
                [[1, np.nan, 2], [3, 4, 5]],
                [1, np.nan, 2],
                np.array([-1, -1, -1], dtype=np.intp),
            ),
            ([[1, np.nan, 2], [3, 4, 5]], [(np.nan, 4)], np.array([1], dtype=np.intp)),
            ([[1, 2, 3], [np.nan, 4, 5]], [(1, np.nan)], np.array([0], dtype=np.intp)),
            (
                [[1, 2, 3], [np.nan, 4, 5]],
                [np.nan, 4, 5],
                np.array([-1, -1, -1], dtype=np.intp),
            ),
        ],
    )
    def test_get_indexer_with_missing_value(self, index_arr, labels, expected):
        # issue 19132
        idx = MultiIndex.from_arrays(index_arr)
        result = idx.get_indexer(labels)
        tm.assert_numpy_array_equal(result, expected)

    def test_get_indexer_methods(self):
        # https://github.com/pandas-dev/pandas/issues/29896
        # test getting an indexer for another index with different methods
        # confirms that getting an indexer without a filling method, getting an
        # indexer and backfilling, and getting an indexer and padding all behave
        # correctly in the case where all of the target values fall in between
        # several levels in the MultiIndex into which they are getting an indexer
        #
        # visually, the MultiIndexes used in this test are:
        # mult_idx_1:
        #  0: -1 0
        #  1:    2
        #  2:    3
        #  3:    4
        #  4:  0 0
        #  5:    2
        #  6:    3
        #  7:    4
        #  8:  1 0
        #  9:    2
        # 10:    3
        # 11:    4
        #
        # mult_idx_2:
        #  0:  0 1
        #  1:    3
        #  2:    4
        mult_idx_1 = MultiIndex.from_product([[-1, 0, 1], [0, 2, 3, 4]])
        mult_idx_2 = MultiIndex.from_product([[0], [1, 3, 4]])

        indexer = mult_idx_1.get_indexer(mult_idx_2)
        expected = np.array([-1, 6, 7], dtype=indexer.dtype)
        tm.assert_almost_equal(expected, indexer)

        backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="backfill")
        expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype)
        tm.assert_almost_equal(expected, backfill_indexer)

        # ensure the legacy "bfill" option functions identically to "backfill"
        backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="bfill")
        expected = np.array([5, 6, 7], dtype=backfill_indexer.dtype)
        tm.assert_almost_equal(expected, backfill_indexer)

        pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="pad")
        expected = np.array([4, 6, 7], dtype=pad_indexer.dtype)
        tm.assert_almost_equal(expected, pad_indexer)

        # ensure the legacy "ffill" option functions identically to "pad"
        pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="ffill")
        expected = np.array([4, 6, 7], dtype=pad_indexer.dtype)
        tm.assert_almost_equal(expected, pad_indexer)

    def test_get_indexer_three_or_more_levels(self):
        # https://github.com/pandas-dev/pandas/issues/29896
        # tests get_indexer() on MultiIndexes with 3+ levels
        # visually, these are
        # mult_idx_1:
        #  0: 1 2 5
        #  1:     7
        #  2:   4 5
        #  3:     7
        #  4:   6 5
        #  5:     7
        #  6: 3 2 5
        #  7:     7
        #  8:   4 5
        #  9:     7
        # 10:   6 5
        # 11:     7
        #
        # mult_idx_2:
        #  0: 1 1 8
        #  1: 1 5 9
        #  2: 1 6 7
        #  3: 2 1 6
        #  4: 2 7 6
        #  5: 2 7 8
        #  6: 3 6 8
        mult_idx_1 = pd.MultiIndex.from_product([[1, 3], [2, 4, 6], [5, 7]])
        mult_idx_2 = pd.MultiIndex.from_tuples(
            [
                (1, 1, 8),
                (1, 5, 9),
                (1, 6, 7),
                (2, 1, 6),
                (2, 7, 7),
                (2, 7, 8),
                (3, 6, 8),
            ]
        )
        # sanity check
        assert mult_idx_1.is_monotonic
        assert mult_idx_1.is_unique
        assert mult_idx_2.is_monotonic
        assert mult_idx_2.is_unique

        # show the relationships between the two
        assert mult_idx_2[0] < mult_idx_1[0]
        assert mult_idx_1[3] < mult_idx_2[1] < mult_idx_1[4]
        assert mult_idx_1[5] == mult_idx_2[2]
        assert mult_idx_1[5] < mult_idx_2[3] < mult_idx_1[6]
        assert mult_idx_1[5] < mult_idx_2[4] < mult_idx_1[6]
        assert mult_idx_1[5] < mult_idx_2[5] < mult_idx_1[6]
        assert mult_idx_1[-1] < mult_idx_2[6]

        indexer_no_fill = mult_idx_1.get_indexer(mult_idx_2)
        expected = np.array([-1, -1, 5, -1, -1, -1, -1], dtype=indexer_no_fill.dtype)
        tm.assert_almost_equal(expected, indexer_no_fill)

        # test with backfilling
        indexer_backfilled = mult_idx_1.get_indexer(mult_idx_2, method="backfill")
        expected = np.array([0, 4, 5, 6, 6, 6, -1], dtype=indexer_backfilled.dtype)
        tm.assert_almost_equal(expected, indexer_backfilled)

        # now, the same thing, but forward-filled (aka "padded")
        indexer_padded = mult_idx_1.get_indexer(mult_idx_2, method="pad")
        expected = np.array([-1, 3, 5, 5, 5, 5, 11], dtype=indexer_padded.dtype)
        tm.assert_almost_equal(expected, indexer_padded)

        # now, do the indexing in the other direction
        assert mult_idx_2[0] < mult_idx_1[0] < mult_idx_2[1]
        assert mult_idx_2[0] < mult_idx_1[1] < mult_idx_2[1]
        assert mult_idx_2[0] < mult_idx_1[2] < mult_idx_2[1]
        assert mult_idx_2[0] < mult_idx_1[3] < mult_idx_2[1]
        assert mult_idx_2[1] < mult_idx_1[4] < mult_idx_2[2]
        assert mult_idx_2[2] == mult_idx_1[5]
        assert mult_idx_2[5] < mult_idx_1[6] < mult_idx_2[6]
        assert mult_idx_2[5] < mult_idx_1[7] < mult_idx_2[6]
        assert mult_idx_2[5] < mult_idx_1[8] < mult_idx_2[6]
        assert mult_idx_2[5] < mult_idx_1[9] < mult_idx_2[6]
        assert mult_idx_2[5] < mult_idx_1[10] < mult_idx_2[6]
        assert mult_idx_2[5] < mult_idx_1[11] < mult_idx_2[6]

        indexer = mult_idx_2.get_indexer(mult_idx_1)
        expected = np.array(
            [-1, -1, -1, -1, -1, 2, -1, -1, -1, -1, -1, -1], dtype=indexer.dtype
        )
        tm.assert_almost_equal(expected, indexer)

        backfill_indexer = mult_idx_2.get_indexer(mult_idx_1, method="bfill")
        expected = np.array(
            [1, 1, 1, 1, 2, 2, 6, 6, 6, 6, 6, 6], dtype=backfill_indexer.dtype
        )
        tm.assert_almost_equal(expected, backfill_indexer)

        pad_indexer = mult_idx_2.get_indexer(mult_idx_1, method="pad")
        expected = np.array(
            [0, 0, 0, 0, 1, 2, 5, 5, 5, 5, 5, 5], dtype=pad_indexer.dtype
        )
        tm.assert_almost_equal(expected, pad_indexer)

    def test_get_indexer_crossing_levels(self):
        # https://github.com/pandas-dev/pandas/issues/29896
        # tests a corner case with get_indexer() with MultiIndexes where, when we
        # need to "carry" across levels, proper tuple ordering is respected
        #
        # the MultiIndexes used in this test, visually, are:
        # mult_idx_1:
        #  0: 1 1 1 1
        #  1:       2
        #  2:     2 1
        #  3:       2
        #  4: 1 2 1 1
        #  5:       2
        #  6:     2 1
        #  7:       2
        #  8: 2 1 1 1
        #  9:       2
        # 10:     2 1
        # 11:       2
        # 12: 2 2 1 1
        # 13:       2
        # 14:     2 1
        # 15:       2
        #
        # mult_idx_2:
        #  0: 1 3 2 2
        #  1: 2 3 2 2
        mult_idx_1 = pd.MultiIndex.from_product([[1, 2]] * 4)
        mult_idx_2 = pd.MultiIndex.from_tuples([(1, 3, 2, 2), (2, 3, 2, 2)])

        # show the tuple orderings, which get_indexer() should respect
        assert mult_idx_1[7] < mult_idx_2[0] < mult_idx_1[8]
        assert mult_idx_1[-1] < mult_idx_2[1]

        indexer = mult_idx_1.get_indexer(mult_idx_2)
        expected = np.array([-1, -1], dtype=indexer.dtype)
        tm.assert_almost_equal(expected, indexer)

        backfill_indexer = mult_idx_1.get_indexer(mult_idx_2, method="bfill")
        expected = np.array([8, -1], dtype=backfill_indexer.dtype)
        tm.assert_almost_equal(expected, backfill_indexer)

        pad_indexer = mult_idx_1.get_indexer(mult_idx_2, method="ffill")
        expected = np.array([7, 15], dtype=pad_indexer.dtype)
        tm.assert_almost_equal(expected, pad_indexer)


def test_getitem(idx):
    # scalar
    assert idx[2] == ("bar", "one")

    # slice
    result = idx[2:5]
    expected = idx[[2, 3, 4]]
    assert result.equals(expected)

    # boolean
    result = idx[[True, False, True, False, True, True]]
    result2 = idx[np.array([True, False, True, False, True, True])]
    expected = idx[[0, 2, 4, 5]]
    assert result.equals(expected)
    assert result2.equals(expected)


def test_getitem_group_select(idx):
    sorted_idx, _ = idx.sortlevel(0)
    assert sorted_idx.get_loc("baz") == slice(3, 4)
    assert sorted_idx.get_loc("foo") == slice(0, 2)


@pytest.mark.parametrize("ind1", [[True] * 5, pd.Index([True] * 5)])
@pytest.mark.parametrize(
    "ind2",
    [[True, False, True, False, False], pd.Index([True, False, True, False, False])],
)
def test_getitem_bool_index_all(ind1, ind2):
    # GH#22533
    idx = MultiIndex.from_tuples([(10, 1), (20, 2), (30, 3), (40, 4), (50, 5)])
    tm.assert_index_equal(idx[ind1], idx)

    expected = MultiIndex.from_tuples([(10, 1), (30, 3)])
    tm.assert_index_equal(idx[ind2], expected)


@pytest.mark.parametrize("ind1", [[True], pd.Index([True])])
@pytest.mark.parametrize("ind2", [[False], pd.Index([False])])
def test_getitem_bool_index_single(ind1, ind2):
    # GH#22533
    idx = MultiIndex.from_tuples([(10, 1)])
    tm.assert_index_equal(idx[ind1], idx)

    expected = pd.MultiIndex(
        levels=[np.array([], dtype=np.int64), np.array([], dtype=np.int64)],
        codes=[[], []],
    )
    tm.assert_index_equal(idx[ind2], expected)


class TestGetLoc:
    def test_get_loc(self, idx):
        assert idx.get_loc(("foo", "two")) == 1
        assert idx.get_loc(("baz", "two")) == 3
        with pytest.raises(KeyError, match=r"^10$"):
            idx.get_loc(("bar", "two"))
        with pytest.raises(KeyError, match=r"^'quux'$"):
            idx.get_loc("quux")

        msg = "only the default get_loc method is currently supported for MultiIndex"
        with pytest.raises(NotImplementedError, match=msg):
            idx.get_loc("foo", method="nearest")

        # 3 levels
        index = MultiIndex(
            levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],
            codes=[
                np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                np.array([1, 0, 1, 1, 0, 0, 1, 0]),
            ],
        )
        with pytest.raises(KeyError, match=r"^\(1, 1\)$"):
            index.get_loc((1, 1))
        assert index.get_loc((2, 0)) == slice(3, 5)

    def test_get_loc_duplicates(self):
        index = Index([2, 2, 2, 2])
        result = index.get_loc(2)
        expected = slice(0, 4)
        assert result == expected

        index = Index(["c", "a", "a", "b", "b"])
        rs = index.get_loc("c")
        xp = 0
        assert rs == xp

        with pytest.raises(KeyError):
            index.get_loc(2)

    def test_get_loc_level(self):
        index = MultiIndex(
            levels=[Index(np.arange(4)), Index(np.arange(4)), Index(np.arange(4))],
            codes=[
                np.array([0, 0, 1, 2, 2, 2, 3, 3]),
                np.array([0, 1, 0, 0, 0, 1, 0, 1]),
                np.array([1, 0, 1, 1, 0, 0, 1, 0]),
            ],
        )
        loc, new_index = index.get_loc_level((0, 1))
        expected = slice(1, 2)
        exp_index = index[expected].droplevel(0).droplevel(0)
        assert loc == expected
        assert new_index.equals(exp_index)

        loc, new_index = index.get_loc_level((0, 1, 0))
        expected = 1
        assert loc == expected
        assert new_index is None

        with pytest.raises(KeyError, match=r"^\(2, 2\)$"):
            index.get_loc_level((2, 2))
        # GH 22221: unused label
        with pytest.raises(KeyError, match=r"^2$"):
            index.drop(2).get_loc_level(2)
        # Unused label on unsorted level:
        with pytest.raises(KeyError, match=r"^2$"):
            index.drop(1, level=2).get_loc_level(2, level=2)

        index = MultiIndex(
            levels=[[2000], list(range(4))],
            codes=[np.array([0, 0, 0, 0]), np.array([0, 1, 2, 3])],
        )
        result, new_index = index.get_loc_level((2000, slice(None, None)))
        expected = slice(None, None)
        assert result == expected
        assert new_index.equals(index.droplevel(0))

    @pytest.mark.parametrize("dtype1", [int, float, bool, str])
    @pytest.mark.parametrize("dtype2", [int, float, bool, str])
    def test_get_loc_multiple_dtypes(self, dtype1, dtype2):
        # GH 18520
        levels = [np.array([0, 1]).astype(dtype1), np.array([0, 1]).astype(dtype2)]
        idx = pd.MultiIndex.from_product(levels)
        assert idx.get_loc(idx[2]) == 2

    @pytest.mark.parametrize("level", [0, 1])
    @pytest.mark.parametrize("dtypes", [[int, float], [float, int]])
    def test_get_loc_implicit_cast(self, level, dtypes):
        # GH 18818, GH 15994 : as flat index, cast int to float and vice-versa
        levels = [["a", "b"], ["c", "d"]]
        key = ["b", "d"]
        lev_dtype, key_dtype = dtypes
        levels[level] = np.array([0, 1], dtype=lev_dtype)
        key[level] = key_dtype(1)
        idx = MultiIndex.from_product(levels)
        assert idx.get_loc(tuple(key)) == 3

    def test_get_loc_cast_bool(self):
        # GH 19086 : int is casted to bool, but not vice-versa
        levels = [[False, True], np.arange(2, dtype="int64")]
        idx = MultiIndex.from_product(levels)

        assert idx.get_loc((0, 1)) == 1
        assert idx.get_loc((1, 0)) == 2

        with pytest.raises(KeyError, match=r"^\(False, True\)$"):
            idx.get_loc((False, True))
        with pytest.raises(KeyError, match=r"^\(True, False\)$"):
            idx.get_loc((True, False))

    @pytest.mark.parametrize("level", [0, 1])
    def test_get_loc_nan(self, level, nulls_fixture):
        # GH 18485 : NaN in MultiIndex
        levels = [["a", "b"], ["c", "d"]]
        key = ["b", "d"]
        levels[level] = np.array([0, nulls_fixture], dtype=type(nulls_fixture))
        key[level] = nulls_fixture
        idx = MultiIndex.from_product(levels)
        assert idx.get_loc(tuple(key)) == 3

    def test_get_loc_missing_nan(self):
        # GH 8569
        idx = MultiIndex.from_arrays([[1.0, 2.0], [3.0, 4.0]])
        assert isinstance(idx.get_loc(1), slice)
        with pytest.raises(KeyError, match=r"^3$"):
            idx.get_loc(3)
        with pytest.raises(KeyError, match=r"^nan$"):
            idx.get_loc(np.nan)
        with pytest.raises(TypeError, match="unhashable type: 'list'"):
            # listlike/non-hashable raises TypeError
            idx.get_loc([np.nan])

    def test_get_loc_with_values_including_missing_values(self):
        # issue 19132
        idx = MultiIndex.from_product([[np.nan, 1]] * 2)
        expected = slice(0, 2, None)
        assert idx.get_loc(np.nan) == expected

        idx = MultiIndex.from_arrays([[np.nan, 1, 2, np.nan]])
        expected = np.array([True, False, False, True])
        tm.assert_numpy_array_equal(idx.get_loc(np.nan), expected)

        idx = MultiIndex.from_product([[np.nan, 1]] * 3)
        expected = slice(2, 4, None)
        assert idx.get_loc((np.nan, 1)) == expected

    def test_get_loc_duplicates2(self):
        # TODO: de-duplicate with test_get_loc_duplicates above?
        index = MultiIndex(
            levels=[["D", "B", "C"], [0, 26, 27, 37, 57, 67, 75, 82]],
            codes=[[0, 0, 0, 1, 2, 2, 2, 2, 2, 2], [1, 3, 4, 6, 0, 2, 2, 3, 5, 7]],
            names=["tag", "day"],
        )
        assert index.get_loc("D") == slice(0, 3)


class TestWhere:
    def test_where(self):
        i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
        msg = r"\.where is not supported for MultiIndex operations"
        with pytest.raises(NotImplementedError, match=msg):
            i.where(True)

    @pytest.mark.parametrize("klass", [list, tuple, np.array, pd.Series])
    def test_where_array_like(self, klass):
        i = MultiIndex.from_tuples([("A", 1), ("A", 2)])
        cond = [False, True]
        msg = r"\.where is not supported for MultiIndex operations"
        with pytest.raises(NotImplementedError, match=msg):
            i.where(klass(cond))


class TestContains:
    def test_contains_top_level(self):
        midx = MultiIndex.from_product([["A", "B"], [1, 2]])
        assert "A" in midx
        assert "A" not in midx._engine

    def test_contains_with_nat(self):
        # MI with a NaT
        mi = MultiIndex(
            levels=[["C"], pd.date_range("2012-01-01", periods=5)],
            codes=[[0, 0, 0, 0, 0, 0], [-1, 0, 1, 2, 3, 4]],
            names=[None, "B"],
        )
        assert ("C", pd.Timestamp("2012-01-01")) in mi
        for val in mi.values:
            assert val in mi

    def test_contains(self, idx):
        assert ("foo", "two") in idx
        assert ("bar", "two") not in idx
        assert None not in idx

    def test_contains_with_missing_value(self):
        # GH#19132
        idx = MultiIndex.from_arrays([[1, np.nan, 2]])
        assert np.nan in idx

        idx = MultiIndex.from_arrays([[1, 2], [np.nan, 3]])
        assert np.nan not in idx
        assert (1, np.nan) in idx

    def test_multiindex_contains_dropped(self):
        # GH#19027
        # test that dropped MultiIndex levels are not in the MultiIndex
        # despite continuing to be in the MultiIndex's levels
        idx = MultiIndex.from_product([[1, 2], [3, 4]])
        assert 2 in idx
        idx = idx.drop(2)

        # drop implementation keeps 2 in the levels
        assert 2 in idx.levels[0]
        # but it should no longer be in the index itself
        assert 2 not in idx

        # also applies to strings
        idx = MultiIndex.from_product([["a", "b"], ["c", "d"]])
        assert "a" in idx
        idx = idx.drop("a")
        assert "a" in idx.levels[0]
        assert "a" not in idx

    def test_contains_td64_level(self):
        # GH#24570
        tx = pd.timedelta_range("09:30:00", "16:00:00", freq="30 min")
        idx = MultiIndex.from_arrays([tx, np.arange(len(tx))])
        assert tx[0] in idx
        assert "element_not_exit" not in idx
        assert "0 day 09:30:00" in idx

    @pytest.mark.slow
    def test_large_mi_contains(self):
        # GH#10645
        result = MultiIndex.from_arrays([range(10 ** 6), range(10 ** 6)])
        assert not (10 ** 6, 0) in result


def test_timestamp_multiindex_indexer():
    # https://github.com/pandas-dev/pandas/issues/26944
    idx = pd.MultiIndex.from_product(
        [
            pd.date_range("2019-01-01T00:15:33", periods=100, freq="H", name="date"),
            ["x"],
            [3],
        ]
    )
    df = pd.DataFrame({"foo": np.arange(len(idx))}, idx)
    result = df.loc[pd.IndexSlice["2019-1-2":, "x", :], "foo"]
    qidx = pd.MultiIndex.from_product(
        [
            pd.date_range(
                start="2019-01-02T00:15:33",
                end="2019-01-05T02:15:33",
                freq="H",
                name="date",
            ),
            ["x"],
            [3],
        ]
    )
    should_be = pd.Series(data=np.arange(24, len(qidx) + 24), index=qidx, name="foo")
    tm.assert_series_equal(result, should_be)


@pytest.mark.parametrize(
    "index_arr,expected,target,algo",
    [
        ([[np.nan, "a", "b"], ["c", "d", "e"]], 0, np.nan, "left"),
        ([[np.nan, "a", "b"], ["c", "d", "e"]], 1, (np.nan, "c"), "right"),
        ([["a", "b", "c"], ["d", np.nan, "d"]], 1, ("b", np.nan), "left"),
    ],
)
def test_get_slice_bound_with_missing_value(index_arr, expected, target, algo):
    # issue 19132
    idx = MultiIndex.from_arrays(index_arr)
    result = idx.get_slice_bound(target, side=algo, kind="loc")
    assert result == expected


@pytest.mark.parametrize(
    "index_arr,expected,start_idx,end_idx",
    [
        ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 2, None), np.nan, 1),
        ([[np.nan, 1, 2], [3, 4, 5]], slice(0, 3, None), np.nan, (2, 5)),
        ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), 3),
        ([[1, 2, 3], [4, np.nan, 5]], slice(1, 3, None), (2, np.nan), (3, 5)),
    ],
)
def test_slice_indexer_with_missing_value(index_arr, expected, start_idx, end_idx):
    # issue 19132
    idx = MultiIndex.from_arrays(index_arr)
    result = idx.slice_indexer(start=start_idx, end=end_idx)
    assert result == expected


def test_pyint_engine():
    # GH#18519 : when combinations of codes cannot be represented in 64
    # bits, the index underlying the MultiIndex engine works with Python
    # integers, rather than uint64.
    N = 5
    keys = [
        tuple(l)
        for l in [
            [0] * 10 * N,
            [1] * 10 * N,
            [2] * 10 * N,
            [np.nan] * N + [2] * 9 * N,
            [0] * N + [2] * 9 * N,
            [np.nan] * N + [2] * 8 * N + [0] * N,
        ]
    ]
    # Each level contains 4 elements (including NaN), so it is represented
    # in 2 bits, for a total of 2*N*10 = 100 > 64 bits. If we were using a
    # 64 bit engine and truncating the first levels, the fourth and fifth
    # keys would collide; if truncating the last levels, the fifth and
    # sixth; if rotating bits rather than shifting, the third and fifth.
    for idx in range(len(keys)):
        index = MultiIndex.from_tuples(keys)
        assert index.get_loc(keys[idx]) == idx

        expected = np.arange(idx + 1, dtype=np.intp)
        result = index.get_indexer([keys[i] for i in expected])
        tm.assert_numpy_array_equal(result, expected)

    # With missing key:
    idces = range(len(keys))
    expected = np.array([-1] + list(idces), dtype=np.intp)
    missing = tuple([0, 1] * 5 * N)
    result = index.get_indexer([missing] + [keys[i] for i in idces])
    ...
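The pandas test file above revolves around MultiIndex.slice_locs, get_indexer, get_loc and membership checks. A small standalone distillation, with values chosen here purely for illustration (not taken from the test file):

import pandas as pd

# A sorted two-level MultiIndex: ("a", 1), ("a", 2), ("b", 1), ("b", 2)
mi = pd.MultiIndex.from_product([["a", "b"], [1, 2]])

# slice_locs maps label bounds to integer positions; a partial key
# covers its whole group, so "a".."b" spans all four rows.
print(mi.slice_locs("a", "b"))        # (0, 4)
print(mi.slice_locs(("a", 2), "b"))   # (1, 4)

# get_indexer locates each target tuple, returning -1 where there is no match.
print(mi.get_indexer([("a", 2), ("b", 9)]))  # [ 1 -1]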


networks.py

Source: networks.py (GitHub)


# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
import torch
import torch.nn as nn
import functools
from torch.autograd import Variable
import numpy as np
from torch.nn.utils import spectral_norm

# from util.util import SwitchNorm2d
import torch.nn.functional as F

###############################################################################
# Functions
###############################################################################
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find("Conv") != -1:
        m.weight.data.normal_(0.0, 0.02)
    elif classname.find("BatchNorm2d") != -1:
        m.weight.data.normal_(1.0, 0.02)
        m.bias.data.fill_(0)


def get_norm_layer(norm_type="instance"):
    if norm_type == "batch":
        norm_layer = functools.partial(nn.BatchNorm2d, affine=True)
    elif norm_type == "instance":
        norm_layer = functools.partial(nn.InstanceNorm2d, affine=False)
    elif norm_type == "spectral":
        norm_layer = spectral_norm()
    elif norm_type == "SwitchNorm":
        norm_layer = SwitchNorm2d
    else:
        raise NotImplementedError("normalization layer [%s] is not found" % norm_type)
    return norm_layer


def print_network(net):
    if isinstance(net, list):
        net = net[0]
    num_params = 0
    for param in net.parameters():
        num_params += param.numel()
    print(net)
    print("Total number of parameters: %d" % num_params)


class GlobalGenerator_DCDCv2(nn.Module):
    def __init__(
        self,
        input_nc,
        output_nc,
        ngf=64,
        k_size=3,
        n_downsampling=8,
        norm_layer=nn.BatchNorm2d,
        padding_type="reflect",
        opt=None,
    ):
        super(GlobalGenerator_DCDCv2, self).__init__()
        activation = nn.ReLU(True)
        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, min(ngf, opt.mc), kernel_size=7, padding=0),
            norm_layer(ngf),
            activation,
        ]
        ### downsample
        for i in range(opt.start_r):
            mult = 2 ** i
            model += [
                nn.Conv2d(
                    min(ngf * mult, opt.mc),
                    min(ngf * mult * 2, opt.mc),
                    kernel_size=k_size,
                    stride=2,
                    padding=1,
                ),
                norm_layer(min(ngf * mult * 2, opt.mc)),
                activation,
            ]
        for i in range(opt.start_r, n_downsampling - 1):
            mult = 2 ** i
            model += [
                nn.Conv2d(
                    min(ngf * mult, opt.mc),
                    min(ngf * mult * 2, opt.mc),
                    kernel_size=k_size,
                    stride=2,
                    padding=1,
                ),
                norm_layer(min(ngf * mult * 2, opt.mc)),
                activation,
            ]
            model += [
                ResnetBlock(
                    min(ngf * mult * 2, opt.mc),
                    padding_type=padding_type,
                    activation=activation,
                    norm_layer=norm_layer,
                    opt=opt,
                )
            ]
            model += [
                ResnetBlock(
                    min(ngf * mult * 2, opt.mc),
                    padding_type=padding_type,
                    activation=activation,
                    norm_layer=norm_layer,
                    opt=opt,
                )
            ]
        mult = 2 ** (n_downsampling - 1)
        if opt.spatio_size == 32:
            model += [
                nn.Conv2d(
                    min(ngf * mult, opt.mc),
                    min(ngf * mult * 2, opt.mc),
                    kernel_size=k_size,
                    stride=2,
                    padding=1,
                ),
                norm_layer(min(ngf * mult * 2, opt.mc)),
                activation,
            ]
        if opt.spatio_size == 64:
            model += [
                ResnetBlock(
                    min(ngf * mult * 2, opt.mc),
                    padding_type=padding_type,
                    activation=activation,
                    norm_layer=norm_layer,
                    opt=opt,
                )
            ]
        model += [
            ResnetBlock(
                min(ngf * mult * 2, opt.mc),
                padding_type=padding_type,
                activation=activation,
                norm_layer=norm_layer,
                opt=opt,
            )
        ]
        # model += [nn.Conv2d(min(ngf * mult * 2, opt.mc), min(ngf, opt.mc), 1, 1)]
        if opt.feat_dim > 0:
            model += [nn.Conv2d(min(ngf * mult * 2, opt.mc), opt.feat_dim, 1, 1)]
        self.encoder = nn.Sequential(*model)

        # decode
        model = []
        if opt.feat_dim > 0:
            model += [nn.Conv2d(opt.feat_dim, min(ngf * mult * 2, opt.mc), 1, 1)]
        # model += [nn.Conv2d(min(ngf, opt.mc), min(ngf * mult * 2, opt.mc), 1, 1)]
        o_pad = 0 if k_size == 4 else 1
        mult = 2 ** n_downsampling
        model += [
            ResnetBlock(
                min(ngf * mult, opt.mc),
                padding_type=padding_type,
                activation=activation,
                norm_layer=norm_layer,
                opt=opt,
            )
        ]
        if opt.spatio_size == 32:
            model += [
                nn.ConvTranspose2d(
                    min(ngf * mult, opt.mc),
                    min(int(ngf * mult / 2), opt.mc),
                    kernel_size=k_size,
                    stride=2,
                    padding=1,
                    output_padding=o_pad,
                ),
                norm_layer(min(int(ngf * mult / 2), opt.mc)),
                activation,
            ]
        if opt.spatio_size == 64:
            model += [
                ResnetBlock(
                    min(ngf * mult, opt.mc),
                    padding_type=padding_type,
                    activation=activation,
                    norm_layer=norm_layer,
                    opt=opt,
                )
            ]
        for i in range(1, n_downsampling - opt.start_r):
            mult = 2 ** (n_downsampling - i)
            model += [
                ResnetBlock(
                    min(ngf * mult, opt.mc),
                    padding_type=padding_type,
                    activation=activation,
                    norm_layer=norm_layer,
                    opt=opt,
                )
            ]
            model += [
                ResnetBlock(
                    min(ngf * mult, opt.mc),
                    padding_type=padding_type,
                    activation=activation,
                    norm_layer=norm_layer,
                    opt=opt,
                )
            ]
            model += [
                nn.ConvTranspose2d(
                    min(ngf * mult, opt.mc),
                    min(int(ngf * mult / 2), opt.mc),
                    kernel_size=k_size,
                    stride=2,
                    padding=1,
                    output_padding=o_pad,
                ),
                norm_layer(min(int(ngf * mult / 2), opt.mc)),
                activation,
            ]
        for i in range(n_downsampling - opt.start_r, n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += [
                nn.ConvTranspose2d(
                    min(ngf * mult, opt.mc),
                    min(int(ngf * mult / 2), opt.mc),
                    kernel_size=k_size,
                    stride=2,
                    padding=1,
                    output_padding=o_pad,
                ),
                norm_layer(min(int(ngf * mult / 2), opt.mc)),
                activation,
            ]
        if opt.use_segmentation_model:
            model += [nn.ReflectionPad2d(3), nn.Conv2d(min(ngf, opt.mc), output_nc, kernel_size=7, padding=0)]
        else:
            model += [
                nn.ReflectionPad2d(3),
                nn.Conv2d(min(ngf, opt.mc), output_nc, kernel_size=7, padding=0),
                nn.Tanh(),
            ]
        self.decoder = nn.Sequential(*model)

    # def forward(self, input, flow="enc_dec"):
    #     if flow == "enc":
    #         return self.encoder(input)
    #     elif flow == "dec":
    #         return self.decoder(input)
    #     elif flow == "enc_dec":
    #         x = self.encoder(input)
    #         x = self.decoder(x)
    #         return x

    def reparameterize(self, mu, logvar):
        std = torch.exp(0.5 * logvar)
        eps = torch.randn_like(std)
        return mu + eps * std

    def forward(self, input, flow="enc_dec"):
        if flow == "enc":
            h = self.encoder(input)
            if not self.eval_:
                mean = self.mean_layer(h)
                var = self.var_layer(h)
                h = self.reparameterize(mean, var)
                return h, mean, var
            else:
                return h
        elif flow == "dec":
            return self.decoder(input)
        elif flow == "enc_dec":
            z_x = self.encoder(input)
            if not self.eval_:
                mean = self.mean_layer(z_x)
                var = self.var_layer(z_x)
                z_x = self.reparameterize(mean, var)
                x = self.decoder(z_x)
                return z_x, mean, var, x
            return self.decoder(z_x)


# Define a resnet block
class ResnetBlock(nn.Module):
    def __init__(
        self, dim, padding_type, norm_layer, opt, activation=nn.ReLU(True), use_dropout=False, dilation=1
    ):
        super(ResnetBlock, self).__init__()
        self.opt = opt
        self.dilation = dilation
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, activation, use_dropout)

    def build_conv_block(self, dim, padding_type, norm_layer, activation, use_dropout):
        conv_block = []
        p = 0
        if padding_type == "reflect":
            conv_block += [nn.ReflectionPad2d(self.dilation)]
        elif padding_type == "replicate":
            conv_block += [nn.ReplicationPad2d(self.dilation)]
        elif padding_type == "zero":
            p = self.dilation
        else:
            raise NotImplementedError("padding [%s] is not implemented" % padding_type)
        conv_block += [
            nn.Conv2d(dim, dim, kernel_size=3, padding=p, dilation=self.dilation),
            norm_layer(dim),
            activation,
        ]
        if use_dropout:
            conv_block += [nn.Dropout(0.5)]
        p = 0
        if padding_type == "reflect":
            conv_block += [nn.ReflectionPad2d(1)]
        elif padding_type == "replicate":
            conv_block += [nn.ReplicationPad2d(1)]
        elif padding_type == "zero":
            p = 1
        else:
            raise NotImplementedError("padding [%s] is not implemented" % padding_type)
        conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, dilation=1), norm_layer(dim)]
        return nn.Sequential(*conv_block)

    def forward(self, x):
        out = x + self.conv_block(x)
        return out


class Encoder(nn.Module):
    def __init__(self, input_nc, output_nc, ngf=32, n_downsampling=4, norm_layer=nn.BatchNorm2d):
        super(Encoder, self).__init__()
        self.output_nc = output_nc
        model = [
            nn.ReflectionPad2d(3),
            nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0),
            norm_layer(ngf),
            nn.ReLU(True),
        ]
        ### downsample
        for i in range(n_downsampling):
            mult = 2 ** i
            model += [
                nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1),
                norm_layer(ngf * mult * 2),
                nn.ReLU(True),
            ]
        ### upsample
        for i in range(n_downsampling):
            mult = 2 ** (n_downsampling - i)
            model += [
                nn.ConvTranspose2d(
                    ngf * mult, int(ngf * mult / 2), kernel_size=3, stride=2, padding=1, output_padding=1
                ),
                norm_layer(int(ngf * mult / 2)),
                nn.ReLU(True),
            ]
        model += [nn.ReflectionPad2d(3), nn.Conv2d(ngf, output_nc, kernel_size=7, padding=0), nn.Tanh()]
        self.model = nn.Sequential(*model)

    def forward(self, input, inst):
        outputs = self.model(input)
        # instance-wise average pooling
        outputs_mean = outputs.clone()
        inst_list = np.unique(inst.cpu().numpy().astype(int))
        for i in inst_list:
            for b in range(input.size()[0]):
                indices = (inst[b : b + 1] == int(i)).nonzero()  # n x 4
                for j in range(self.output_nc):
                    output_ins = outputs[indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]]
                    mean_feat = torch.mean(output_ins).expand_as(output_ins)
                    outputs_mean[
                        indices[:, 0] + b, indices[:, 1] + j, indices[:, 2], indices[:, 3]
                    ] = mean_feat
        return outputs_mean


def SN(module, mode=True):
    if mode:
        return torch.nn.utils.spectral_norm(module)
    return module


class NonLocalBlock2D_with_mask_Res(nn.Module):
    def __init__(
        self,
        in_channels,
        inter_channels,
        mode="add",
        re_norm=False,
        temperature=1.0,
        use_self=False,
        cosin=False,
    ):
        super(NonLocalBlock2D_with_mask_Res, self).__init__()
        self.cosin = cosin
        self.renorm = re_norm
        self.in_channels = in_channels
        self.inter_channels = inter_channels
        self.g = nn.Conv2d(
            in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
        )
        self.W = nn.Conv2d(
            in_channels=self.inter_channels, out_channels=self.in_channels, kernel_size=1, stride=1, padding=0
        )
        # for pytorch 0.3.1
        # nn.init.constant(self.W.weight, 0)
        # nn.init.constant(self.W.bias, 0)
        # for pytorch 0.4.0
        nn.init.constant_(self.W.weight, 0)
        nn.init.constant_(self.W.bias, 0)
        self.theta = nn.Conv2d(
            in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
        )
        self.phi = nn.Conv2d(
            in_channels=self.in_channels, out_channels=self.inter_channels, kernel_size=1, stride=1, padding=0
        )
        self.mode = mode
        self.temperature = temperature
        self.use_self = use_self
        norm_layer = get_norm_layer(norm_type="instance")
        activation = nn.ReLU(True)
        model = []
        for i in range(3):
            model += [
                ResnetBlock(
                    inter_channels,
                    padding_type="reflect",
                    activation=activation,
                    norm_layer=norm_layer,
                    opt=None,
                )
            ]
        self.res_block = nn.Sequential(*model)

    def forward(self, x, mask):  ## The shape of mask is Batch*1*H*W
        batch_size = x.size(0)
        g_x = self.g(x).view(batch_size, self.inter_channels, -1)
        g_x = g_x.permute(0, 2, 1)
        theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
        theta_x = theta_x.permute(0, 2, 1)
        phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
        if self.cosin:
            theta_x = F.normalize(theta_x, dim=2)
            phi_x = F.normalize(phi_x, dim=1)
        f = torch.matmul(theta_x, phi_x)
        f /= self.temperature
        f_div_C = F.softmax(f, dim=2)
        tmp = 1 - mask
        mask = F.interpolate(mask, (x.size(2), x.size(3)), mode="bilinear")
        mask[mask > 0] = 1.0
        mask = 1 - mask
        tmp = F.interpolate(tmp, (x.size(2), x.size(3)))
        mask *= tmp
        mask_expand = mask.view(batch_size, 1, -1)
        mask_expand = mask_expand.repeat(1, x.size(2) * x.size(3), 1)
        # mask = 1 - mask
        # mask=F.interpolate(mask,(x.size(2),x.size(3)))
        # mask_expand=mask.view(batch_size,1,-1)
        # mask_expand=mask_expand.repeat(1,x.size(2)*x.size(3),1)
        if self.use_self:
            mask_expand[:, range(x.size(2) * x.size(3)), range(x.size(2) * x.size(3))] = 1.0
        # print(mask_expand.shape)
        # print(f_div_C.shape)
        f_div_C = mask_expand * f_div_C
        if self.renorm:
            f_div_C = F.normalize(f_div_C, p=1, dim=2)
        ###########################
        y = torch.matmul(f_div_C, g_x)
        y = y.permute(0, 2, 1).contiguous()
        y = y.view(batch_size, self.inter_channels, *x.size()[2:])
        W_y = self.W(y)
        W_y = self.res_block(W_y)
        if self.mode == "combine":
            full_mask = mask.repeat(1, self.inter_channels, 1, 1)
            z = full_mask * x + (1 - full_mask) * W_y
            return z


class Z_xr_Discriminator(nn.Module):
    def __init__(self, input_nc, ndf=64, n_layers=5):
        super(Z_xr_Discriminator, self).__init__()
        model = [nn.ReflectionPad2d(1),
                 nn.utils.spectral_norm(
                     nn.Conv2d(input_nc, ndf, kernel_size=4, stride=2, padding=0, bias=True)),
                 nn.LeakyReLU(0.2, True)]
        for i in range(1, n_layers - 2):
            mult = 2 ** (i - 1)
            model += [nn.ReflectionPad2d(1),
                      nn.utils.spectral_norm(
                          nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=2, padding=0, bias=True)),
                      nn.LeakyReLU(0.2, True)]
        mult = 2 ** (n_layers - 2 - 1)
        model += [nn.ReflectionPad2d(1),
                  nn.utils.spectral_norm(
                      nn.Conv2d(ndf * mult, ndf * mult * 2, kernel_size=4, stride=1, padding=0, bias=True)),
                  nn.LeakyReLU(0.2, True)]
        # Class Activation Map
        mult = 2 ** (n_layers - 2)
        self.gap_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
        self.gmp_fc = nn.utils.spectral_norm(nn.Linear(ndf * mult, 1, bias=False))
        self.conv1x1 = nn.Conv2d(ndf * mult * 2, ndf * mult, kernel_size=1, stride=1, bias=True)
        self.leaky_relu = nn.LeakyReLU(0.2, True)
        self.pad = nn.ReflectionPad2d(1)
        self.conv = nn.utils.spectral_norm(
            nn.Conv2d(ndf * mult, 1, kernel_size=4, stride=1, padding=0, bias=False))
        self.model = nn.Sequential(*model)

    def forward(self, input, need_each_activation=False):
        each_activations = []
        if need_each_activation:
            x = input
            for i in range(len(self.model)):
                x = self.model[i](x)
                if isinstance(self.model[i], torch.nn.modules.activation.LeakyReLU):
                    each_activations.append(x)
        else:
            x = self.model(input)
        gap = torch.nn.functional.adaptive_avg_pool2d(x, 1)
        gap_weight = list(self.gap_fc.parameters())[0]
        gap = x * gap_weight.unsqueeze(2).unsqueeze(3)
        gmp = torch.nn.functional.adaptive_max_pool2d(x, 1)
        gmp_weight = list(self.gmp_fc.parameters())[0]
        gmp = x * gmp_weight.unsqueeze(2).unsqueeze(3)
        # gap_logit = self.gap_fc(gap.view(x.shape[0], -1))
        # gmp_logit = self.gmp_fc(gmp.view(x.shape[0], -1))
        # cam_logit = torch.cat([gap_logit, gmp_logit], 1)
        x = torch.cat([gap, gmp], 1)
        x = self.leaky_relu(self.conv1x1(x))
        # heatmap = torch.sum(x, dim=1, keepdim=True)
        x = self.pad(x)
        out = self.conv(x)
        if need_each_activation:
            return out, each_activations
        else:
            ...
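A pattern worth noting in the generator and discriminator above is mult = 2 ** i, which doubles the channel width at each downsampling stage, clamped by opt.mc. A quick sketch of that arithmetic with illustrative values (ngf=64 and mc=512 are assumptions for this example, not the repository's defaults):

# Channel widths produced by the mult = 2 ** i doubling pattern,
# capped at mc, for an assumed ngf=64, mc=512 and 6 stages.
ngf, mc = 64, 512
for i in range(6):
    mult = 2 ** i
    in_ch = min(ngf * mult, mc)
    out_ch = min(ngf * mult * 2, mc)
    print(f"stage {i}: {in_ch} -> {out_ch}")
# stage 0: 64 -> 128, stage 1: 128 -> 256, stage 2: 256 -> 512,
# and from stage 3 onward both sides stay clamped at 512 by mc.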


autoencoder.py

Source: autoencoder.py (GitHub)


from __future__ import print_function  # moved to the top: __future__ imports must precede all other statements

from caffe import layers as L, params as P
import sys

sys.path.append("utils")
sys.path.append("utils/autoencoder")
from basis import *

height = None
width = None


def conv1_autoencoder(split, batch_sz):
    n = caffe.NetSpec()
    n.data, n.label = L.ImageData(image_data_param=dict(source=split, batch_size=batch_sz, new_height=height, new_width=width, is_color=False), ntop=2)
    n.silence = L.Silence(n.label, ntop=0)
    n.flatdata_i = L.Flatten(n.data)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1, relu_param=dict(negative_slope=0.1))
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.code = conv(n.pool1, 5, 5, 64, pad=2)

    n.upsample1 = L.Deconvolution(n.code, param=dict(lr_mult=0, decay_mult=0), convolution_param=dict(group=64, num_output=64, kernel_size=4, stride=2, pad=1, bias_term=False, weight_filler=dict(type="bilinear")))
    n.deconv1 = conv(n.upsample1, 5, 5, 1, pad=2)
    n.debn1 = L.BatchNorm(n.deconv1, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale1 = L.Scale(n.debn1, bias_term=True, in_place=True)
    n.derelu1 = L.ReLU(n.descale1, relu_param=dict(negative_slope=0.1))

    n.flatdata_o = L.Flatten(n.derelu1)
    n.loss_s = L.SigmoidCrossEntropyLoss(n.flatdata_o, n.flatdata_i, loss_weight=1)
    n.loss_e = L.EuclideanLoss(n.flatdata_o, n.flatdata_i, loss_weight=0)
    return str(n.to_proto())


def conv2_autoencoder(split, batch_sz):
    n = caffe.NetSpec()
    n.data, n.label = L.ImageData(image_data_param=dict(source=split, batch_size=batch_sz, new_height=height, new_width=width, is_color=False), ntop=2)
    n.silence = L.Silence(n.label, ntop=0)
    n.flatdata_i = L.Flatten(n.data)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2, no_back=True)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1, relu_param=dict(negative_slope=0.1))
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.conv2 = conv(n.pool1, 5, 5, 128, pad=2)
    n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale2 = L.Scale(n.bn2, bias_term=True, in_place=True)
    n.relu2 = L.ReLU(n.scale2, relu_param=dict(negative_slope=0.1))
    n.pool2 = max_pool(n.relu2, 2, stride=2)

    n.code = conv(n.pool2, 5, 5, 128, pad=2)
    n.upsample2 = L.Deconvolution(n.code, param=dict(lr_mult=0, decay_mult=0), convolution_param=dict(group=128, num_output=128, kernel_size=4, stride=2, pad=1, bias_term=False, weight_filler=dict(type="bilinear")))
    n.deconv2 = conv(n.upsample2, 5, 5, 64, pad=2)
    n.debn2 = L.BatchNorm(n.deconv2, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale2 = L.Scale(n.debn2, bias_term=True, in_place=True)
    n.derelu2 = L.ReLU(n.descale2, relu_param=dict(negative_slope=0.1))
    n.upsample1 = L.Deconvolution(n.derelu2, param=dict(lr_mult=0, decay_mult=0), convolution_param=dict(group=64, num_output=64, kernel_size=4, stride=2, pad=1, bias_term=False, weight_filler=dict(type="bilinear")))
    n.deconv1 = conv(n.upsample1, 5, 5, 1, pad=2, no_back=True)
    n.debn1 = L.BatchNorm(n.deconv1, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale1 = L.Scale(n.debn1, bias_term=True, in_place=True)
    n.derelu1 = L.ReLU(n.descale1, relu_param=dict(negative_slope=0.1))

    n.flatdata_o = L.Flatten(n.derelu1)
    n.loss_s = L.SigmoidCrossEntropyLoss(n.flatdata_o, n.flatdata_i, loss_weight=1)
    n.loss_e = L.EuclideanLoss(n.flatdata_o, n.flatdata_i, loss_weight=0)
    return str(n.to_proto())


def conv3_autoencoder(split, batch_sz):
    n = caffe.NetSpec()
    n.data, n.label = L.ImageData(image_data_param=dict(source=split, batch_size=batch_sz, new_height=height, new_width=width, is_color=False), ntop=2)
    n.silence = L.Silence(n.label, ntop=0)
    n.flatdata_i = L.Flatten(n.data)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2, no_back=True)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1, relu_param=dict(negative_slope=0.1))
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.conv2 = conv(n.pool1, 5, 5, 128, pad=2, no_back=True)
    n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale2 = L.Scale(n.bn2, bias_term=True, in_place=True)
    n.relu2 = L.ReLU(n.scale2, relu_param=dict(negative_slope=0.1))
    n.pool2 = max_pool(n.relu2, 2, stride=2)

    n.conv3 = conv(n.pool2, 3, 3, 256, pad=1)
    n.bn3 = L.BatchNorm(n.conv3, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale3 = L.Scale(n.bn3, bias_term=True, in_place=True)
    n.relu3 = L.ReLU(n.scale3, relu_param=dict(negative_slope=0.1))
    n.conv3_5 = conv(n.relu3, 3, 3, 512, pad=1)
    n.bn3_5 = L.BatchNorm(n.conv3_5, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale3_5 = L.Scale(n.bn3_5, bias_term=True, in_place=True)
    n.relu3_5 = L.ReLU(n.scale3_5, relu_param=dict(negative_slope=0.1))
    n.pool3_5 = max_pool(n.relu3_5, 2, stride=2)

    n.code = conv(n.pool3_5, 3, 3, 512, pad=1)
    n.upsample3_5 = L.Deconvolution(n.code, param=dict(lr_mult=0, decay_mult=0), convolution_param=dict(group=512, num_output=512, kernel_size=4, stride=2, pad=1, bias_term=False, weight_filler=dict(type="bilinear")))
    n.deconv3_5 = conv(n.upsample3_5, 3, 3, 256, pad=1, no_back=True)
    n.debn3_5 = L.BatchNorm(n.deconv3_5, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale3_5 = L.Scale(n.debn3_5, bias_term=True, in_place=True)
    n.derelu3_5 = L.ReLU(n.descale3_5, relu_param=dict(negative_slope=0.1))

    n.deconv3 = conv(n.derelu3_5, 5, 5, 128, pad=2, no_back=True)
    n.debn3 = L.BatchNorm(n.deconv3, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale3 = L.Scale(n.debn3, bias_term=True, in_place=True)
    n.derelu3 = L.ReLU(n.descale3, relu_param=dict(negative_slope=0.1))

    n.upsample2 = L.Deconvolution(n.derelu3, param=dict(lr_mult=0, decay_mult=0), convolution_param=dict(group=128, num_output=128, kernel_size=4, stride=2, pad=1, bias_term=False, weight_filler=dict(type="bilinear")))
    n.deconv2 = conv(n.upsample2, 5, 5, 64, pad=2, no_back=True)
    n.debn2 = L.BatchNorm(n.deconv2, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale2 = L.Scale(n.debn2, bias_term=True, in_place=True)
    n.derelu2 = L.ReLU(n.descale2, relu_param=dict(negative_slope=0.1))
    n.upsample1 = L.Deconvolution(n.derelu2, param=dict(lr_mult=0, decay_mult=0), convolution_param=dict(group=64, num_output=64, kernel_size=4, stride=2, pad=1, bias_term=False, weight_filler=dict(type="bilinear")))
    n.deconv1 = conv(n.upsample1, 5, 5, 1, pad=2, no_back=True)
    n.debn1 = L.BatchNorm(n.deconv1, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale1 = L.Scale(n.debn1, bias_term=True, in_place=True)
    n.derelu1 = L.ReLU(n.descale1, relu_param=dict(negative_slope=0.1))

    n.flatdata_o = L.Flatten(n.derelu1)
    n.loss_s = L.SigmoidCrossEntropyLoss(n.flatdata_o, n.flatdata_i, loss_weight=1)
    n.loss_e = L.EuclideanLoss(n.flatdata_o, n.flatdata_i, loss_weight=0)
    return str(n.to_proto())


def conv4_autoencoder(split, batch_sz):
    n = caffe.NetSpec()
    n.data, n.label = L.ImageData(image_data_param=dict(source=split, batch_size=batch_sz, new_height=height, new_width=width, is_color=False), ntop=2)
    n.silence = L.Silence(n.label, ntop=0)
    n.flatdata_i = L.Flatten(n.data)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2, no_back=True)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1, relu_param=dict(negative_slope=0.1))
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.conv2 = conv(n.pool1, 5, 5, 128, pad=2, no_back=True)
    n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale2 = L.Scale(n.bn2, bias_term=True, in_place=True)
    n.relu2 = L.ReLU(n.scale2, relu_param=dict(negative_slope=0.1))
    n.pool2 = max_pool(n.relu2, 2, stride=2)

    n.conv3 = conv(n.pool2, 3, 3, 256, pad=1, no_back=True)
    n.bn3 = L.BatchNorm(n.conv3, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale3 = L.Scale(n.bn3, bias_term=True, in_place=True)
    n.relu3 = L.ReLU(n.scale3, relu_param=dict(negative_slope=0.1))
    n.conv3_5 = conv(n.relu3, 3, 3, 512, pad=1, no_back=True)
    n.bn3_5 = L.BatchNorm(n.conv3_5, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale3_5 = L.Scale(n.bn3_5, bias_term=True, in_place=True)
    n.relu3_5 = L.ReLU(n.scale3_5, relu_param=dict(negative_slope=0.1))
    n.pool3_5 = max_pool(n.relu3_5, 2, stride=2)

    n.conv4 = conv(n.pool3_5, 3, 3, 512, pad=1)
    n.bn4 = L.BatchNorm(n.conv4, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale4 = L.Scale(n.bn4, bias_term=True, in_place=True)
    n.relu4 = L.ReLU(n.scale4, relu_param=dict(negative_slope=0.1))

    n.code = conv(n.relu4, 3, 3, 512, pad=1)

    n.deconv4 = conv(n.code, 3, 3, 512, pad=1)
    n.debn4 = L.BatchNorm(n.deconv4, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale4 = L.Scale(n.debn4, bias_term=True, in_place=True)
    n.derelu4 = L.ReLU(n.descale4, relu_param=dict(negative_slope=0.1))
    n.upsample3_5 = L.Deconvolution(n.derelu4, param=dict(lr_mult=0, decay_mult=0), convolution_param=dict(group=512, num_output=512, kernel_size=4, stride=2, pad=1, bias_term=False, weight_filler=dict(type="bilinear")))
    n.deconv3_5 = conv(n.upsample3_5, 3, 3, 256, pad=1, no_back=True)
    n.debn3_5 = L.BatchNorm(n.deconv3_5, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale3_5 = L.Scale(n.debn3_5, bias_term=True, in_place=True)
    n.derelu3_5 = L.ReLU(n.descale3_5, relu_param=dict(negative_slope=0.1))

    n.deconv3 = conv(n.derelu3_5, 5, 5, 128, pad=2, no_back=True)
    n.debn3 = L.BatchNorm(n.deconv3, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale3 = L.Scale(n.debn3, bias_term=True, in_place=True)
    n.derelu3 = L.ReLU(n.descale3, relu_param=dict(negative_slope=0.1))

    n.upsample2 = L.Deconvolution(n.derelu3, param=dict(lr_mult=0, decay_mult=0), convolution_param=dict(group=128, num_output=128, kernel_size=4, stride=2, pad=1, bias_term=False, weight_filler=dict(type="bilinear")))
    n.deconv2 = conv(n.upsample2, 5, 5, 64, pad=2, no_back=True)
    n.debn2 = L.BatchNorm(n.deconv2, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale2 = L.Scale(n.debn2, bias_term=True, in_place=True)
    n.derelu2 = L.ReLU(n.descale2, relu_param=dict(negative_slope=0.1))
    n.upsample1 = L.Deconvolution(n.derelu2, param=dict(lr_mult=0, decay_mult=0), convolution_param=dict(group=64, num_output=64, kernel_size=4, stride=2, pad=1, bias_term=False, weight_filler=dict(type="bilinear")))
    n.deconv1 = conv(n.upsample1, 5, 5, 1, pad=2, no_back=True)
    n.debn1 = L.BatchNorm(n.deconv1, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.descale1 = L.Scale(n.debn1, bias_term=True, in_place=True)
    n.derelu1 = L.ReLU(n.descale1, relu_param=dict(negative_slope=0.1))

    n.flatdata_o = L.Flatten(n.derelu1)
    n.loss_s = L.SigmoidCrossEntropyLoss(n.flatdata_o, n.flatdata_i, loss_weight=1)
    n.loss_e = L.EuclideanLoss(n.flatdata_o, n.flatdata_i, loss_weight=0)
    return str(n.to_proto())


def vgg(split, batch_sz):
    n = caffe.NetSpec()
    n.data, n.label = L.ImageData(image_data_param=dict(shuffle=True, source=split, batch_size=batch_sz, new_height=32, new_width=100, is_color=False), ntop=2)
    n.silence = L.Silence(n.label, ntop=0)

    n.conv1 = conv(n.data, 5, 5, 64, pad=2)
    n.bn1 = L.BatchNorm(n.conv1, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale1 = L.Scale(n.bn1, bias_term=True, in_place=True)
    n.relu1 = L.ReLU(n.scale1)
    n.pool1 = max_pool(n.relu1, 2, stride=2)

    n.conv2 = conv(n.pool1, 5, 5, 128, pad=2)
    n.bn2 = L.BatchNorm(n.conv2, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale2 = L.Scale(n.bn2, bias_term=True, in_place=True)
    n.relu2 = L.ReLU(n.scale2)
    n.pool2 = max_pool(n.relu2, 2, stride=2)

    n.conv3 = conv(n.pool2, 3, 3, 256, pad=1)
    n.bn3 = L.BatchNorm(n.conv3, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale3 = L.Scale(n.bn3, bias_term=True, in_place=True)
    n.relu3 = L.ReLU(n.scale3)
    n.conv3_5 = conv(n.relu3, 3, 3, 512, pad=1)
    n.bn3_5 = L.BatchNorm(n.conv3_5, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale3_5 = L.Scale(n.bn3_5, bias_term=True, in_place=True)
    n.relu3_5 = L.ReLU(n.scale3_5)
    n.pool3_5 = max_pool(n.relu3_5, 2, stride=2)

    n.conv4 = conv(n.pool3_5, 3, 3, 512, pad=1)
    n.bn4 = L.BatchNorm(n.conv4, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale4 = L.Scale(n.bn4, bias_term=True, in_place=True)
    n.relu4 = L.ReLU(n.scale4)

    n.fc5 = conv(n.relu4, 13, 4, 4096)
    # original read L.BatchNorm(n.fc1, ...): n.fc1 is never defined, so this must be n.fc5
    n.bn5 = L.BatchNorm(n.fc5, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale5 = L.Scale(n.bn5, bias_term=True, in_place=True)
    n.relu5 = L.ReLU(n.scale5)
    n.drop1 = L.Dropout(n.relu5, in_place=True)

    n.fc6 = conv(n.drop1, 1, 1, 4096)
    # original read L.BatchNorm(n.fc2, ...): likewise corrected to n.fc6
    n.bn6 = L.BatchNorm(n.fc6, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale6 = L.Scale(n.bn6, bias_term=True, in_place=True)
    n.relu6 = L.ReLU(n.scale6)
    n.drop2 = L.Dropout(n.relu6, in_place=True)

    n.fc_class = conv(n.drop2, 1, 1, 88172)
    n.bn7 = L.BatchNorm(n.fc_class, use_global_stats=False, in_place=True, param=[{"lr_mult": 0}, {"lr_mult": 0}, {"lr_mult": 0}])
    n.scale7 = L.Scale(n.bn7, bias_term=True, in_place=True)
    n.relu7 = L.ReLU(n.scale7)
    n.loss = L.SoftmaxWithLoss(n.relu7, n.label, loss_weight=1)
    ...
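Each builder above returns a serialized Caffe net definition via str(n.to_proto()). A hedged usage sketch, assuming pycaffe and the basis helpers (conv, max_pool) are importable, and with "train.txt" and the output path as placeholders:

# Hypothetical driver: write the conv1 autoencoder's train net to disk.
proto = conv1_autoencoder("train.txt", batch_sz=32)
with open("conv1_autoencoder_train.prototxt", "w") as f:
    f.write(proto)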


pMC_mult.py

Source: pMC_mult.py (GitHub)


# This file was created automatically by SWIG 1.3.29.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _pMC_mult
import new
new_instancemethod = new.instancemethod
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'PySwigObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError,name
def _swig_repr(self):
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types
class PySwigIterator(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, PySwigIterator, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, PySwigIterator, name)
    def __init__(self): raise AttributeError, "No constructor defined"
    __repr__ = _swig_repr
    __swig_destroy__ = _pMC_mult.delete_PySwigIterator
    __del__ = lambda self : None;
    def value(*args): return _pMC_mult.PySwigIterator_value(*args)
    def incr(*args): return _pMC_mult.PySwigIterator_incr(*args)
    def decr(*args): return _pMC_mult.PySwigIterator_decr(*args)
    def distance(*args): return _pMC_mult.PySwigIterator_distance(*args)
    def equal(*args): return _pMC_mult.PySwigIterator_equal(*args)
    def copy(*args): return _pMC_mult.PySwigIterator_copy(*args)
    def next(*args): return _pMC_mult.PySwigIterator_next(*args)
    def previous(*args): return _pMC_mult.PySwigIterator_previous(*args)
    def advance(*args): return _pMC_mult.PySwigIterator_advance(*args)
    def __eq__(*args): return _pMC_mult.PySwigIterator___eq__(*args)
    def __ne__(*args): return _pMC_mult.PySwigIterator___ne__(*args)
    def __iadd__(*args): return _pMC_mult.PySwigIterator___iadd__(*args)
    def __isub__(*args): return _pMC_mult.PySwigIterator___isub__(*args)
    def __add__(*args): return _pMC_mult.PySwigIterator___add__(*args)
    def __sub__(*args): return _pMC_mult.PySwigIterator___sub__(*args)
    def __iter__(self): return self
PySwigIterator_swigregister = _pMC_mult.PySwigIterator_swigregister
PySwigIterator_swigregister(PySwigIterator)
class IntVector(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, IntVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, IntVector, name)
    __repr__ = _swig_repr
    def iterator(*args): return _pMC_mult.IntVector_iterator(*args)
    def __iter__(self): return self.iterator()
    def __nonzero__(*args): return _pMC_mult.IntVector___nonzero__(*args)
    def __len__(*args): return _pMC_mult.IntVector___len__(*args)
    def pop(*args): return _pMC_mult.IntVector_pop(*args)
    def __getslice__(*args): return _pMC_mult.IntVector___getslice__(*args)
    def __setslice__(*args): return _pMC_mult.IntVector___setslice__(*args)
    def __delslice__(*args): return _pMC_mult.IntVector___delslice__(*args)
    def __delitem__(*args): return _pMC_mult.IntVector___delitem__(*args)
    def __getitem__(*args): return _pMC_mult.IntVector___getitem__(*args)
    def __setitem__(*args): return _pMC_mult.IntVector___setitem__(*args)
    def append(*args): return _pMC_mult.IntVector_append(*args)
    def empty(*args): return _pMC_mult.IntVector_empty(*args)
    def size(*args): return _pMC_mult.IntVector_size(*args)
    def clear(*args): return _pMC_mult.IntVector_clear(*args)
    def swap(*args): return _pMC_mult.IntVector_swap(*args)
    def get_allocator(*args): return _pMC_mult.IntVector_get_allocator(*args)
    def begin(*args): return _pMC_mult.IntVector_begin(*args)
    def end(*args): return _pMC_mult.IntVector_end(*args)
    def rbegin(*args): return _pMC_mult.IntVector_rbegin(*args)
    def rend(*args): return _pMC_mult.IntVector_rend(*args)
    def pop_back(*args): return _pMC_mult.IntVector_pop_back(*args)
    def erase(*args): return _pMC_mult.IntVector_erase(*args)
    def __init__(self, *args):
        this = _pMC_mult.new_IntVector(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(*args): return _pMC_mult.IntVector_push_back(*args)
    def front(*args): return _pMC_mult.IntVector_front(*args)
    def back(*args): return _pMC_mult.IntVector_back(*args)
    def assign(*args): return _pMC_mult.IntVector_assign(*args)
    def resize(*args): return _pMC_mult.IntVector_resize(*args)
    def insert(*args): return _pMC_mult.IntVector_insert(*args)
    def reserve(*args): return _pMC_mult.IntVector_reserve(*args)
    def capacity(*args): return _pMC_mult.IntVector_capacity(*args)
    __swig_destroy__ = _pMC_mult.delete_IntVector
    __del__ = lambda self : None;
IntVector_swigregister = _pMC_mult.IntVector_swigregister
IntVector_swigregister(IntVector)
class DoubleVector(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, DoubleVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, DoubleVector, name)
    __repr__ = _swig_repr
    def iterator(*args): return _pMC_mult.DoubleVector_iterator(*args)
    def __iter__(self): return self.iterator()
    def __nonzero__(*args): return _pMC_mult.DoubleVector___nonzero__(*args)
    def __len__(*args): return _pMC_mult.DoubleVector___len__(*args)
    def pop(*args): return _pMC_mult.DoubleVector_pop(*args)
    def __getslice__(*args): return _pMC_mult.DoubleVector___getslice__(*args)
    def __setslice__(*args): return _pMC_mult.DoubleVector___setslice__(*args)
    def __delslice__(*args): return _pMC_mult.DoubleVector___delslice__(*args)
    def __delitem__(*args): return _pMC_mult.DoubleVector___delitem__(*args)
    def __getitem__(*args): return _pMC_mult.DoubleVector___getitem__(*args)
    def __setitem__(*args): return _pMC_mult.DoubleVector___setitem__(*args)
    def append(*args): return _pMC_mult.DoubleVector_append(*args)
    def empty(*args): return _pMC_mult.DoubleVector_empty(*args)
    def size(*args): return _pMC_mult.DoubleVector_size(*args)
    def clear(*args): return _pMC_mult.DoubleVector_clear(*args)
    def swap(*args): return _pMC_mult.DoubleVector_swap(*args)
    def get_allocator(*args): return _pMC_mult.DoubleVector_get_allocator(*args)
    def begin(*args): return _pMC_mult.DoubleVector_begin(*args)
    def end(*args): return _pMC_mult.DoubleVector_end(*args)
    def rbegin(*args): return _pMC_mult.DoubleVector_rbegin(*args)
    def rend(*args): return _pMC_mult.DoubleVector_rend(*args)
    def pop_back(*args): return _pMC_mult.DoubleVector_pop_back(*args)
    def erase(*args): return _pMC_mult.DoubleVector_erase(*args)
    def __init__(self, *args):
        this = _pMC_mult.new_DoubleVector(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(*args): return _pMC_mult.DoubleVector_push_back(*args)
    def front(*args): return _pMC_mult.DoubleVector_front(*args)
    def back(*args): return _pMC_mult.DoubleVector_back(*args)
    def assign(*args): return _pMC_mult.DoubleVector_assign(*args)
    def resize(*args): return _pMC_mult.DoubleVector_resize(*args)
    def insert(*args): return _pMC_mult.DoubleVector_insert(*args)
    def reserve(*args): return _pMC_mult.DoubleVector_reserve(*args)
    def capacity(*args): return _pMC_mult.DoubleVector_capacity(*args)
    __swig_destroy__ = _pMC_mult.delete_DoubleVector
    __del__ = lambda self : None;
DoubleVector_swigregister = _pMC_mult.DoubleVector_swigregister
DoubleVector_swigregister(DoubleVector)
class FloatVector(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, FloatVector, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, FloatVector, name)
    __repr__ = _swig_repr
    def iterator(*args): return _pMC_mult.FloatVector_iterator(*args)
    def __iter__(self): return self.iterator()
    def __nonzero__(*args): return _pMC_mult.FloatVector___nonzero__(*args)
    def __len__(*args): return _pMC_mult.FloatVector___len__(*args)
    def pop(*args): return _pMC_mult.FloatVector_pop(*args)
    def __getslice__(*args): return _pMC_mult.FloatVector___getslice__(*args)
    def __setslice__(*args): return _pMC_mult.FloatVector___setslice__(*args)
    def __delslice__(*args): return _pMC_mult.FloatVector___delslice__(*args)
    def __delitem__(*args): return _pMC_mult.FloatVector___delitem__(*args)
    def __getitem__(*args): return _pMC_mult.FloatVector___getitem__(*args)
    def __setitem__(*args): return _pMC_mult.FloatVector___setitem__(*args)
    def append(*args): return _pMC_mult.FloatVector_append(*args)
    def empty(*args): return _pMC_mult.FloatVector_empty(*args)
    def size(*args): return _pMC_mult.FloatVector_size(*args)
    def clear(*args): return _pMC_mult.FloatVector_clear(*args)
    def swap(*args): return _pMC_mult.FloatVector_swap(*args)
    def get_allocator(*args): return _pMC_mult.FloatVector_get_allocator(*args)
    def begin(*args): return _pMC_mult.FloatVector_begin(*args)
    def end(*args): return _pMC_mult.FloatVector_end(*args)
    def rbegin(*args): return _pMC_mult.FloatVector_rbegin(*args)
    def rend(*args): return _pMC_mult.FloatVector_rend(*args)
    def pop_back(*args): return _pMC_mult.FloatVector_pop_back(*args)
    def erase(*args): return _pMC_mult.FloatVector_erase(*args)
    def __init__(self, *args):
        this = _pMC_mult.new_FloatVector(*args)
        try: self.this.append(this)
        except: self.this = this
    def push_back(*args): return _pMC_mult.FloatVector_push_back(*args)
    def front(*args): return _pMC_mult.FloatVector_front(*args)
    def back(*args): return _pMC_mult.FloatVector_back(*args)
    def assign(*args): return _pMC_mult.FloatVector_assign(*args)
    def resize(*args): return _pMC_mult.FloatVector_resize(*args)
    def insert(*args): return _pMC_mult.FloatVector_insert(*args)
    def reserve(*args): return _pMC_mult.FloatVector_reserve(*args)
    def capacity(*args): return _pMC_mult.FloatVector_capacity(*args)
    __swig_destroy__ = _pMC_mult.delete_FloatVector
    __del__ = lambda self : None;
FloatVector_swigregister = _pMC_mult.FloatVector_swigregister
FloatVector_swigregister(FloatVector)
class MC(_object):
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, MC, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, MC, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        this = _pMC_mult.new_MC(*args)
        try: self.this.append(this)
        except: self.this = this
    def calc_pKas(*args): return _pMC_mult.MC_calc_pKas(*args)
    def set_MCsteps(*args): return _pMC_mult.MC_set_MCsteps(*args)
    __swig_destroy__ = _pMC_mult.delete_MC
    __del__ = lambda self : None;
MC_swigregister = _pMC_mult.MC_swigregister...
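The snippet above is the SWIG-generated proxy for the _pMC_mult extension: each C++ std::vector instantiation (IntVector, DoubleVector, FloatVector) is exposed through both its C++ API (push_back, size, erase) and the Python sequence protocol (__len__, __getitem__, __iter__), while the MC class forwards calc_pKas and set_MCsteps to the compiled module. Below is a minimal sketch of how these wrappers might be exercised from a nose test. It rests on a few assumptions not confirmed by the snippet: the proxy module is importable as pMC_mult (the usual SWIG counterpart to _pMC_mult), the vector classes accept a no-argument constructor (standard for SWIG's std::vector wrappers), and set_MCsteps takes a single integer. The generated file is Python 2-era code (import new, the raise X, y syntax), so the sketch targets Python 2 as well.

# test_pMC_mult.py -- illustrative nose tests for the SWIG wrappers above.
# Assumed, not shown in the snippet: the proxy module is importable as
# pMC_mult, the vector classes take a no-arg constructor, and
# MC.set_MCsteps takes a single integer step count.
from nose.tools import assert_equal, assert_true

import pMC_mult


def test_int_vector_behaves_like_a_python_sequence():
    v = pMC_mult.IntVector()          # wraps std::vector<int>
    for i in range(3):
        v.push_back(i)                # C++ push_back, exposed directly
    assert_equal(v.size(), 3)         # the C++-style API works...
    assert_equal(len(v), 3)           # ...and so does the Python protocol
    assert_equal(list(v), [0, 1, 2])  # __iter__ delegates to iterator()


def test_double_vector_pop_returns_the_last_element():
    v = pMC_mult.DoubleVector()
    assert_true(v.empty())
    v.push_back(1.5)
    assert_equal(v.pop(), 1.5)        # pop() comes from SWIG's vector typemap


def test_mc_accepts_a_step_count():
    mc = pMC_mult.MC()                # assumed no-arg constructor
    mc.set_MCsteps(100000)            # assumed single-int signature

With nose installed, these tests would be collected and run with nosetests test_pMC_mult.py.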

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving into advanced test scenarios. The Learning Hub compiles step-by-step guides to help you become proficient with test automation frameworks such as Selenium, Cypress, and TestNG.

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Nose automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.
