How to use the test_picker method in Molotov

Best Python code snippets using molotov_python. Note that in the snippets below, test_picker is a local test variable holding an instance of the fake VTK picker class TstVTKPicker, used to simulate cell picking inside a test suite.

test_brain.py

Source: test_brain.py (GitHub)


# -*- coding: utf-8 -*-
#
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#          Eric Larson <larson.eric.d@gmail.com>
#          Joan Massich <mailsik@gmail.com>
#          Guillaume Favelier <guillaume.favelier@gmail.com>
#          Oleh Kozynets <ok7mailbox@gmail.com>
#
# License: Simplified BSD
import os
import os.path as op
import sys
import pytest
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from mne import (read_source_estimate, read_evokeds, read_cov,
                 read_forward_solution, pick_types_forward,
                 SourceEstimate, MixedSourceEstimate, write_surface,
                 VolSourceEstimate, vertex_to_mni)
from mne.minimum_norm import apply_inverse, make_inverse_operator
from mne.source_space import (read_source_spaces,
                              setup_volume_source_space)
from mne.datasets import testing
from mne.fixes import _cell_data
from mne.io import read_info
from mne.utils import check_version
from mne.label import read_label
from mne.viz._brain import Brain, _LinkViewer, _BrainScraper, _LayeredMesh
from mne.viz._brain.colormap import calculate_lut
from matplotlib import cm, image
from matplotlib.lines import Line2D

data_path = testing.data_path(download=False)
subject_id = 'sample'
subjects_dir = op.join(data_path, 'subjects')
sample_dir = op.join(data_path, 'MEG', 'sample')
fname_raw_testing = op.join(sample_dir, 'sample_audvis_trunc_raw.fif')
fname_trans = op.join(sample_dir, 'sample_audvis_trunc-trans.fif')
fname_stc = op.join(sample_dir, 'sample_audvis_trunc-meg')
fname_label = op.join(sample_dir, 'labels', 'Vis-lh.label')
fname_cov = op.join(sample_dir, 'sample_audvis_trunc-cov.fif')
fname_evoked = op.join(sample_dir, 'sample_audvis_trunc-ave.fif')
fname_fwd = op.join(sample_dir, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
src_fname = op.join(subjects_dir, subject_id, 'bem', 'sample-oct-6-src.fif')


class _Collection(object):
    def __init__(self, actors):
        self._actors = actors

    def GetNumberOfItems(self):
        return len(self._actors)

    def GetItemAsObject(self, ii):
        return self._actors[ii]


class TstVTKPicker(object):
    """Class to test cell picking."""

    def __init__(self, mesh, cell_id, hemi, brain):
        self.mesh = mesh
        self.cell_id = cell_id
        self.point_id = None
        self.hemi = hemi
        self.brain = brain
        self._actors = ()

    def GetCellId(self):
        """Return the picked cell."""
        return self.cell_id

    def GetDataSet(self):
        """Return the picked mesh."""
        return self.mesh

    def GetPickPosition(self):
        """Return the picked position."""
        if self.hemi == 'vol':
            self.point_id = self.cell_id
            return self.brain._data['vol']['grid_coords'][self.cell_id]
        else:
            vtk_cell = self.mesh.GetCell(self.cell_id)
            cell = [vtk_cell.GetPointId(point_id) for point_id
                    in range(vtk_cell.GetNumberOfPoints())]
            self.point_id = cell[0]
            return self.mesh.points[self.point_id]

    def GetProp3Ds(self):
        """Return all picked Prop3Ds."""
        return _Collection(self._actors)

    def GetRenderer(self):
        """Return the "renderer"."""
        return self  # set this to also be the renderer and active camera

    GetActiveCamera = GetRenderer

    def GetPosition(self):
        """Return the position."""
        return np.array(self.GetPickPosition()) - (0, 0, 100)


def test_layered_mesh(renderer_interactive_pyvistaqt):
    """Test management of scalars/colormap overlay."""
    mesh = _LayeredMesh(
        renderer=renderer_interactive_pyvistaqt._get_renderer(size=(300, 300)),
        vertices=np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0], [1, 1, 0]]),
        triangles=np.array([[0, 1, 2], [1, 2, 3]]),
        normals=np.array([[0, 0, 1]] * 4),
    )
    assert not mesh._is_mapped
    mesh.map()
    assert mesh._is_mapped
    assert mesh._current_colors is None
    assert mesh._cached_colors is None
    mesh.update()
    assert len(mesh._overlays) == 0
    mesh.add_overlay(
        scalars=np.array([0, 1, 1, 0]),
        colormap=np.array([(1, 1, 1, 1), (0, 0, 0, 0)]),
        rng=[0, 1],
        opacity=None,
        name='test1',
    )
    assert mesh._current_colors is not None
    assert mesh._cached_colors is None
    assert len(mesh._overlays) == 1
    assert 'test1' in mesh._overlays
    mesh.add_overlay(
        scalars=np.array([1, 0, 0, 1]),
        colormap=np.array([(1, 1, 1, 1), (0, 0, 0, 0)]),
        rng=[0, 1],
        opacity=None,
        name='test2',
    )
    assert mesh._current_colors is not None
    assert mesh._cached_colors is not None
    assert len(mesh._overlays) == 2
    assert 'test2' in mesh._overlays
    mesh.remove_overlay('test2')
    assert 'test2' not in mesh._overlays
    mesh.update()
    assert len(mesh._overlays) == 1
    mesh._clean()


@testing.requires_testing_data
def test_brain_gc(renderer_pyvistaqt, brain_gc):
    """Test that a minimal version of Brain gets GC'ed."""
    brain = Brain('fsaverage', 'both', 'inflated', subjects_dir=subjects_dir)
    brain.close()


@testing.requires_testing_data
def test_brain_routines(renderer, brain_gc):
    """Test backend agnostic Brain routines."""
    brain_klass = renderer.get_brain_class()
    from mne.viz._brain import Brain
    assert brain_klass == Brain


@testing.requires_testing_data
def test_brain_init(renderer_pyvistaqt, tmp_path, pixel_ratio, brain_gc):
    """Test initialization of the Brain instance."""
    from mne.source_estimate import _BaseSourceEstimate

    class FakeSTC(_BaseSourceEstimate):
        def __init__(self):
            pass
    hemi = 'lh'
    surf = 'inflated'
    cortex = 'low_contrast'
    title = 'test'
    size = (300, 300)
    kwargs = dict(subject_id=subject_id, subjects_dir=subjects_dir)
    with pytest.raises(ValueError, match='"size" parameter must be'):
        Brain(hemi=hemi, surf=surf, size=[1, 2, 3], **kwargs)
    with pytest.raises(ValueError, match='.*hemi.*Allowed values.*'):
        Brain(hemi='foo', surf=surf, **kwargs)
    with pytest.raises(ValueError, match='.*view.*Allowed values.*'):
        Brain(hemi='lh', surf=surf, views='foo', **kwargs)
    with pytest.raises(TypeError, match='figure'):
        Brain(hemi=hemi, surf=surf, figure='foo', **kwargs)
    with pytest.raises(TypeError, match='interaction'):
        Brain(hemi=hemi, surf=surf, interaction=0, **kwargs)
    with pytest.raises(ValueError, match='interaction'):
        Brain(hemi=hemi, surf=surf, interaction='foo', **kwargs)
    with pytest.raises(FileNotFoundError, match=r'lh\.whatever'):
        Brain(subject_id, 'lh', 'whatever')
    with pytest.raises(ValueError, match='`surf` cannot be seghead'):
        Brain(hemi='lh', surf='seghead', **kwargs)
    with pytest.raises(ValueError, match='RGB argument'):
        Brain('sample', cortex='badcolor')
    Brain(subject_id, hemi=None, surf=None)  # test no surfaces
    renderer_pyvistaqt.backend._close_all()
    brain = Brain(hemi=hemi, surf=surf, size=size, title=title,
                  cortex=cortex, units='m',
                  silhouette=dict(decimate=0.95), **kwargs)
    assert 'data' not in brain._actors
    with pytest.raises(TypeError, match='not supported'):
        brain._check_stc(hemi='lh', array=FakeSTC(), vertices=None)
    with pytest.raises(ValueError, match='add_data'):
        brain.setup_time_viewer(time_viewer=True)
    brain._hemi = 'foo'  # for testing: hemis
    with pytest.raises(ValueError, match='not be None'):
        brain._check_hemi(hemi=None)
    with pytest.raises(ValueError, match='Invalid.*hemi.*Allowed'):
        brain._check_hemi(hemi='foo')
    brain._hemi = hemi  # end testing: hemis
    with pytest.raises(ValueError, match='bool or positive'):
        brain._to_borders(None, None, 'foo')
    assert brain.interaction == 'trackball'
    # add_data
    stc = read_source_estimate(fname_stc)
    fmin = stc.data.min()
    fmax = stc.data.max()
    for h in brain._hemis:
        if h == 'lh':
            hi = 0
        else:
            hi = 1
        hemi_data = stc.data[:len(stc.vertices[hi]), 10]
        hemi_vertices = stc.vertices[hi]
        with pytest.raises(TypeError, match='scale_factor'):
            brain.add_data(hemi_data, hemi=h, scale_factor='foo')
        with pytest.raises(TypeError, match='vector_alpha'):
            brain.add_data(hemi_data, hemi=h, vector_alpha='foo')
        with pytest.raises(ValueError, match='thresh'):
            brain.add_data(hemi_data, hemi=h, thresh=-1)
        with pytest.raises(ValueError, match='remove_existing'):
            brain.add_data(hemi_data, hemi=h, remove_existing=-1)
        with pytest.raises(ValueError, match='time_label_size'):
            brain.add_data(hemi_data, hemi=h, time_label_size=-1,
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='is positive'):
            brain.add_data(hemi_data, hemi=h, smoothing_steps=-1,
                           vertices=hemi_vertices)
        with pytest.raises(TypeError, match='int or NoneType'):
            brain.add_data(hemi_data, hemi=h, smoothing_steps='foo')
        with pytest.raises(ValueError, match='dimension mismatch'):
            brain.add_data(array=np.array([0, 1, 2]), hemi=h,
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='vertices parameter must not be'):
            brain.add_data(hemi_data, fmin=fmin, hemi=hemi,
                           fmax=fmax, vertices=None)
        with pytest.raises(ValueError, match='has shape'):
            brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=hemi,
                           fmax=fmax, vertices=None, time=[0, 1])
        brain.add_data(hemi_data, fmin=fmin, hemi=h, fmax=fmax,
                       colormap='hot', vertices=hemi_vertices,
                       smoothing_steps='nearest', colorbar=(0, 0), time=None)
        with pytest.raises(ValueError, match='brain has no defined times'):
            brain.set_time(0.)
        assert brain.data['lh']['array'] is hemi_data
        assert brain.views == ['lateral']
        assert brain.hemis == ('lh',)
        brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h, fmax=fmax,
                       colormap='hot', vertices=hemi_vertices,
                       smoothing_steps=1, initial_time=0., colorbar=False,
                       time=[0])
        with pytest.raises(ValueError, match='the range of available times'):
            brain.set_time(7.)
        brain.set_time(0.)
        brain.set_time_point(0)  # should hit _safe_interp1d
        with pytest.raises(ValueError, match='consistent with'):
            brain.add_data(hemi_data[:, np.newaxis], fmin=fmin, hemi=h,
                           fmax=fmax, colormap='hot', vertices=hemi_vertices,
                           smoothing_steps='nearest', colorbar=False,
                           time=[1])
        with pytest.raises(ValueError, match='different from'):
            brain.add_data(hemi_data[:, np.newaxis][:, [0, 0]],
                           fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='need shape'):
            brain.add_data(hemi_data[:, np.newaxis], time=[0, 1],
                           fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
                           vertices=hemi_vertices)
        with pytest.raises(ValueError, match='If array has 3'):
            brain.add_data(hemi_data[:, np.newaxis, np.newaxis],
                           fmin=fmin, hemi=h, fmax=fmax, colormap='hot',
                           vertices=hemi_vertices)
    assert len(brain._actors['data']) == 4
    brain.remove_data()
    assert 'data' not in brain._actors
    # add label
    label = read_label(fname_label)
    with pytest.raises(ValueError, match="not a filename"):
        brain.add_label(0)
    with pytest.raises(ValueError, match="does not exist"):
        brain.add_label('foo', subdir='bar')
    label.name = None  # test unnamed label
    brain.add_label(label, scalar_thresh=0., color="green")
    assert isinstance(brain.labels[label.hemi], list)
    overlays = brain._layered_meshes[label.hemi]._overlays
    assert 'unnamed0' in overlays
    assert np.allclose(overlays['unnamed0']._colormap[0],
                       [0, 0, 0, 0])  # first component is transparent
    assert np.allclose(overlays['unnamed0']._colormap[1],
                       [0, 128, 0, 255])  # second is green
    brain.remove_labels()
    assert 'unnamed0' not in overlays
    brain.add_label(fname_label)
    brain.add_label('V1', borders=True)
    brain.remove_labels()
    brain.remove_labels()
    # add foci
    brain.add_foci([0], coords_as_verts=True,
                   hemi=hemi, color='blue')
    # add head and skull
    brain.add_head(color='red', alpha=0.1)
    brain.remove_head()
    brain.add_skull(outer=True, color='green', alpha=0.1)
    brain.remove_skull()
    # add volume labels
    brain.add_volume_labels(
        aseg='aseg', labels=('Brain-Stem', 'Left-Hippocampus',
                             'Left-Amygdala'))
    brain.remove_volume_labels()
    # add sensors
    info = read_info(fname_raw_testing)
    brain.add_sensors(info, trans=fname_trans)
    for kind in ('meg', 'eeg', 'fnirs', 'ecog', 'seeg', 'dbs', 'helmet'):
        brain.remove_sensors(kind)
    brain.add_sensors(info, trans=fname_trans)
    brain.remove_sensors()
    info['chs'][0]['coord_frame'] = 99
    with pytest.raises(RuntimeError, match='must be "meg", "head" or "mri"'):
        brain.add_sensors(info, trans=fname_trans)
    # add text
    brain.add_text(x=0, y=0, text='foo')
    with pytest.raises(ValueError, match='already exists'):
        brain.add_text(x=0, y=0, text='foo')
    brain.remove_text('foo')
    brain.add_text(x=0, y=0, text='foo')
    brain.remove_text()
    brain.close()
    # add annotation
    annots = ['aparc', op.join(subjects_dir, 'fsaverage', 'label',
                               'lh.PALS_B12_Lobes.annot')]
    borders = [True, 2]
    alphas = [1, 0.5]
    colors = [None, 'r']
    brain = Brain(subject_id='fsaverage', hemi='both', size=size,
                  surf='inflated', subjects_dir=subjects_dir)
    with pytest.raises(RuntimeError, match="both hemispheres"):
        brain.add_annotation(annots[-1])
    with pytest.raises(ValueError, match="does not exist"):
        brain.add_annotation('foo')
    brain.close()
    brain = Brain(subject_id='fsaverage', hemi=hemi, size=size,
                  surf='inflated', subjects_dir=subjects_dir)
    for a, b, p, color in zip(annots, borders, alphas, colors):
        brain.add_annotation(a, b, p, color=color)
    view_args = dict(roll=1, distance=500, focalpoint=(1e-5, 1e-5, 1e-5))
    cam = brain._renderer.figure.plotter.camera
    previous_roll = cam.GetRoll()
    brain.show_view(**view_args)
    assert_allclose(cam.GetFocalPoint(), view_args["focalpoint"])
    assert_allclose(cam.GetDistance(), view_args["distance"])
    assert_allclose(cam.GetRoll(), previous_roll + view_args["roll"])
    del view_args
    # image and screenshot
    fname = op.join(str(tmp_path), 'test.png')
    assert not op.isfile(fname)
    brain.save_image(fname)
    assert op.isfile(fname)
    fp = np.array(
        brain._renderer.figure.plotter.renderer.ComputeVisiblePropBounds())
    fp = (fp[1::2] + fp[::2]) * 0.5
    azimuth, elevation = 180., 90.
    for view_args in (dict(azimuth=azimuth, elevation=elevation,
                           focalpoint='auto'),
                      dict(view='lateral', hemi='lh')):
        brain.show_view(**view_args)
        assert_allclose(brain._renderer.figure._azimuth, azimuth)
        assert_allclose(brain._renderer.figure._elevation, elevation)
        assert_allclose(cam.GetFocalPoint(), fp)
    del view_args
    img = brain.screenshot(mode='rgba')
    want_size = np.array([size[0] * pixel_ratio, size[1] * pixel_ratio, 4])
    assert_allclose(img.shape, want_size)
    brain.close()


@testing.requires_testing_data
@pytest.mark.skipif(os.getenv('CI_OS_NAME', '') == 'osx',
                    reason='Unreliable/segfault on macOS CI')
@pytest.mark.parametrize('hemi', ('lh', 'rh'))
def test_single_hemi(hemi, renderer_interactive_pyvistaqt, brain_gc):
    """Test single hemi support."""
    stc = read_source_estimate(fname_stc)
    idx, order = (0, 1) if hemi == 'lh' else (1, -1)
    stc = SourceEstimate(
        getattr(stc, f'{hemi}_data'), [stc.vertices[idx], []][::order],
        0, 1, 'sample')
    brain = stc.plot(
        subjects_dir=subjects_dir, hemi='both', size=300,
        cortex='0.5')  # single cortex string arg
    brain.close()
    # test skipping when len(vertices) == 0
    stc.vertices[1 - idx] = np.array([])
    brain = stc.plot(
        subjects_dir=subjects_dir, hemi=hemi, size=300)
    brain.close()


@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_save_movie(tmp_path, renderer, brain_gc):
    """Test saving a movie of a Brain instance."""
    from imageio_ffmpeg import count_frames_and_secs
    brain = _create_testing_brain(hemi='lh', time_viewer=False,
                                  cortex=['r', 'b'])  # custom binarized
    filename = str(op.join(tmp_path, "brain_test.mov"))
    for interactive_state in (False, True):
        # for coverage, we set interactivity
        if interactive_state:
            brain._renderer.plotter.enable()
        else:
            brain._renderer.plotter.disable()
        with pytest.raises(TypeError, match='unexpected keyword argument'):
            brain.save_movie(filename, time_dilation=1, tmin=1, tmax=1.1,
                             bad_name='blah')
        assert not op.isfile(filename)
        tmin = 1
        tmax = 5
        duration = np.floor(tmax - tmin)
        brain.save_movie(filename, time_dilation=1., tmin=tmin,
                         tmax=tmax, interpolation='nearest')
        assert op.isfile(filename)
        _, nsecs = count_frames_and_secs(filename)
        assert_allclose(duration, nsecs, atol=0.2)
        os.remove(filename)
    brain.close()


_TINY_SIZE = (350, 300)


def tiny(tmp_path):
    """Create a tiny fake brain."""
    # This is a minimal version of what we need for our viz-with-timeviewer
    # support currently
    subject = 'test'
    (tmp_path / subject).mkdir()
    subject_dir = tmp_path / subject
    (subject_dir / 'surf').mkdir()
    surf_dir = subject_dir / 'surf'
    rng = np.random.RandomState(0)
    rr = rng.randn(4, 3)
    tris = np.array([[0, 1, 2], [2, 1, 3]])
    curv = rng.randn(len(rr))
    with open(surf_dir / 'lh.curv', 'wb') as fid:
        fid.write(np.array([255, 255, 255], dtype=np.uint8))
        fid.write(np.array([len(rr), 0, 1], dtype='>i4'))
        fid.write(curv.astype('>f4'))
    write_surface(surf_dir / 'lh.white', rr, tris)
    write_surface(surf_dir / 'rh.white', rr, tris)  # needed for vertex tc
    vertices = [np.arange(len(rr)), []]
    data = rng.randn(len(rr), 10)
    stc = SourceEstimate(data, vertices, 0, 1, subject)
    brain = stc.plot(subjects_dir=tmp_path, hemi='lh', surface='white',
                     size=_TINY_SIZE)
    # in principle this should be sufficient:
    #
    # ratio = brain.mpl_canvas.canvas.window().devicePixelRatio()
    #
    # but in practice VTK can mess up sizes, so let's just calculate it.
    sz = brain.plotter.size()
    sz = (sz.width(), sz.height())
    sz_ren = brain.plotter.renderer.GetSize()
    ratio = np.median(np.array(sz_ren) / np.array(sz))
    return brain, ratio


@pytest.mark.filterwarnings('ignore:.*constrained_layout not applied.*:')
def test_brain_screenshot(renderer_interactive_pyvistaqt, tmp_path, brain_gc):
    """Test time viewer screenshot."""
    # XXX disable for sprint because it's too unreliable
    if sys.platform == 'darwin' and os.getenv('GITHUB_ACTIONS', '') == 'true':
        pytest.skip('Test is unreliable on GitHub Actions macOS')
    tiny_brain, ratio = tiny(tmp_path)
    img_nv = tiny_brain.screenshot(time_viewer=False)
    want = (_TINY_SIZE[1] * ratio, _TINY_SIZE[0] * ratio, 3)
    assert img_nv.shape == want
    img_v = tiny_brain.screenshot(time_viewer=True)
    assert img_v.shape[1:] == want[1:]
    assert_allclose(img_v.shape[0], want[0] * 4 / 3, atol=3)  # some slop
    tiny_brain.close()


def _assert_brain_range(brain, rng):
    __tracebackhide__ = True
    assert brain._cmap_range == rng, 'brain._cmap_range == rng'
    for hemi, layerer in brain._layered_meshes.items():
        for key, mesh in layerer._overlays.items():
            if key == 'curv':
                continue
            assert mesh._rng == rng, \
                f'_layered_meshes[{repr(hemi)}][{repr(key)}]._rng != {rng}'


@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_time_viewer(renderer_interactive_pyvistaqt, pixel_ratio,
                           brain_gc):
    """Test time viewer primitives."""
    with pytest.raises(ValueError, match="between 0 and 1"):
        _create_testing_brain(hemi='lh', show_traces=-1.0)
    with pytest.raises(ValueError, match="got unknown keys"):
        _create_testing_brain(hemi='lh', surf='white', src='volume',
                              volume_options={'foo': 'bar'})
    brain = _create_testing_brain(
        hemi='both', show_traces=False,
        brain_kwargs=dict(silhouette=dict(decimate=0.95))
    )
    # test sub routines when show_traces=False
    brain._on_pick(None, None)
    brain._configure_vertex_time_course()
    brain._configure_label_time_course()
    brain.setup_time_viewer()  # for coverage
    brain.callbacks["time"](value=0)
    assert "renderer" not in brain.callbacks
    brain.callbacks["orientation"](
        value='lat',
        update_widget=True
    )
    brain.callbacks["orientation"](
        value='medial',
        update_widget=True
    )
    brain.callbacks["time"](
        value=0.0,
        time_as_index=False,
    )
    # Need to process events for old Qt
    brain.callbacks["smoothing"](value=1)
    _assert_brain_range(brain, [0.1, 0.3])
    from mne.utils import use_log_level
    print('\nCallback fmin\n')
    with use_log_level('debug'):
        brain.callbacks["fmin"](value=12.0)
    assert brain._data["fmin"] == 12.0
    brain.callbacks["fmax"](value=4.0)
    _assert_brain_range(brain, [4.0, 4.0])
    brain.callbacks["fmid"](value=6.0)
    _assert_brain_range(brain, [4.0, 6.0])
    brain.callbacks["fmid"](value=4.0)
    brain.callbacks["fplus"]()
    brain.callbacks["fminus"]()
    brain.callbacks["fmin"](value=12.0)
    brain.callbacks["fmid"](value=4.0)
    _assert_brain_range(brain, [4.0, 12.0])
    brain._shift_time(op=lambda x, y: x + y)
    brain._shift_time(op=lambda x, y: x - y)
    brain._rotate_azimuth(15)
    brain._rotate_elevation(15)
    brain.toggle_interface()
    brain.toggle_interface(value=False)
    brain.callbacks["playback_speed"](value=0.1)
    brain.toggle_playback()
    brain.toggle_playback(value=False)
    brain.apply_auto_scaling()
    brain.restore_user_scaling()
    brain.reset()
    assert brain.help_canvas is not None
    assert not brain.help_canvas.canvas.isVisible()
    brain.help()
    assert brain.help_canvas.canvas.isVisible()
    # screenshot
    # Need to turn the interface back on otherwise the window is too wide
    # (it keeps the window size and expands the 3D area when the interface
    # is toggled off)
    brain.toggle_interface(value=True)
    brain.show_view(azimuth=180., elevation=90.)
    img = brain.screenshot(mode='rgb')
    want_shape = np.array([300 * pixel_ratio, 300 * pixel_ratio, 3])
    assert_allclose(img.shape, want_shape)
    brain.close()


@testing.requires_testing_data
@pytest.mark.parametrize('hemi', [
    'lh',
    pytest.param('rh', marks=pytest.mark.slowtest),
    pytest.param('split', marks=pytest.mark.slowtest),
    pytest.param('both', marks=pytest.mark.slowtest),
])
@pytest.mark.parametrize('src', [
    'surface',
    pytest.param('vector', marks=pytest.mark.slowtest),
    pytest.param('volume', marks=pytest.mark.slowtest),
    pytest.param('mixed', marks=pytest.mark.slowtest),
])
@pytest.mark.slowtest
def test_brain_traces(renderer_interactive_pyvistaqt, hemi, src, tmp_path,
                      brain_gc):
    """Test brain traces."""
    hemi_str = list()
    if src in ('surface', 'vector', 'mixed'):
        hemi_str.extend([hemi] if hemi in ('lh', 'rh') else ['lh', 'rh'])
    if src in ('mixed', 'volume'):
        hemi_str.extend(['vol'])
    # label traces
    brain = _create_testing_brain(
        hemi=hemi, surf='white', src=src, show_traces='label',
        volume_options=None,  # for speed, don't upsample
        n_time=5, initial_time=0,
    )
    if src == 'surface':
        brain._data['src'] = None  # test src=None
    if src in ('surface', 'vector', 'mixed'):
        assert brain.show_traces
        assert brain.traces_mode == 'label'
        brain.widgets["extract_mode"].set_value('max')
        # test picking a cell at random
        rng = np.random.RandomState(0)
        for idx, current_hemi in enumerate(hemi_str):
            if current_hemi == 'vol':
                continue
            current_mesh = brain._layered_meshes[current_hemi]._polydata
            cell_id = rng.randint(0, current_mesh.n_cells)
            test_picker = TstVTKPicker(
                current_mesh, cell_id, current_hemi, brain)
            assert len(brain.picked_patches[current_hemi]) == 0
            brain._on_pick(test_picker, None)
            assert len(brain.picked_patches[current_hemi]) == 1
            for label_id in list(brain.picked_patches[current_hemi]):
                label = brain._annotation_labels[current_hemi][label_id]
                assert isinstance(label._line, Line2D)
            brain.widgets["extract_mode"].set_value('mean')
            brain.clear_glyphs()
            assert len(brain.picked_patches[current_hemi]) == 0
            brain._on_pick(test_picker, None)  # picked and added
            assert len(brain.picked_patches[current_hemi]) == 1
            brain._on_pick(test_picker, None)  # picked again so removed
            assert len(brain.picked_patches[current_hemi]) == 0
        # test switching from 'label' to 'vertex'
        brain.widgets["annotation"].set_value('None')
        brain.widgets["extract_mode"].set_value('max')
    else:  # volume
        assert "annotation" not in brain.widgets
        assert "extract_mode" not in brain.widgets
    brain.close()
    # test colormap
    if src != 'vector':
        brain = _create_testing_brain(
            hemi=hemi, surf='white', src=src, show_traces=0.5,
            initial_time=0,
            volume_options=None,  # for speed, don't upsample
            n_time=1 if src == 'mixed' else 5, diverging=True,
            add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)),
        )
        # mne_analyze should be chosen
        ctab = brain._data['ctable']
        assert_array_equal(ctab[0], [0, 255, 255, 255])  # opaque cyan
        assert_array_equal(ctab[-1], [255, 255, 0, 255])  # opaque yellow
        assert_allclose(ctab[len(ctab) // 2], [128, 128, 128, 0], atol=3)
        brain.close()
    # vertex traces
    brain = _create_testing_brain(
        hemi=hemi, surf='white', src=src, show_traces=0.5,
        initial_time=0,
        volume_options=None,  # for speed, don't upsample
        n_time=1 if src == 'mixed' else 5,
        add_data_kwargs=dict(colorbar_kwargs=dict(n_labels=3)),
    )
    assert brain.show_traces
    assert brain.traces_mode == 'vertex'
    assert hasattr(brain, "picked_points")
    assert hasattr(brain, "_spheres")
    assert brain._scalar_bar.GetNumberOfLabels() == 3
    # add foci should work for volumes
    brain.add_foci([[0, 0, 0]], hemi='lh' if src == 'surface' else 'vol')
    # test points picked by default
    picked_points = brain.get_picked_points()
    spheres = brain._spheres
    for current_hemi in hemi_str:
        assert len(picked_points[current_hemi]) == 1
    n_spheres = len(hemi_str)
    n_actors = n_spheres
    if hemi == 'split' and src in ('mixed', 'volume'):
        n_spheres += 1
    assert len(spheres) == n_spheres
    # test that there are actually enough actors
    assert len(brain._actors['data']) == n_actors
    # test switching from 'vertex' to 'label'
    if src == 'surface':
        brain.widgets["annotation"].set_value('aparc')
        brain.widgets["annotation"].set_value('None')
    # test removing points
    brain.clear_glyphs()
    assert len(spheres) == 0
    for key in ('lh', 'rh', 'vol'):
        assert len(picked_points[key]) == 0
    # test picking a cell at random
    rng = np.random.RandomState(0)
    for idx, current_hemi in enumerate(hemi_str):
        assert len(spheres) == 0
        if current_hemi == 'vol':
            current_mesh = brain._data['vol']['grid']
            vertices = brain._data['vol']['vertices']
            values = _cell_data(current_mesh)['values'][vertices]
            cell_id = vertices[np.argmax(np.abs(values))]
        else:
            current_mesh = brain._layered_meshes[current_hemi]._polydata
            cell_id = rng.randint(0, current_mesh.n_cells)
        test_picker = TstVTKPicker(None, None, current_hemi, brain)
        assert brain._on_pick(test_picker, None) is None
        test_picker = TstVTKPicker(
            current_mesh, cell_id, current_hemi, brain)
        assert cell_id == test_picker.cell_id
        assert test_picker.point_id is None
        brain._on_pick(test_picker, None)
        brain._on_pick(test_picker, None)
        assert test_picker.point_id is not None
        assert len(picked_points[current_hemi]) == 1
        assert picked_points[current_hemi][0] == test_picker.point_id
        assert len(spheres) > 0
        sphere = spheres[-1]
        vertex_id = sphere._vertex_id
        assert vertex_id == test_picker.point_id
        line = sphere._line
        hemi_prefix = current_hemi[0].upper()
        if current_hemi == 'vol':
            assert hemi_prefix + ':' in line.get_label()
            assert 'MNI' in line.get_label()
            continue  # the MNI conversion is more complex
        hemi_int = 0 if current_hemi == 'lh' else 1
        mni = vertex_to_mni(
            vertices=vertex_id,
            hemis=hemi_int,
            subject=brain._subject_id,
            subjects_dir=brain._subjects_dir
        )
        label = "{}:{} MNI: {}".format(
            hemi_prefix, str(vertex_id).ljust(6),
            ', '.join('%5.1f' % m for m in mni))
        assert line.get_label() == label
        # remove the sphere by clicking in its vicinity
        old_len = len(spheres)
        test_picker._actors = sum((s._actors for s in spheres), [])
        brain._on_pick(test_picker, None)
        assert len(spheres) < old_len
    screenshot = brain.screenshot()
    screenshot_all = brain.screenshot(time_viewer=True)
    assert screenshot.shape[0] < screenshot_all.shape[0]
    # and the scraper for it (will close the instance)
    # only test one condition to save time
    if not (hemi == 'rh' and src == 'surface' and
            check_version('sphinx_gallery')):
        brain.close()
        return
    fnames = [str(tmp_path / f'temp_{ii}.png') for ii in range(2)]
    block_vars = dict(image_path_iterator=iter(fnames),
                      example_globals=dict(brain=brain))
    block = ('code', """
something
# brain.save_movie(time_dilation=1, framerate=1,
#                  interpolation='linear', time_viewer=True)
#
""", 1)
    gallery_conf = dict(src_dir=str(tmp_path), compress_images=[])
    scraper = _BrainScraper()
    rst = scraper(block, block_vars, gallery_conf)
    assert brain.plotter is None  # closed
    gif_0 = fnames[0][:-3] + 'gif'
    for fname in (gif_0, fnames[1]):
        assert op.basename(fname) in rst
        assert op.isfile(fname)
        img = image.imread(fname)
        assert img.shape[1] == screenshot.shape[1]  # same width
        assert img.shape[0] > screenshot.shape[0]  # larger height
        assert img.shape[:2] == screenshot_all.shape[:2]


@testing.requires_testing_data
@pytest.mark.slowtest
def test_brain_linkviewer(renderer_interactive_pyvistaqt, brain_gc):
    """Test _LinkViewer primitives."""
    brain1 = _create_testing_brain(hemi='lh', show_traces=False)
    brain2 = _create_testing_brain(hemi='lh', show_traces='separate')
    brain1._times = brain1._times * 2
    with pytest.warns(RuntimeWarning, match='linking time'):
        link_viewer = _LinkViewer(
            [brain1, brain2],
            time=True,
            camera=False,
            colorbar=False,
            picking=False,
        )
    brain1.close()
    brain_data = _create_testing_brain(hemi='split', show_traces='vertex')
    link_viewer = _LinkViewer(
        [brain2, brain_data],
        time=True,
        camera=True,
        colorbar=True,
        picking=True,
    )
    link_viewer.leader.set_time_point(0)
    link_viewer.leader.mpl_canvas.time_func(0)
    link_viewer.leader.callbacks["fmin"](0)
    link_viewer.leader.callbacks["fmid"](0.5)
    link_viewer.leader.callbacks["fmax"](1)
    link_viewer.leader.set_playback_speed(0.1)
    link_viewer.leader.toggle_playback()
    brain2.close()
    brain_data.close()


def test_calculate_lut():
    """Test brain's colormap functions."""
    colormap = "coolwarm"
    alpha = 1.0
    fmin = 0.0
    fmid = 0.5
    fmax = 1.0
    center = None
    calculate_lut(colormap, alpha=alpha, fmin=fmin,
                  fmid=fmid, fmax=fmax, center=center)
    center = 0.0
    colormap = cm.get_cmap(colormap)
    calculate_lut(colormap, alpha=alpha, fmin=fmin,
                  fmid=fmid, fmax=fmax, center=center)
    cmap = cm.get_cmap(colormap)
    zero_alpha = np.array([1., 1., 1., 0])
    half_alpha = np.array([1., 1., 1., 0.5])
    atol = 1.5 / 256.
    # fmin < fmid < fmax
    lut = calculate_lut(colormap, alpha, 1, 2, 3)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 0, 1, 2, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[63], cmap(0.25), atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[192], cmap(0.75), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # fmin == fmid == fmax
    lut = calculate_lut(colormap, alpha, 1, 1, 1)
    zero_alpha = np.array([1., 1., 1., 0])
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
    assert_allclose(lut[1], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 0, 0, 0, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # fmin == fmid < fmax
    lut = calculate_lut(colormap, alpha, 1, 1, 2)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0.) * zero_alpha, atol=atol)
    assert_allclose(lut[1], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 1, 1, 2, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[62], cmap(0.245), atol=atol)
    assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[193], cmap(0.755), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    lut = calculate_lut(colormap, alpha, 0, 0, 1, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[126], cmap(0.25), atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[129], cmap(0.75), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # fmin < fmid == fmax
    lut = calculate_lut(colormap, alpha, 1, 2, 2)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0) * zero_alpha, atol=atol)
    assert_allclose(lut[-2], cmap(0.5), atol=atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    # divergent
    lut = calculate_lut(colormap, alpha, 1, 2, 2, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
    assert_allclose(lut[32], cmap(0.375) * half_alpha, atol=atol)
    assert_allclose(lut[64], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[191], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[223], cmap(0.625) * half_alpha, atol=atol)
    assert_allclose(lut[-2], cmap(0.7475), atol=2 * atol)
    assert_allclose(lut[-1], cmap(1.), atol=2 * atol)
    lut = calculate_lut(colormap, alpha, 0, 1, 1, 0)
    assert lut.shape == (256, 4)
    assert_allclose(lut[0], cmap(0), atol=atol)
    assert_allclose(lut[1], cmap(0.25), atol=2 * atol)
    assert_allclose(lut[64], cmap(0.375) * half_alpha, atol=atol)
    assert_allclose(lut[127], cmap(0.5) * zero_alpha, atol=atol)
    assert_allclose(lut[191], cmap(0.625) * half_alpha, atol=atol)
    assert_allclose(lut[-2], cmap(0.75), atol=2 * atol)
    assert_allclose(lut[-1], cmap(1.), atol=atol)
    with pytest.raises(ValueError, match=r'.*fmin \(1\) <= fmid \(0\) <= fma'):
        calculate_lut(colormap, alpha, 1, 0, 2)


def _create_testing_brain(hemi, surf='inflated', src='surface',
                          size=300, n_time=5, diverging=False, **kwargs):
    assert src in ('surface', 'vector', 'mixed', 'volume')
    meth = 'plot'
    if src in ('surface', 'mixed'):
        sample_src = read_source_spaces(src_fname)
        klass = MixedSourceEstimate if src == 'mixed' else SourceEstimate
    if src == 'vector':
        fwd = read_forward_solution(fname_fwd)
        fwd = pick_types_forward(fwd, meg=True, eeg=False)
        evoked = read_evokeds(fname_evoked, baseline=(None, 0))[0]
        noise_cov = read_cov(fname_cov)
        free = make_inverse_operator(
            evoked.info, fwd, noise_cov, loose=1.)
        stc = apply_inverse(evoked, free, pick_ori='vector')
        return stc.plot(
            subject=subject_id, hemi=hemi, size=size,
            subjects_dir=subjects_dir, colormap='auto',
            **kwargs)
    if src in ('volume', 'mixed'):
        vol_src = setup_volume_source_space(
            subject_id, 7., mri='aseg.mgz',
            volume_label='Left-Cerebellum-Cortex',
            subjects_dir=subjects_dir, add_interpolator=False)
        assert len(vol_src) == 1
        assert vol_src[0]['nuse'] == 150
        if src == 'mixed':
            sample_src = sample_src + vol_src
        else:
            sample_src = vol_src
            klass = VolSourceEstimate
            meth = 'plot_3d'
    assert sample_src.kind == src
    # dense version
    rng = np.random.RandomState(0)
    vertices = [s['vertno'] for s in sample_src]
    n_verts = sum(len(v) for v in vertices)
    stc_data = np.zeros((n_verts * n_time))
    stc_size = stc_data.size
    stc_data[(rng.rand(stc_size // 20) * stc_size).astype(int)] = \
        rng.rand(stc_data.size // 20)
    stc_data.shape = (n_verts, n_time)
    if diverging:
        stc_data -= 0.5
    stc = klass(stc_data, vertices, 1, 1)
    clim = dict(kind='value', lims=[0.1, 0.2, 0.3])
    if diverging:
        clim['pos_lims'] = clim.pop('lims')
    brain_data = getattr(stc, meth)(
        subject=subject_id, hemi=hemi, surface=surf, size=size,
        subjects_dir=subjects_dir, colormap='auto',
        clim=clim, src=sample_src,
        **kwargs)
...
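
The snippet above is where test_picker actually appears: it is a local variable holding a TstVTKPicker instance, a duck-typed stand-in for VTK's cell picker that test_brain_traces passes directly to brain._on_pick(test_picker, None) to simulate a mouse pick. The sketch below distills that pattern into a minimal, self-contained example; FakePicker and on_pick are illustrative stand-ins invented for this sketch, not MNE or VTK APIs.

import numpy as np


class FakePicker(object):
    """Duck-typed stand-in for a VTK cell picker, mirroring TstVTKPicker."""

    def __init__(self, mesh, cell_id):
        self.mesh = mesh  # dict with 'points' (coords) and 'cells' (point ids)
        self.cell_id = cell_id
        self.point_id = None

    def GetCellId(self):
        # Return the picked cell, as a real picker would after a click
        return self.cell_id

    def GetPickPosition(self):
        # Resolve the picked cell to its first point, as TstVTKPicker does
        self.point_id = self.mesh['cells'][self.cell_id][0]
        return self.mesh['points'][self.point_id]


def on_pick(picker):
    """Toy pick handler: return the picked vertex id and its position."""
    if picker.GetCellId() < 0:
        return None  # nothing was picked
    pos = np.asarray(picker.GetPickPosition())
    return picker.point_id, pos


mesh = dict(points=np.array([[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]),
            cells=[[0, 1, 2]])
picker = FakePicker(mesh, cell_id=0)
vertex_id, pos = on_pick(picker)
assert vertex_id == 0  # the pick resolved to a concrete vertex
assert np.allclose(pos, [0., 0., 0.])

Because the handler only ever calls the picker's getter methods, any object exposing the same interface can drive it from a test, with no GUI event loop required.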


validate_best_model.py

Source: validate_best_model.py (GitHub)


import pandas as pd
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.dummy import DummyClassifier
from util import train_test_model, print_results_cv, read_dataset, input_data, rescale_data
from sklearn.model_selection import GroupKFold, cross_val_score, train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier, BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
import time
from sklearn.manifold import TSNE
from sklearn.feature_selection import RFE, RFECV
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import mutual_info_classif
from imblearn.under_sampling import RandomUnderSampler


# Compute the correlation matrix and plot it
def calc_corr_fig(standardized_values):
    plt.figure(figsize=(17, 15))
    matrix_correlation = standardized_values.corr()
    sns.heatmap(matrix_correlation, annot=True, fmt=".1f")
    plt.show()


# Visualize the data in 2 dimensions to help guide the analysis
def tsne_scatterplot(x_without_corr_feat, y):
    tsne = TSNE(n_components=2)
    tsne_x = tsne.fit_transform(x_without_corr_feat)
    sns.scatterplot(x=tsne_x[:, 0], y=tsne_x[:, 1], hue=y)
    plt.show()


def select_features(model, train_x, train_y, picker, test_x=None):
    picker.fit(train_x, train_y)
    train_picker = picker.transform(train_x)
    test_picker = None
    if test_x is not None:
        test_picker = picker.transform(test_x)
    return {'train_picker': train_picker, 'test_picker': test_picker, 'picker': picker}


def validate_models_holdout(train_x, train_y, test_x, test_y, models, k_size):
    for model in models:
        print("Printing holdout results for %s" % model.__class__)
        t0 = time.time()
        model = train_test_model(model, train_x, train_y, test_x, test_y)
        # If the classifier has no feature-importance ranking, select by a
        # filter metric instead
        try:
            picker = select_features(model, train_x, train_y, RFE(
                estimator=model, n_features_to_select=k_size, step=1), test_x)
        except Exception:
            picker = select_features(model, train_x, train_y, SelectKBest(
                mutual_info_classif, k=k_size), test_x)
        print("Selecting features with %s" % picker['picker'])
        train_test_model(model, picker['train_picker'],
                         train_y, picker['test_picker'], test_y)
        print("Time for model %s: %d" %
              (model.__class__, round(time.time() - t0, 3)))


def validate_models_cv(x, y, random_groups, models, k_size):
    for model in models:
        print("Printing cross-validation results for %s" % model.__class__)
        cv = GroupKFold(n_splits=5)
        t0 = time.time()
        results = cross_val_score(
            model, x, y, cv=cv, groups=random_groups, scoring='f1_micro')
        print_results_cv(results)
        try:
            picker = select_features(model, x, y, RFECV(
                estimator=model, cv=5, step=1, scoring="f1_micro"), None)
        except Exception:
            picker = select_features(model, x, y, SelectKBest(
                mutual_info_classif, k=k_size), None)
        print("Selecting features with %s" % picker['picker'])
        results = cross_val_score(
            model, picker['train_picker'], y, cv=cv, groups=random_groups,
            scoring='f1_micro')
        print("Time for model %s: %d" %
              (model.__class__, round(time.time() - t0, 3)))
        print_results_cv(results)


def main():
    dataset = read_dataset("https://raw.githubusercontent.com/dataminerdbm/test_data_scientist/main/treino.csv")
    dataset.replace(to_replace=[None], value=np.nan, inplace=True)
    raw_dataset_values = dataset.drop(columns=['inadimplente'])
    transformed_values = input_data(raw_dataset_values)
    standardized_values = rescale_data(transformed_values, raw_dataset_values)
    # calc_corr_fig(standardized_values)
    x = standardized_values
    # Drop the remaining correlated features, keeping only one of each group
    x_without_corr_feat = standardized_values.drop(
        columns=['vezes_passou_de_30_59_dias', 'numero_de_vezes_que_passou_60_89_dias'])
    y = dataset.inadimplente
    SEED = 7707
    np.random.seed(SEED)
    # Stratify the split given how imbalanced the dataset is
    train_x, test_x, train_y, test_y = train_test_split(
        x, y, test_size=0.3, stratify=y)
    train_x_without_corr_feat, test_x_without_corr_feat, train_y_without_corr_feat, test_y_without_corr_feat = train_test_split(
        x_without_corr_feat, y, test_size=0.3, stratify=y)
    undersample = RandomUnderSampler(sampling_strategy='majority')
    X_without_corr_feat_under, y_without_corr_feat_under = undersample.fit_resample(x_without_corr_feat, y)
    x_under, y_under = undersample.fit_resample(x, y)
    train_x_under, train_y_under = undersample.fit_resample(train_x, train_y)
    train_x_without_corr_feat_under, train_y_without_corr_feat_under = undersample.fit_resample(
        train_x_without_corr_feat, train_y_without_corr_feat)
    # tsne_scatterplot(x_without_corr_feat, y)
    # The classifiers under validation were chosen based on the nature of the
    # dataset: numeric features, many dimensions and instances, and a problem
    # that is not linearly separable
    models = [DummyClassifier(), KNeighborsClassifier(), DecisionTreeClassifier(),
              GaussianNB(), AdaBoostClassifier(n_estimators=100), RandomForestClassifier(),
              BaggingClassifier(base_estimator=GaussianNB(), n_estimators=100)]
    k_size = 5
    # Add randomness to the fold groups (to avoid repetition), an approach
    # better suited to imbalanced datasets:
    # https://scikit-learn.org/stable/modules/generated/sklearn.model_selection.GroupKFold.html#sklearn.model_selection.GroupKFold
    x_under['idade_r'] = x_under.idade + np.random.randint(-2, 3, size=14662)
    x_under.idade_r = x_under.idade + abs(x_under.idade.min()) + 1
    print("Validating models with all features")
    validate_models_cv(x_under, y_under, x_under.idade_r, models, k_size)
    validate_models_holdout(train_x_under, train_y_under, test_x, test_y, models, k_size)
    print("Validating models without the correlated features")
    validate_models_cv(X_without_corr_feat_under, y_without_corr_feat_under, x_under.idade_r, models, k_size)
    validate_models_holdout(train_x_without_corr_feat_under, train_y_without_corr_feat_under,
                            test_x_without_corr_feat, test_y_without_corr_feat, models, k_size)


if __name__ == "__main__":
...
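
The select_features helper above hides two scikit-learn selection strategies behind a single picker object: a wrapper method (RFE/RFECV) when the estimator can rank its own features, and a SelectKBest filter on mutual information as the fallback. Here is a runnable sketch of that try/fallback pattern on synthetic data, assuming only scikit-learn; the util helpers from the snippet are not needed.

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE, SelectKBest, mutual_info_classif
from sklearn.neighbors import KNeighborsClassifier
from sklearn.tree import DecisionTreeClassifier

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
for model in (DecisionTreeClassifier(random_state=0), KNeighborsClassifier()):
    try:
        # Wrapper method: recursively eliminate the weakest features
        picker = RFE(estimator=model, n_features_to_select=5, step=1).fit(X, y)
    except (ValueError, RuntimeError):
        # KNN exposes no coef_/feature_importances_, so fall back to a filter
        picker = SelectKBest(mutual_info_classif, k=5).fit(X, y)
    X_selected = picker.transform(X)
    print(type(model).__name__, '->', type(picker).__name__, X_selected.shape)

RFE requires an estimator that exposes coef_ or feature_importances_, which is why fitting it with KNeighborsClassifier raises and the filter method takes over, mirroring the bare try/except in validate_models_holdout.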


Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub, from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles step-by-step guides to help you become proficient with different test automation frameworks such as Selenium, Cypress, and TestNG.

You can also refer to the video tutorials on the LambdaTest YouTube channel for step-by-step demonstrations from industry experts.

Run Molotov automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

