How to use the freemem method in autotest

Best Python code snippet using autotest_python

test_memory.py

Source: test_memory.py (GitHub)

...
# The GC needs to be enabled for these tests to work correctly.
if not getattr(mode_with_gpu.linker, 'allow_gc', False):
    mode_with_gpu.linker = copy.copy(mode_with_gpu.linker)
    mode_with_gpu.linker.allow_gc = True


def freemem(extra_alloc=0):
    """
    Return a summary of GPU memory usage: the number of outstanding
    allocations on the gpu (plus, when available, the memory Theano
    has allocated, in KB).
    """
    gc.collect()
    gc.collect()
    gc.collect()
    n_mallocs = cuda.cuda_ndarray.cuda_ndarray.outstanding_mallocs()

    if hasattr(cuda.cuda_ndarray.cuda_ndarray, "theano_allocated"):
        theano_alloc = cuda.cuda_ndarray.cuda_ndarray.theano_allocated()
        return ("(n malloc/theano mem allocated in KB)",
                n_mallocs + extra_alloc,
                int(theano_alloc / 1024))
    return ("n malloc on the gpu", n_mallocs + extra_alloc)

    # The following is not used by default: if other processes are
    # running on the GPU, it won't give reliable numbers.
    mem_info = cuda.cuda_ndarray.cuda_ndarray.mem_info()
    gpu_used = (mem_info[1] - mem_info[0]) / 1024 ** 2
    mem_info_msg = "(n malloc/gpu mem used in MB)"
    return (mem_info_msg, n_mallocs, int(gpu_used))


def test_memory():
    """
    Test that we do not keep links to memory between Theano function
    calls or during Theano compilation.

    This code originally comes from Aaron van den Oord and Sander
    Dieleman; we have their authorization to include it in Theano
    under the Theano license.

    note::
        This test can fail if another process is running on the gpu.
    """
    shapes = (200, 100)
    # more_alloc1 was different for each dtype in the past.
    # more_alloc2 is still currently not the same for both dtypes.
    # When dtype is float32, the computation is done on the gpu.
    # This inserts constants on the gpu during compilation,
    # which raises the number of allocs.
    # When dtype is float64, only the shared variable is on the gpu and
    # it is transferred to the cpu for computation, so there is no extra
    # alloc after compilation.
    # more_alloc1 applies after the first compilation, more_alloc2 after
    # the second.
    for dtype, more_alloc1, more_alloc2 in [("float32", 0, 3),
                                            ("float64", 0, 0)]:
        print(dtype)
        test_params = np.asarray(np.random.randn(np.prod(shapes)), dtype)

        some_vector = tensor.vector('some_vector', dtype=dtype)
        some_matrix = some_vector.reshape(shapes)

        mem1 = freemem()
        print("Before shared variable", mem1)
        variables = cuda.shared_constructor(np.ones((shapes[1],),
                                                    dtype='float32'))
        derp = tensor.sum(tensor.dot(some_matrix[:shapes[0]], variables))
        print("Shared took ",
              np.prod(variables.get_value(
                  borrow=True,
                  return_internal_type=True).shape) *
              4 / 1024,
              "kB")

        mem2 = freemem()
        print("Before compilation", mem2)
        mem2_1 = freemem(extra_alloc=more_alloc1)
        mem2_2 = freemem(extra_alloc=more_alloc2)
        obj = theano.function([some_vector], derp, mode=mode_with_gpu)
        mem3 = freemem()
        print("After function compilation 1", mem3)
        assert mem2_1 == mem3, (mem2_1, mem3, dtype)

        grad_derp = tensor.grad(derp, some_vector)
        grad = theano.function([some_vector], grad_derp, mode=mode_with_gpu)
        mem4 = freemem()
        print("After function compilation 2", mem4)
        assert mem2_2 == mem4, (mem2_2, mem4, dtype)

        for i in range(3):
            obj(test_params)
            print("After function evaluation 1", freemem())
            assert mem2_2 == freemem(), (mem2_2, freemem())
            grad(test_params)
            print("After function evaluation 2", freemem())
            assert mem2_2 == freemem(), (mem2_2, freemem())

        del obj
        # print("After deleting function 1", freemem())
        # assert mem2 == freemem(), (mem2, freemem())

        del grad
        print("After deleting function 2", freemem())
        assert mem2 == freemem(), (mem2, freemem())

        del derp, variables, grad_derp
        print("After deleting shared variable and ref to it", freemem())
        assert mem1 == freemem(), (mem1, freemem())


@theano.configparser.change_flags(**{'vm.lazy': True})
def test_memory_lazy():
    """As test_memory, but with the ifelse op.

    We need to test it because, with the [c]vm, the ifelse op creates
    ops that are not executed in the graph, and this interacts with the
    [c]vm gc implementation.
    """
    shapes = (50, 100)
    # more_alloc1 is not the same for both dtypes.
    # When dtype is float32, the computation is done on the gpu.
    # This inserts constants on the gpu during compilation,
    # which raises the number of allocs.
    # When dtype is float64, only the shared variable is on the gpu and
    # it is transferred to the cpu for computation, so there is no extra
    # alloc after compilation.
    # more_alloc1 applies after the first compilation.
    for dtype, more_alloc1 in [("float32", 1),
                               ("float64", 0)]:
        print(dtype)
        test_params = np.asarray(np.random.randn(np.prod(shapes)), dtype)

        some_vector = tensor.vector('some_vector', dtype=dtype)
        some_matrix = some_vector.reshape(shapes)
        branch_select = tensor.iscalar()

        mem1 = freemem()
        print("Before shared variable", mem1)
        variables = cuda.shared_constructor(np.ones((shapes[1],),
                                                    dtype='float32'))
        derp = tensor.sum(tensor.dot(some_matrix[:shapes[0]], variables))
        derp = ifelse.IfElse(1)(branch_select,
                                derp, some_matrix[:shapes[0]].sum())
        derp += 1
        print("Shared took ",
              np.prod(variables.get_value(
                  borrow=True,
                  return_internal_type=True).shape) *
              4 / 1024,
              "kB")

        mem2 = freemem()
        print("Before compilation", mem2)
        mem2_1 = freemem(extra_alloc=more_alloc1)
        obj = theano.function([some_vector, branch_select], derp,
                              mode=mode_with_gpu)
        # theano.printing.debugprint(obj, print_type=True)
        mem3 = freemem()
        print("After function compilation 1", mem3)
        assert mem2_1 == mem3, (mem2_1, mem3)

        for i in range(3):
            obj(test_params, 1)
            print("After function evaluation branch true", freemem())
            assert mem2_1 == freemem(), (mem2_1, freemem())
            obj(test_params, 0)
            print("After function evaluation branch false", freemem())
            assert mem2_1 == freemem(), (mem2_1, freemem())

        del obj
        print("After deleting function 1", freemem())
        assert mem2 == freemem(), (mem2, freemem())

        del derp, variables
        print("After deleting shared variable and ref to it", freemem())
...
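The pattern above generalizes to a simple leak check you can drop into your own tests: snapshot the allocation count before creating anything, exercise the compiled function, delete every reference, and assert the count returns to the baseline. Below is a minimal, self-contained sketch of that pattern. It assumes Theano's old CUDA backend (theano.sandbox.cuda) is installed and that Theano is configured with device=gpu; check_no_gpu_leak is a hypothetical helper name for illustration, not part of the snippet above.

import gc

import numpy as np
import theano
import theano.tensor as tensor
import theano.sandbox.cuda as cuda


def outstanding_gpu_mallocs():
    # Same trick as freemem() above: run several GC passes so that
    # reference cycles holding GPU buffers are actually released
    # before counting; the original test also collects three times,
    # presumably so objects freed by one pass can make further
    # objects collectable on the next.
    for _ in range(3):
        gc.collect()
    return cuda.cuda_ndarray.cuda_ndarray.outstanding_mallocs()


def check_no_gpu_leak():  # hypothetical helper name
    baseline = outstanding_gpu_mallocs()

    shared = cuda.shared_constructor(np.ones((100,), dtype='float32'))
    x = tensor.vector('x', dtype='float32')
    fn = theano.function([x], tensor.sum(x * shared))
    fn(np.ones(100, dtype='float32'))

    # Drop every reference before re-measuring; a surviving reference
    # to fn or shared would keep its GPU buffer alive and fail the
    # assert below.
    del fn, shared
    after = outstanding_gpu_mallocs()
    assert after == baseline, (after, baseline)

Counting outstanding mallocs rather than reading free device memory (the mem_info() branch that the snippet leaves disabled) is deliberate: other processes sharing the GPU change the free-memory figure, while Theano's own allocation count is stable.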


