How to use Generator method in wpt

Best Python code snippet using wpt

test_multiprocessing.py

Source:test_multiprocessing.py Github

copy

Full Screen

1from __future__ import print_function2import os3import threading4import pytest5import numpy as np6from keras.models import Sequential7from keras.layers.core import Dense8from keras.utils.test_utils import keras_test9from keras.utils import Sequence10STEPS_PER_EPOCH = 10011STEPS = 10012WORKERS = 413class DummySequence(Sequence):14 def __getitem__(self, idx):15 return np.zeros([10, 2]), np.ones([10])16 def __len__(self):17 return 1018@pytest.fixture19def in_tmpdir(tmpdir):20 """Runs a function in a temporary directory.21 Checks that the directory is empty afterwards.22 """23 with tmpdir.as_cwd():24 yield None25 assert not tmpdir.listdir()26@keras_test27def test_multiprocessing_training():28 arr_data = np.random.randint(0, 256, (50, 2))29 arr_labels = np.random.randint(0, 2, 50)30 arr_weights = np.random.random(50)31 def custom_generator(use_weights=False):32 batch_size = 1033 n_samples = 5034 while True:35 batch_index = np.random.randint(0, n_samples - batch_size)36 start = batch_index37 end = start + batch_size38 X = arr_data[start: end]39 y = arr_labels[start: end]40 if use_weights:41 w = arr_weights[start: end]42 yield X, y, w43 else:44 yield X, y45 # Build a NN46 model = Sequential()47 model.add(Dense(1, input_shape=(2, )))48 model.compile(loss='mse', optimizer='adadelta')49 # - Produce data on 4 worker processes, consume on main process:50 # - Each worker process runs OWN copy of generator51 # - BUT on Windows, `multiprocessing` won't marshall generators across52 # process boundaries -> make sure `fit_generator()` raises ValueError53 # exception and does not attempt to run the generator.54 if os.name is 'nt':55 with pytest.raises(ValueError):56 model.fit_generator(custom_generator(),57 steps_per_epoch=STEPS_PER_EPOCH,58 epochs=1,59 verbose=1,60 validation_steps=None,61 max_queue_size=10,62 workers=WORKERS,63 use_multiprocessing=True)64 else:65 model.fit_generator(custom_generator(),66 steps_per_epoch=STEPS_PER_EPOCH,67 epochs=1,68 verbose=1,69 
validation_steps=None,70 max_queue_size=10,71 workers=WORKERS,72 use_multiprocessing=True)73 # - Produce data on 4 worker threads, consume on main thread:74 # - All worker threads share the SAME generator75 model.fit_generator(custom_generator(),76 steps_per_epoch=STEPS_PER_EPOCH,77 epochs=1,78 verbose=1,79 validation_steps=None,80 max_queue_size=10,81 workers=WORKERS,82 use_multiprocessing=False)83 # - Produce data on 1 worker process, consume on main process:84 # - Worker process runs generator85 # - BUT on Windows, `multiprocessing` won't marshall generators across86 # process boundaries -> make sure `fit_generator()` raises ValueError87 # exception and does not attempt to run the generator.88 if os.name is 'nt':89 with pytest.raises(ValueError):90 model.fit_generator(custom_generator(True),91 steps_per_epoch=STEPS_PER_EPOCH,92 validation_data=(arr_data[:10],93 arr_labels[:10],94 arr_weights[:10]),95 validation_steps=1,96 max_queue_size=10,97 workers=1,98 use_multiprocessing=True)99 else:100 model.fit_generator(custom_generator(True),101 steps_per_epoch=STEPS_PER_EPOCH,102 validation_data=(arr_data[:10],103 arr_labels[:10],104 arr_weights[:10]),105 validation_steps=1,106 max_queue_size=10,107 workers=1,108 use_multiprocessing=True)109 # - Produce data on 1 worker thread, consume on main thread:110 # - Worker thread is the only thread running the generator111 model.fit_generator(custom_generator(True),112 steps_per_epoch=STEPS_PER_EPOCH,113 validation_data=(arr_data[:10],114 arr_labels[:10],115 arr_weights[:10]),116 validation_steps=1,117 max_queue_size=10,118 workers=1,119 use_multiprocessing=False)120 # - Produce data on 1 worker process, consume on main process:121 # - Worker process runs generator122 # - BUT on Windows, `multiprocessing` won't marshall generators across123 # process boundaries -> make sure `fit_generator()` raises ValueError124 # exception and does not attempt to run the generator.125 if os.name is 'nt':126 with pytest.raises(ValueError):127 
model.fit_generator(custom_generator(True),128 steps_per_epoch=STEPS_PER_EPOCH,129 validation_data=custom_generator(True),130 validation_steps=1,131 max_queue_size=10,132 workers=1,133 use_multiprocessing=True)134 else:135 model.fit_generator(custom_generator(True),136 steps_per_epoch=STEPS_PER_EPOCH,137 validation_data=custom_generator(True),138 validation_steps=1,139 max_queue_size=10,140 workers=1,141 use_multiprocessing=True)142 # - Produce data on 1 worker thread AT A TIME, consume on main thread:143 # - Worker threads for training and validation run generator SEQUENTIALLY144 model.fit_generator(custom_generator(True),145 steps_per_epoch=STEPS_PER_EPOCH,146 validation_data=custom_generator(True),147 validation_steps=1,148 max_queue_size=10,149 workers=1,150 use_multiprocessing=False)151 # - Produce and consume data without a queue on main thread152 # - Make sure the value of `use_multiprocessing` is ignored153 model.fit_generator(custom_generator(True),154 steps_per_epoch=STEPS_PER_EPOCH,155 validation_data=custom_generator(True),156 validation_steps=1,157 max_queue_size=10,158 workers=0,159 use_multiprocessing=True)160 model.fit_generator(custom_generator(True),161 steps_per_epoch=STEPS_PER_EPOCH,162 validation_data=custom_generator(True),163 validation_steps=1,164 max_queue_size=10,165 workers=0,166 use_multiprocessing=False)167 # - For Sequence168 model.fit_generator(DummySequence(),169 steps_per_epoch=STEPS_PER_EPOCH,170 validation_data=custom_generator(True),171 validation_steps=1,172 max_queue_size=10,173 workers=0,174 use_multiprocessing=True)175 model.fit_generator(DummySequence(),176 steps_per_epoch=STEPS_PER_EPOCH,177 validation_data=custom_generator(True),178 validation_steps=1,179 max_queue_size=10,180 workers=0,181 use_multiprocessing=False)182 # Test invalid use cases183 def invalid_generator():184 while True:185 yield arr_data[:10], arr_data[:10], arr_labels[:10], arr_labels[:10]186 # not specified `validation_steps`187 with 
pytest.raises(ValueError):188 model.fit_generator(custom_generator(),189 steps_per_epoch=STEPS_PER_EPOCH,190 validation_data=custom_generator(),191 validation_steps=None,192 max_queue_size=10,193 workers=1,194 use_multiprocessing=False)195 # validation data is neither a tuple nor a triple.196 with pytest.raises(ValueError):197 model.fit_generator(custom_generator(),198 steps_per_epoch=STEPS_PER_EPOCH,199 validation_data=(arr_data[:10],200 arr_data[:10],201 arr_labels[:10],202 arr_weights[:10]),203 validation_steps=1,204 max_queue_size=10,205 workers=1,206 use_multiprocessing=False)207 # validation generator is neither a tuple nor a triple.208 with pytest.raises(ValueError):209 model.fit_generator(custom_generator(),210 steps_per_epoch=STEPS_PER_EPOCH,211 validation_data=invalid_generator(),212 validation_steps=1,213 max_queue_size=10,214 workers=1,215 use_multiprocessing=False)216@keras_test217def test_multiprocessing_training_from_file(in_tmpdir):218 arr_data = np.random.randint(0, 256, (50, 2))219 arr_labels = np.random.randint(0, 2, 50)220 np.savez('data.npz', **{'data': arr_data, 'labels': arr_labels})221 def custom_generator():222 batch_size = 10223 n_samples = 50224 arr = np.load('data.npz')225 while True:226 batch_index = np.random.randint(0, n_samples - batch_size)227 start = batch_index228 end = start + batch_size229 X = arr['data'][start: end]230 y = arr['labels'][start: end]231 yield X, y232 # Build a NN233 model = Sequential()234 model.add(Dense(1, input_shape=(2, )))235 model.compile(loss='mse', optimizer='adadelta')236 # - Produce data on 4 worker processes, consume on main process:237 # - Each worker process runs OWN copy of generator238 # - BUT on Windows, `multiprocessing` won't marshall generators across239 # process boundaries -> make sure `fit_generator()` raises ValueError240 # exception and does not attempt to run the generator.241 if os.name is 'nt':242 with pytest.raises(ValueError):243 model.fit_generator(custom_generator(),244 
steps_per_epoch=STEPS_PER_EPOCH,245 epochs=1,246 verbose=1,247 validation_steps=None,248 max_queue_size=10,249 workers=WORKERS,250 use_multiprocessing=True)251 else:252 model.fit_generator(custom_generator(),253 steps_per_epoch=STEPS_PER_EPOCH,254 epochs=1,255 verbose=1,256 validation_steps=None,257 max_queue_size=10,258 workers=WORKERS,259 use_multiprocessing=True)260 # - Produce data on 4 worker threads, consume on main thread:261 # - All worker threads share the SAME generator262 model.fit_generator(custom_generator(),263 steps_per_epoch=STEPS_PER_EPOCH,264 epochs=1,265 verbose=1,266 validation_steps=None,267 max_queue_size=10,268 workers=WORKERS,269 use_multiprocessing=False)270 # - Produce data on 1 worker process, consume on main process:271 # - Worker process runs generator272 # - BUT on Windows, `multiprocessing` won't marshall generators across273 # process boundaries -> make sure `fit_generator()` raises ValueError274 # exception and does not attempt to run the generator.275 if os.name is 'nt':276 with pytest.raises(ValueError):277 model.fit_generator(custom_generator(),278 steps_per_epoch=STEPS_PER_EPOCH,279 epochs=1,280 verbose=1,281 validation_steps=None,282 max_queue_size=10,283 workers=1,284 use_multiprocessing=True)285 else:286 model.fit_generator(custom_generator(),287 steps_per_epoch=STEPS_PER_EPOCH,288 epochs=1,289 verbose=1,290 validation_steps=None,291 max_queue_size=10,292 workers=1,293 use_multiprocessing=True)294 # - Produce data on 1 worker thread, consume on main thread:295 # - Worker thread is the only thread running the generator296 model.fit_generator(custom_generator(),297 steps_per_epoch=STEPS_PER_EPOCH,298 epochs=1,299 verbose=1,300 validation_steps=None,301 max_queue_size=10,302 workers=1,303 use_multiprocessing=False)304 # - Produce and consume data without a queue on main thread305 # - Make sure the value of `use_multiprocessing` is ignored306 model.fit_generator(custom_generator(),307 steps_per_epoch=STEPS_PER_EPOCH,308 
epochs=1,309 verbose=1,310 validation_steps=None,311 max_queue_size=10,312 workers=0,313 use_multiprocessing=True)314 model.fit_generator(custom_generator(),315 steps_per_epoch=STEPS_PER_EPOCH,316 epochs=1,317 verbose=1,318 validation_steps=None,319 max_queue_size=10,320 workers=0,321 use_multiprocessing=False)322 os.remove('data.npz')323@keras_test324def test_multiprocessing_predicting():325 arr_data = np.random.randint(0, 256, (50, 2))326 def custom_generator():327 batch_size = 10328 n_samples = 50329 while True:330 batch_index = np.random.randint(0, n_samples - batch_size)331 start = batch_index332 end = start + batch_size333 X = arr_data[start: end]334 yield X335 # Build a NN336 model = Sequential()337 model.add(Dense(1, input_shape=(2, )))338 model.compile(loss='mse', optimizer='adadelta')339 # - Produce data on 4 worker processes, consume on main process:340 # - Each worker process runs OWN copy of generator341 # - BUT on Windows, `multiprocessing` won't marshall generators across342 # process boundaries -> make sure `predict_generator()` raises ValueError343 # exception and does not attempt to run the generator.344 if os.name is 'nt':345 with pytest.raises(ValueError):346 model.predict_generator(custom_generator(),347 steps=STEPS,348 max_queue_size=10,349 workers=WORKERS,350 use_multiprocessing=True)351 else:352 model.predict_generator(custom_generator(),353 steps=STEPS,354 max_queue_size=10,355 workers=WORKERS,356 use_multiprocessing=True)357 # - Produce data on 4 worker threads, consume on main thread:358 # - All worker threads share the SAME generator359 model.predict_generator(custom_generator(),360 steps=STEPS,361 max_queue_size=10,362 workers=WORKERS,363 use_multiprocessing=False)364 # - Produce data on 1 worker process, consume on main process:365 # - Worker process runs generator366 # - BUT on Windows, `multiprocessing` won't marshall generators across367 # process boundaries -> make sure `predict_generator()` raises ValueError368 # exception and 
does not attempt to run the generator.369 if os.name is 'nt':370 with pytest.raises(ValueError):371 model.predict_generator(custom_generator(),372 steps=STEPS,373 max_queue_size=10,374 workers=1,375 use_multiprocessing=True)376 else:377 model.predict_generator(custom_generator(),378 steps=STEPS,379 max_queue_size=10,380 workers=1,381 use_multiprocessing=True)382 # - Produce data on 1 worker thread, consume on main thread:383 # - Worker thread is the only thread running the generator384 model.predict_generator(custom_generator(),385 steps=STEPS,386 max_queue_size=10,387 workers=1,388 use_multiprocessing=False)389 # - Main thread runs the generator without a queue390 # - Make sure the value of `use_multiprocessing` is ignored391 model.predict_generator(custom_generator(),392 steps=STEPS,393 max_queue_size=10,394 workers=0,395 use_multiprocessing=True)396 model.predict_generator(custom_generator(),397 steps=STEPS,398 max_queue_size=10,399 workers=0,400 use_multiprocessing=False)401@keras_test402def test_multiprocessing_evaluating():403 arr_data = np.random.randint(0, 256, (50, 2))404 arr_labels = np.random.randint(0, 2, 50)405 def custom_generator():406 batch_size = 10407 n_samples = 50408 while True:409 batch_index = np.random.randint(0, n_samples - batch_size)410 start = batch_index411 end = start + batch_size412 X = arr_data[start: end]413 y = arr_labels[start: end]414 yield X, y415 # Build a NN416 model = Sequential()417 model.add(Dense(1, input_shape=(2, )))418 model.compile(loss='mse', optimizer='adadelta')419 # - Produce data on 4 worker processes, consume on main process:420 # - Each worker process runs OWN copy of generator421 # - BUT on Windows, `multiprocessing` won't marshall generators across422 # process boundaries423 # -> make sure `evaluate_generator()` raises raises ValueError424 # exception and does not attempt to run the generator.425 if os.name is 'nt':426 with pytest.raises(ValueError):427 model.evaluate_generator(custom_generator(),428 
steps=STEPS,429 max_queue_size=10,430 workers=WORKERS,431 use_multiprocessing=True)432 else:433 model.evaluate_generator(custom_generator(),434 steps=STEPS,435 max_queue_size=10,436 workers=WORKERS,437 use_multiprocessing=True)438 # - Produce data on 4 worker threads, consume on main thread:439 # - All worker threads share the SAME generator440 model.evaluate_generator(custom_generator(),441 steps=STEPS,442 max_queue_size=10,443 workers=WORKERS,444 use_multiprocessing=False)445 # - Produce data on 1 worker process, consume on main process:446 # - Worker process runs generator447 # - BUT on Windows, `multiprocessing` won't marshall generators across448 # process boundaries -> make sure `evaluate_generator()` raises ValueError449 # exception and does not attempt to run the generator.450 if os.name is 'nt':451 with pytest.raises(ValueError):452 model.evaluate_generator(custom_generator(),453 steps=STEPS,454 max_queue_size=10,455 workers=1,456 use_multiprocessing=True)457 else:458 model.evaluate_generator(custom_generator(),459 steps=STEPS,460 max_queue_size=10,461 workers=1,462 use_multiprocessing=True)463 # - Produce data on 1 worker thread, consume on main thread:464 # - Worker thread is the only thread running the generator465 model.evaluate_generator(custom_generator(),466 steps=STEPS,467 max_queue_size=10,468 workers=1,469 use_multiprocessing=False)470 # - Produce and consume data without a queue on main thread471 # - Make sure the value of `use_multiprocessing` is ignored472 model.evaluate_generator(custom_generator(),473 steps=STEPS,474 max_queue_size=10,475 workers=0,476 use_multiprocessing=True)477 model.evaluate_generator(custom_generator(),478 steps=STEPS,479 max_queue_size=10,480 workers=0,481 use_multiprocessing=False)482@keras_test483def test_multiprocessing_fit_error():484 arr_data = np.random.randint(0, 256, (50, 2))485 arr_labels = np.random.randint(0, 2, 50)486 batch_size = 10487 n_samples = 50488 good_batches = 3489 def 
custom_generator(use_weights=False):490 """Raises an exception after a few good batches"""491 for i in range(good_batches):492 batch_index = np.random.randint(0, n_samples - batch_size)493 start = batch_index494 end = start + batch_size495 X = arr_data[start: end]496 y = arr_labels[start: end]497 yield X, y498 raise RuntimeError499 model = Sequential()500 model.add(Dense(1, input_shape=(2, )))501 model.compile(loss='mse', optimizer='adadelta')502 samples = batch_size * (good_batches + 1)503 # - Produce data on 4 worker processes, consume on main process:504 # - Each worker process runs OWN copy of generator505 # - BUT on Windows, `multiprocessing` won't marshall generators across506 # process boundaries -> make sure `fit_generator()` raises ValueError507 # exception and does not attempt to run the generator.508 # - On other platforms, make sure `RuntimeError` exception bubbles up509 if os.name is 'nt':510 with pytest.raises(ValueError):511 model.fit_generator(custom_generator(),512 steps_per_epoch=samples,513 validation_steps=None,514 max_queue_size=10,515 workers=WORKERS,516 use_multiprocessing=True)517 else:518 with pytest.raises(RuntimeError):519 model.fit_generator(custom_generator(),520 steps_per_epoch=samples,521 validation_steps=None,522 max_queue_size=10,523 workers=WORKERS,524 use_multiprocessing=True)525 # - Produce data on 4 worker threads, consume on main thread:526 # - All worker threads share the SAME generator527 # - Make sure `RuntimeError` exception bubbles up528 with pytest.raises(RuntimeError):529 model.fit_generator(custom_generator(),530 steps_per_epoch=samples,531 validation_steps=None,532 max_queue_size=10,533 workers=WORKERS,534 use_multiprocessing=False)535 # - Produce data on 1 worker process, consume on main process:536 # - Worker process runs generator537 # - BUT on Windows, `multiprocessing` won't marshall generators across538 # process boundaries -> make sure `fit_generator()` raises ValueError539 # exception and does not attempt to 
run the generator.540 # - On other platforms, make sure `RuntimeError` exception bubbles up541 if os.name is 'nt':542 with pytest.raises(ValueError):543 model.fit_generator(custom_generator(),544 steps_per_epoch=samples,545 validation_steps=None,546 max_queue_size=10,547 workers=1,548 use_multiprocessing=True)549 else:550 with pytest.raises(RuntimeError):551 model.fit_generator(custom_generator(),552 steps_per_epoch=samples,553 validation_steps=None,554 max_queue_size=10,555 workers=1,556 use_multiprocessing=True)557 # - Produce data on 1 worker thread, consume on main thread:558 # - Worker thread is the only thread running the generator559 # - Make sure `RuntimeError` exception bubbles up560 with pytest.raises(RuntimeError):561 model.fit_generator(custom_generator(),562 steps_per_epoch=samples,563 validation_steps=None,564 max_queue_size=10,565 workers=1,566 use_multiprocessing=False)567 # - Produce and consume data without a queue on main thread568 # - Make sure the value of `use_multiprocessing` is ignored569 # - Make sure `RuntimeError` exception bubbles up570 with pytest.raises(RuntimeError):571 model.fit_generator(custom_generator(),572 steps_per_epoch=samples,573 validation_steps=None,574 max_queue_size=10,575 workers=0,576 use_multiprocessing=True)577 with pytest.raises(RuntimeError):578 model.fit_generator(custom_generator(),579 steps_per_epoch=samples,580 validation_steps=None,581 max_queue_size=10,582 workers=0,583 use_multiprocessing=False)584@keras_test585def test_multiprocessing_evaluate_error():586 arr_data = np.random.randint(0, 256, (50, 2))587 arr_labels = np.random.randint(0, 2, 50)588 batch_size = 10589 n_samples = 50590 good_batches = 3591 def custom_generator():592 """Raises an exception after a few good batches"""593 for i in range(good_batches):594 batch_index = np.random.randint(0, n_samples - batch_size)595 start = batch_index596 end = start + batch_size597 X = arr_data[start: end]598 y = arr_labels[start: end]599 yield X, y600 raise 
RuntimeError601 model = Sequential()602 model.add(Dense(1, input_shape=(2, )))603 model.compile(loss='mse', optimizer='adadelta')604 # - Produce data on 4 worker processes, consume on main process:605 # - Each worker process runs OWN copy of generator606 # - BUT on Windows, `multiprocessing` won't marshall generators across607 # process boundaries -> make sure `evaluate_generator()` raises ValueError608 # exception and does not attempt to run the generator.609 # - On other platforms, make sure `RuntimeError` exception bubbles up610 if os.name is 'nt':611 with pytest.raises(ValueError):612 model.evaluate_generator(custom_generator(),613 steps=good_batches * WORKERS + 1,614 max_queue_size=10,615 workers=WORKERS,616 use_multiprocessing=True)617 else:618 with pytest.raises(RuntimeError):619 model.evaluate_generator(custom_generator(),620 steps=good_batches * WORKERS + 1,621 max_queue_size=10,622 workers=WORKERS,623 use_multiprocessing=True)624 # - Produce data on 4 worker threads, consume on main thread:625 # - All worker threads share the SAME generator626 # - Make sure `RuntimeError` exception bubbles up627 with pytest.raises(RuntimeError):628 model.evaluate_generator(custom_generator(),629 steps=good_batches * WORKERS + 1,630 max_queue_size=10,631 workers=WORKERS,632 use_multiprocessing=False)633 # - Produce data on 1 worker process, consume on main process:634 # - Worker process runs generator635 # - BUT on Windows, `multiprocessing` won't marshall generators across636 # process boundaries -> make sure `evaluate_generator()` raises ValueError637 # exception and does not attempt to run the generator.638 # - On other platforms, make sure `RuntimeError` exception bubbles up639 if os.name is 'nt':640 with pytest.raises(ValueError):641 model.evaluate_generator(custom_generator(),642 steps=good_batches + 1,643 max_queue_size=10,644 workers=1,645 use_multiprocessing=True)646 else:647 with pytest.raises(RuntimeError):648 model.evaluate_generator(custom_generator(),649 
steps=good_batches + 1,650 max_queue_size=10,651 workers=1,652 use_multiprocessing=True)653 # - Produce data on 1 worker thread, consume on main thread:654 # - Worker thread is the only thread running the generator655 # - Make sure `RuntimeError` exception bubbles up656 with pytest.raises(RuntimeError):657 model.evaluate_generator(custom_generator(),658 steps=good_batches + 1,659 max_queue_size=10,660 workers=1,661 use_multiprocessing=False)662 # - Produce and consume data without a queue on main thread663 # - Make sure the value of `use_multiprocessing` is ignored664 # - Make sure `RuntimeError` exception bubbles up665 with pytest.raises(RuntimeError):666 model.evaluate_generator(custom_generator(),667 steps=good_batches + 1,668 max_queue_size=10,669 workers=0,670 use_multiprocessing=True)671 with pytest.raises(RuntimeError):672 model.evaluate_generator(custom_generator(),673 steps=good_batches + 1,674 max_queue_size=10,675 workers=0,676 use_multiprocessing=False)677@keras_test678def test_multiprocessing_predict_error():679 arr_data = np.random.randint(0, 256, (50, 2))680 good_batches = 3681 def custom_generator():682 """Raises an exception after a few good batches"""683 batch_size = 10684 n_samples = 50685 for i in range(good_batches):686 batch_index = np.random.randint(0, n_samples - batch_size)687 start = batch_index688 end = start + batch_size689 X = arr_data[start: end]690 yield X691 raise RuntimeError692 model = Sequential()693 model.add(Dense(1, input_shape=(2, )))694 model.compile(loss='mse', optimizer='adadelta')695 # - Produce data on 4 worker processes, consume on main process:696 # - Each worker process runs OWN copy of generator697 # - BUT on Windows, `multiprocessing` won't marshall generators across698 # process boundaries -> make sure `predict_generator()` raises ValueError699 # exception and does not attempt to run the generator.700 # - On other platforms, make sure `RuntimeError` exception bubbles up701 if os.name is 'nt':702 with 
pytest.raises(ValueError):703 model.predict_generator(custom_generator(),704 steps=good_batches * WORKERS + 1,705 max_queue_size=10,706 workers=WORKERS,707 use_multiprocessing=True)708 else:709 with pytest.raises(RuntimeError):710 model.predict_generator(custom_generator(),711 steps=good_batches * WORKERS + 1,712 max_queue_size=10,713 workers=WORKERS,714 use_multiprocessing=True)715 # - Produce data on 4 worker threads, consume on main thread:716 # - All worker threads share the SAME generator717 # - Make sure `RuntimeError` exception bubbles up718 with pytest.raises(RuntimeError):719 model.predict_generator(custom_generator(),720 steps=good_batches * WORKERS + 1,721 max_queue_size=10,722 workers=WORKERS,723 use_multiprocessing=False)724 # - Produce data on 1 worker process, consume on main process:725 # - Worker process runs generator726 # - BUT on Windows, `multiprocessing` won't marshall generators across727 # process boundaries -> make sure `predict_generator()` raises ValueError728 # exception and does not attempt to run the generator.729 # - On other platforms, make sure `RuntimeError` exception bubbles up730 if os.name is 'nt':731 with pytest.raises(ValueError):732 model.predict_generator(custom_generator(),733 steps=good_batches + 1,734 max_queue_size=10,735 workers=1,736 use_multiprocessing=True)737 else:738 with pytest.raises(RuntimeError):739 model.predict_generator(custom_generator(),740 steps=good_batches + 1,741 max_queue_size=10,742 workers=1,743 use_multiprocessing=True)744 # - Produce data on 1 worker thread, consume on main thread:745 # - Worker thread is the only thread running the generator746 # - Make sure `RuntimeError` exception bubbles up747 with pytest.raises(RuntimeError):748 model.predict_generator(custom_generator(),749 steps=good_batches + 1,750 max_queue_size=10,751 workers=1,752 use_multiprocessing=False)753 # - Produce and consume data without a queue on main thread754 # - Make sure the value of `use_multiprocessing` is ignored755 # 
- Make sure `RuntimeError` exception bubbles up756 with pytest.raises(RuntimeError):757 model.predict_generator(custom_generator(),758 steps=good_batches + 1,759 max_queue_size=10,760 workers=0,761 use_multiprocessing=True)762 with pytest.raises(RuntimeError):763 model.predict_generator(custom_generator(),764 steps=good_batches + 1,765 max_queue_size=10,766 workers=0,767 use_multiprocessing=False)768if __name__ == '__main__':...

Full Screen

Full Screen

test_direct.py

Source:test_direct.py Github

copy

Full Screen

...156 gauss = rs.standard_normal(25)157 assert_allclose(gauss,158 gauss_from_uint(self.data2['data'], n, self.bits))159 def test_uniform_double(self):160 rs = Generator(self.bit_generator(*self.data1['seed']))161 vals = uniform_from_uint(self.data1['data'], self.bits)162 uniforms = rs.random(len(vals))163 assert_allclose(uniforms, vals)164 assert_equal(uniforms.dtype, np.float64)165 rs = Generator(self.bit_generator(*self.data2['seed']))166 vals = uniform_from_uint(self.data2['data'], self.bits)167 uniforms = rs.random(len(vals))168 assert_allclose(uniforms, vals)169 assert_equal(uniforms.dtype, np.float64)170 def test_uniform_float(self):171 rs = Generator(self.bit_generator(*self.data1['seed']))172 vals = uniform32_from_uint(self.data1['data'], self.bits)173 uniforms = rs.random(len(vals), dtype=np.float32)174 assert_allclose(uniforms, vals)175 assert_equal(uniforms.dtype, np.float32)176 rs = Generator(self.bit_generator(*self.data2['seed']))177 vals = uniform32_from_uint(self.data2['data'], self.bits)178 uniforms = rs.random(len(vals), dtype=np.float32)179 assert_allclose(uniforms, vals)180 assert_equal(uniforms.dtype, np.float32)181 def test_repr(self):182 rs = Generator(self.bit_generator(*self.data1['seed']))183 assert 'Generator' in repr(rs)184 assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') in repr(rs)185 def test_str(self):186 rs = Generator(self.bit_generator(*self.data1['seed']))187 assert 'Generator' in str(rs)188 assert str(self.bit_generator.__name__) in str(rs)189 assert '{:#x}'.format(id(rs)).upper().replace('X', 'x') not in str(rs)190 def test_pickle(self):191 import pickle192 bit_generator = self.bit_generator(*self.data1['seed'])193 state = bit_generator.state194 bitgen_pkl = pickle.dumps(bit_generator)195 reloaded = pickle.loads(bitgen_pkl)196 reloaded_state = reloaded.state197 assert_array_equal(Generator(bit_generator).standard_normal(1000),198 Generator(reloaded).standard_normal(1000))199 assert bit_generator is not reloaded200 
assert_state_equal(reloaded_state, state)201 ss = SeedSequence(100)202 aa = pickle.loads(pickle.dumps(ss))203 assert_equal(ss.state, aa.state)204 def test_invalid_state_type(self):205 bit_generator = self.bit_generator(*self.data1['seed'])206 with pytest.raises(TypeError):207 bit_generator.state = {'1'}208 def test_invalid_state_value(self):209 bit_generator = self.bit_generator(*self.data1['seed'])210 state = bit_generator.state211 state['bit_generator'] = 'otherBitGenerator'212 with pytest.raises(ValueError):213 bit_generator.state = state214 def test_invalid_init_type(self):215 bit_generator = self.bit_generator216 for st in self.invalid_init_types:217 with pytest.raises(TypeError):218 bit_generator(*st)219 def test_invalid_init_values(self):220 bit_generator = self.bit_generator221 for st in self.invalid_init_values:222 with pytest.raises((ValueError, OverflowError)):223 bit_generator(*st)224 def test_benchmark(self):225 bit_generator = self.bit_generator(*self.data1['seed'])226 bit_generator._benchmark(1)227 bit_generator._benchmark(1, 'double')228 with pytest.raises(ValueError):229 bit_generator._benchmark(1, 'int32')230 @pytest.mark.skipif(MISSING_CFFI, reason='cffi not available')231 def test_cffi(self):232 bit_generator = self.bit_generator(*self.data1['seed'])233 cffi_interface = bit_generator.cffi234 assert isinstance(cffi_interface, interface)235 other_cffi_interface = bit_generator.cffi236 assert other_cffi_interface is cffi_interface237 @pytest.mark.skipif(MISSING_CTYPES, reason='ctypes not available')238 def test_ctypes(self):239 bit_generator = self.bit_generator(*self.data1['seed'])240 ctypes_interface = bit_generator.ctypes241 assert isinstance(ctypes_interface, interface)242 other_ctypes_interface = bit_generator.ctypes243 assert other_ctypes_interface is ctypes_interface244 def test_getstate(self):245 bit_generator = self.bit_generator(*self.data1['seed'])246 state = bit_generator.state247 alt_state = bit_generator.__getstate__()248 
assert_state_equal(state, alt_state)249class TestPhilox(Base):250 @classmethod251 def setup_class(cls):252 cls.bit_generator = Philox253 cls.bits = 64254 cls.dtype = np.uint64255 cls.data1 = cls._read_csv(256 join(pwd, './data/philox-testset-1.csv'))257 cls.data2 = cls._read_csv(258 join(pwd, './data/philox-testset-2.csv'))259 cls.seed_error_type = TypeError260 cls.invalid_init_types = []261 cls.invalid_init_values = [(1, None, 1), (-1,), (None, None, 2 ** 257 + 1)]262 def test_set_key(self):263 bit_generator = self.bit_generator(*self.data1['seed'])264 state = bit_generator.state265 keyed = self.bit_generator(counter=state['state']['counter'],266 key=state['state']['key'])267 assert_state_equal(bit_generator.state, keyed.state)268class TestPCG64(Base):269 @classmethod270 def setup_class(cls):271 cls.bit_generator = PCG64272 cls.bits = 64273 cls.dtype = np.uint64274 cls.data1 = cls._read_csv(join(pwd, './data/pcg64-testset-1.csv'))275 cls.data2 = cls._read_csv(join(pwd, './data/pcg64-testset-2.csv'))276 cls.seed_error_type = (ValueError, TypeError)277 cls.invalid_init_types = [(3.2,), ([None],), (1, None)]278 cls.invalid_init_values = [(-1,)]279 def test_advance_symmetry(self):280 rs = Generator(self.bit_generator(*self.data1['seed']))281 state = rs.bit_generator.state282 step = -0x9e3779b97f4a7c150000000000000000283 rs.bit_generator.advance(step)284 val_neg = rs.integers(10)285 rs.bit_generator.state = state286 rs.bit_generator.advance(2**128 + step)287 val_pos = rs.integers(10)288 rs.bit_generator.state = state289 rs.bit_generator.advance(10 * 2**128 + step)290 val_big = rs.integers(10)291 assert val_neg == val_pos292 assert val_big == val_pos293class TestMT19937(Base):294 @classmethod295 def setup_class(cls):296 cls.bit_generator = MT19937297 cls.bits = 32298 cls.dtype = np.uint32299 cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))300 cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))301 cls.seed_error_type = ValueError302 
cls.invalid_init_types = []303 cls.invalid_init_values = [(-1,)]304 def test_seed_float_array(self):305 assert_raises(TypeError, self.bit_generator, np.array([np.pi]))306 assert_raises(TypeError, self.bit_generator, np.array([-np.pi]))307 assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi]))308 assert_raises(TypeError, self.bit_generator, np.array([0, np.pi]))309 assert_raises(TypeError, self.bit_generator, [np.pi])310 assert_raises(TypeError, self.bit_generator, [0, np.pi])311 def test_state_tuple(self):312 rs = Generator(self.bit_generator(*self.data1['seed']))313 bit_generator = rs.bit_generator314 state = bit_generator.state315 desired = rs.integers(2 ** 16)316 tup = (state['bit_generator'], state['state']['key'],317 state['state']['pos'])318 bit_generator.state = tup319 actual = rs.integers(2 ** 16)320 assert_equal(actual, desired)321 tup = tup + (0, 0.0)322 bit_generator.state = tup323 actual = rs.integers(2 ** 16)324 assert_equal(actual, desired)325class TestSFC64(Base):326 @classmethod...

Full Screen

Full Screen

anchor_generator_builder_test.py

Source:anchor_generator_builder_test.py Github

copy

Full Screen

...31 anchor_generator_text_proto = """32 grid_anchor_generator {33 }34 """35 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()36 text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)37 anchor_generator_object = anchor_generator_builder.build(38 anchor_generator_proto)39 self.assertIsInstance(anchor_generator_object,40 grid_anchor_generator.GridAnchorGenerator)41 self.assertListEqual(anchor_generator_object._scales, [])42 self.assertListEqual(anchor_generator_object._aspect_ratios, [])43 self.assertAllEqual(anchor_generator_object._anchor_offset, [0, 0])44 self.assertAllEqual(anchor_generator_object._anchor_stride, [16, 16])45 self.assertAllEqual(anchor_generator_object._base_anchor_size, [256, 256])46 def test_build_grid_anchor_generator_with_non_default_parameters(self):47 anchor_generator_text_proto = """48 grid_anchor_generator {49 height: 12850 width: 51251 height_stride: 1052 width_stride: 2053 height_offset: 3054 width_offset: 4055 scales: [0.4, 2.2]56 aspect_ratios: [0.3, 4.5]57 }58 """59 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()60 text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)61 anchor_generator_object = anchor_generator_builder.build(62 anchor_generator_proto)63 self.assertIsInstance(anchor_generator_object,64 grid_anchor_generator.GridAnchorGenerator)65 self.assert_almost_list_equal(anchor_generator_object._scales,66 [0.4, 2.2])67 self.assert_almost_list_equal(anchor_generator_object._aspect_ratios,68 [0.3, 4.5])69 self.assertAllEqual(anchor_generator_object._anchor_offset, [30, 40])70 self.assertAllEqual(anchor_generator_object._anchor_stride, [10, 20])71 self.assertAllEqual(anchor_generator_object._base_anchor_size, [128, 512])72 def test_build_ssd_anchor_generator_with_defaults(self):73 anchor_generator_text_proto = """74 ssd_anchor_generator {75 aspect_ratios: [1.0]76 }77 """78 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()79 
text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)80 anchor_generator_object = anchor_generator_builder.build(81 anchor_generator_proto)82 self.assertIsInstance(anchor_generator_object,83 multiple_grid_anchor_generator.84 MultipleGridAnchorGenerator)85 for actual_scales, expected_scales in zip(86 list(anchor_generator_object._scales),87 [(0.1, 0.2, 0.2),88 (0.35, 0.418),89 (0.499, 0.570),90 (0.649, 0.721),91 (0.799, 0.871),92 (0.949, 0.974)]):93 self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)94 for actual_aspect_ratio, expected_aspect_ratio in zip(95 list(anchor_generator_object._aspect_ratios),96 [(1.0, 2.0, 0.5)] + 5 * [(1.0, 1.0)]):97 self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)98 self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])99 def test_build_ssd_anchor_generator_with_custom_scales(self):100 anchor_generator_text_proto = """101 ssd_anchor_generator {102 aspect_ratios: [1.0]103 scales: [0.1, 0.15, 0.2, 0.4, 0.6, 0.8]104 reduce_boxes_in_lowest_layer: false105 }106 """107 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()108 text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)109 anchor_generator_object = anchor_generator_builder.build(110 anchor_generator_proto)111 self.assertIsInstance(anchor_generator_object,112 multiple_grid_anchor_generator.113 MultipleGridAnchorGenerator)114 for actual_scales, expected_scales in zip(115 list(anchor_generator_object._scales),116 [(0.1, math.sqrt(0.1 * 0.15)),117 (0.15, math.sqrt(0.15 * 0.2)),118 (0.2, math.sqrt(0.2 * 0.4)),119 (0.4, math.sqrt(0.4 * 0.6)),120 (0.6, math.sqrt(0.6 * 0.8)),121 (0.8, math.sqrt(0.8 * 1.0))]):122 self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)123 def test_build_ssd_anchor_generator_with_custom_interpolated_scale(self):124 anchor_generator_text_proto = """125 ssd_anchor_generator {126 aspect_ratios: [0.5]127 interpolated_scale_aspect_ratio: 
0.5128 reduce_boxes_in_lowest_layer: false129 }130 """131 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()132 text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)133 anchor_generator_object = anchor_generator_builder.build(134 anchor_generator_proto)135 self.assertIsInstance(anchor_generator_object,136 multiple_grid_anchor_generator.137 MultipleGridAnchorGenerator)138 for actual_aspect_ratio, expected_aspect_ratio in zip(139 list(anchor_generator_object._aspect_ratios),140 6 * [(0.5, 0.5)]):141 self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)142 def test_build_ssd_anchor_generator_without_reduced_boxes(self):143 anchor_generator_text_proto = """144 ssd_anchor_generator {145 aspect_ratios: [1.0]146 reduce_boxes_in_lowest_layer: false147 }148 """149 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()150 text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)151 anchor_generator_object = anchor_generator_builder.build(152 anchor_generator_proto)153 self.assertIsInstance(anchor_generator_object,154 multiple_grid_anchor_generator.155 MultipleGridAnchorGenerator)156 for actual_scales, expected_scales in zip(157 list(anchor_generator_object._scales),158 [(0.2, 0.264),159 (0.35, 0.418),160 (0.499, 0.570),161 (0.649, 0.721),162 (0.799, 0.871),163 (0.949, 0.974)]):164 self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)165 for actual_aspect_ratio, expected_aspect_ratio in zip(166 list(anchor_generator_object._aspect_ratios),167 6 * [(1.0, 1.0)]):168 self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)169 self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])170 def test_build_ssd_anchor_generator_with_non_default_parameters(self):171 anchor_generator_text_proto = """172 ssd_anchor_generator {173 num_layers: 2174 min_scale: 0.3175 max_scale: 0.8176 aspect_ratios: [2.0]177 height_stride: 16178 height_stride: 32179 width_stride: 
20180 width_stride: 30181 height_offset: 8182 height_offset: 16183 width_offset: 0184 width_offset: 10185 }186 """187 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()188 text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)189 anchor_generator_object = anchor_generator_builder.build(190 anchor_generator_proto)191 self.assertIsInstance(anchor_generator_object,192 multiple_grid_anchor_generator.193 MultipleGridAnchorGenerator)194 for actual_scales, expected_scales in zip(195 list(anchor_generator_object._scales),196 [(0.1, 0.3, 0.3), (0.8, 0.894)]):197 self.assert_almost_list_equal(expected_scales, actual_scales, delta=1e-2)198 for actual_aspect_ratio, expected_aspect_ratio in zip(199 list(anchor_generator_object._aspect_ratios),200 [(1.0, 2.0, 0.5), (2.0, 1.0)]):201 self.assert_almost_list_equal(expected_aspect_ratio, actual_aspect_ratio)202 for actual_strides, expected_strides in zip(203 list(anchor_generator_object._anchor_strides), [(16, 20), (32, 30)]):204 self.assert_almost_list_equal(expected_strides, actual_strides)205 for actual_offsets, expected_offsets in zip(206 list(anchor_generator_object._anchor_offsets), [(8, 0), (16, 10)]):207 self.assert_almost_list_equal(expected_offsets, actual_offsets)208 self.assertAllClose(anchor_generator_object._base_anchor_size, [1.0, 1.0])209 def test_raise_value_error_on_empty_anchor_genertor(self):210 anchor_generator_text_proto = """211 """212 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()213 text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)214 with self.assertRaises(ValueError):215 anchor_generator_builder.build(anchor_generator_proto)216 def test_build_multiscale_anchor_generator_custom_aspect_ratios(self):217 anchor_generator_text_proto = """218 multiscale_anchor_generator {219 aspect_ratios: [1.0]220 }221 """222 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()223 text_format.Merge(anchor_generator_text_proto, 
anchor_generator_proto)224 anchor_generator_object = anchor_generator_builder.build(225 anchor_generator_proto)226 self.assertIsInstance(anchor_generator_object,227 multiscale_grid_anchor_generator.228 MultiscaleGridAnchorGenerator)229 for level, anchor_grid_info in zip(230 range(3, 8), anchor_generator_object._anchor_grid_info):231 self.assertEqual(set(anchor_grid_info.keys()), set(['level', 'info']))232 self.assertTrue(level, anchor_grid_info['level'])233 self.assertEqual(len(anchor_grid_info['info']), 4)234 self.assertAllClose(anchor_grid_info['info'][0], [2**0, 2**0.5])235 self.assertTrue(anchor_grid_info['info'][1], 1.0)236 self.assertAllClose(anchor_grid_info['info'][2],237 [4.0 * 2**level, 4.0 * 2**level])238 self.assertAllClose(anchor_grid_info['info'][3], [2**level, 2**level])239 self.assertTrue(anchor_generator_object._normalize_coordinates)240 def test_build_multiscale_anchor_generator_with_anchors_in_pixel_coordinates(241 self):242 anchor_generator_text_proto = """243 multiscale_anchor_generator {244 aspect_ratios: [1.0]245 normalize_coordinates: false246 }247 """248 anchor_generator_proto = anchor_generator_pb2.AnchorGenerator()249 text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)250 anchor_generator_object = anchor_generator_builder.build(251 anchor_generator_proto)252 self.assertIsInstance(anchor_generator_object,253 multiscale_grid_anchor_generator.254 MultiscaleGridAnchorGenerator)255 self.assertFalse(anchor_generator_object._normalize_coordinates)256 def test_build_flexible_anchor_generator(self):257 anchor_generator_text_proto = """258 flexible_grid_anchor_generator {259 anchor_grid {260 base_sizes: [1.5]261 aspect_ratios: [1.0]262 height_stride: 16263 width_stride: 20264 height_offset: 8265 width_offset: 9266 }267 anchor_grid {268 base_sizes: [1.0, 2.0]269 aspect_ratios: [1.0, 0.5]270 height_stride: 32271 width_stride: 30272 height_offset: 10273 width_offset: 11274 }275 }276 """277 anchor_generator_proto = 
anchor_generator_pb2.AnchorGenerator()278 text_format.Merge(anchor_generator_text_proto, anchor_generator_proto)279 anchor_generator_object = anchor_generator_builder.build(280 anchor_generator_proto)281 self.assertIsInstance(anchor_generator_object,282 flexible_grid_anchor_generator.283 FlexibleGridAnchorGenerator)284 for actual_base_sizes, expected_base_sizes in zip(285 list(anchor_generator_object._base_sizes), [(1.5,), (1.0, 2.0)]):286 self.assert_almost_list_equal(expected_base_sizes, actual_base_sizes)287 for actual_aspect_ratios, expected_aspect_ratios in zip(288 list(anchor_generator_object._aspect_ratios), [(1.0,), (1.0, 0.5)]):289 self.assert_almost_list_equal(expected_aspect_ratios,290 actual_aspect_ratios)291 for actual_strides, expected_strides in zip(...

Full Screen

Full Screen

gan_estimator_impl.py

Source:gan_estimator_impl.py Github

copy

Full Screen

1# Copyright 2017 The TensorFlow Authors. All Rights Reserved.2#3# Licensed under the Apache License, Version 2.0 (the "License");4# you may not use this file except in compliance with the License.5# You may obtain a copy of the License at6#7# http://www.apache.org/licenses/LICENSE-2.08#9# Unless required by applicable law or agreed to in writing, software10# distributed under the License is distributed on an "AS IS" BASIS,11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.12# See the License for the specific language governing permissions and13# limitations under the License.14# ==============================================================================15"""A TFGAN-backed GAN Estimator."""16from __future__ import absolute_import17from __future__ import division18from __future__ import print_function19import functools20import enum21from tensorflow.contrib.framework.python.ops import variables as variable_lib22from tensorflow.contrib.gan.python import namedtuples as tfgan_tuples23from tensorflow.contrib.gan.python import train as tfgan_train24from tensorflow.contrib.gan.python.estimator.python import head as head_lib25from tensorflow.contrib.gan.python.eval.python import summaries as tfgan_summaries26from tensorflow.python.estimator import estimator27from tensorflow.python.estimator import model_fn as model_fn_lib28from tensorflow.python.framework import ops29from tensorflow.python.ops import variable_scope30from tensorflow.python.util import tf_inspect as inspect31__all__ = [32 'GANEstimator',33 'SummaryType'34]35class SummaryType(enum.IntEnum):36 NONE = 037 VARIABLES = 138 IMAGES = 239 IMAGE_COMPARISON = 340_summary_type_map = {41 SummaryType.VARIABLES: tfgan_summaries.add_gan_model_summaries,42 SummaryType.IMAGES: tfgan_summaries.add_gan_model_image_summaries,43 SummaryType.IMAGE_COMPARISON: tfgan_summaries.add_image_comparison_summaries, # pylint:disable=line-too-long44}45# TODO(joelshor): For now, this only supports 1:1 
generator:discriminator46# training sequentially. Find a nice way to expose options to the user without47# exposing internals.48class GANEstimator(estimator.Estimator):49 """An estimator for Generative Adversarial Networks (GANs).50 This Estimator is backed by TFGAN. The network functions follow the TFGAN API51 except for one exception: if either `generator_fn` or `discriminator_fn` have52 an argument called `mode`, then the tf.Estimator mode is passed in for that53 argument. This helps with operations like batch normalization, which have54 different train and evaluation behavior.55 Example:56 ```python57 import tensorflow as tf58 tfgan = tf.contrib.gan59 # See TFGAN's `train.py` for a description of the generator and60 # discriminator API.61 def generator_fn(generator_inputs):62 ...63 return generated_data64 def discriminator_fn(data, conditioning):65 ...66 return logits67 # Create GAN estimator.68 gan_estimator = tfgan.estimator.GANEstimator(69 model_dir,70 generator_fn=generator_fn,71 discriminator_fn=discriminator_fn,72 generator_loss_fn=tfgan.losses.wasserstein_generator_loss,73 discriminator_loss_fn=tfgan.losses.wasserstein_discriminator_loss,74 generator_optimizer=tf.train.AdamOptimizier(0.1, 0.5),75 discriminator_optimizer=tf.train.AdamOptimizier(0.1, 0.5))76 # Train estimator.77 gan_estimator.train(train_input_fn, steps)78 # Evaluate resulting estimator.79 gan_estimator.evaluate(eval_input_fn)80 # Generate samples from generator.81 predictions = np.array([82 x for x in gan_estimator.predict(predict_input_fn)])83 ```84 """85 def __init__(self,86 model_dir=None,87 generator_fn=None,88 discriminator_fn=None,89 generator_loss_fn=None,90 discriminator_loss_fn=None,91 generator_optimizer=None,92 discriminator_optimizer=None,93 get_hooks_fn=None,94 add_summaries=None,95 use_loss_summaries=True,96 config=None):97 """Initializes a GANEstimator instance.98 Args:99 model_dir: Directory to save model parameters, graph and etc. 
This can100 also be used to load checkpoints from the directory into a estimator101 to continue training a previously saved model.102 generator_fn: A python function that takes a Tensor, Tensor list, or103 Tensor dictionary as inputs and returns the outputs of the GAN104 generator. See `TFGAN` for more details and examples. Additionally, if105 it has an argument called `mode`, the Estimator's `mode` will be passed106 in (ex TRAIN, EVAL, PREDICT). This is useful for things like batch107 normalization.108 discriminator_fn: A python function that takes the output of109 `generator_fn` or real data in the GAN setup, and `generator_inputs`.110 Outputs a Tensor in the range [-inf, inf]. See `TFGAN` for more details111 and examples.112 generator_loss_fn: The loss function on the generator. Takes a `GANModel`113 tuple.114 discriminator_loss_fn: The loss function on the discriminator. Takes a115 `GANModel` tuple.116 generator_optimizer: The optimizer for generator updates, or a function117 that takes no arguments and returns an optimizer. This function will118 be called when the default graph is the `GANEstimator`'s graph, so119 utilities like `tf.contrib.framework.get_or_create_global_step` will120 work.121 discriminator_optimizer: Same as `generator_optimizer`, but for the122 discriminator updates.123 get_hooks_fn: A function that takes a `GANTrainOps` tuple and returns a124 list of hooks. These hooks are run on the generator and discriminator125 train ops, and can be used to implement the GAN training scheme.126 Defaults to `train.get_sequential_train_hooks()`.127 add_summaries: `None`, a single `SummaryType`, or a list of `SummaryType`.128 use_loss_summaries: If `True`, add loss summaries. 
If `False`, does not.129 If `None`, uses defaults.130 config: `RunConfig` object to configure the runtime settings.131 """132 # TODO(joelshor): Explicitly validate inputs.133 def _model_fn(features, labels, mode):134 gopt = (generator_optimizer() if callable(generator_optimizer) else135 generator_optimizer)136 dopt = (discriminator_optimizer() if callable(discriminator_optimizer)137 else discriminator_optimizer)138 gan_head = head_lib.gan_head(139 generator_loss_fn, discriminator_loss_fn, gopt, dopt,140 use_loss_summaries, get_hooks_fn=get_hooks_fn)141 return _gan_model_fn(142 features, labels, mode, generator_fn, discriminator_fn, gan_head,143 add_summaries)144 super(GANEstimator, self).__init__(145 model_fn=_model_fn, model_dir=model_dir, config=config)146def _gan_model_fn(147 features,148 labels,149 mode,150 generator_fn,151 discriminator_fn,152 head,153 add_summaries=None,154 generator_scope_name='Generator'):155 """The `model_fn` for the GAN estimator.156 We make the following convention:157 features -> TFGAN's `generator_inputs`158 labels -> TFGAN's `real_data`159 Args:160 features: A dictionary to feed to generator. In the unconditional case,161 this might be just `noise`. In the conditional GAN case, this162 might be the generator's conditioning. The `generator_fn` determines163 what the required keys are.164 labels: Real data. Can be any structure, as long as `discriminator_fn`165 can accept it for the first argument.166 mode: Defines whether this is training, evaluation or prediction.167 See `ModeKeys`.168 generator_fn: A python lambda that takes `generator_inputs` as inputs and169 returns the outputs of the GAN generator.170 discriminator_fn: A python lambda that takes `real_data`/`generated data`171 and `generator_inputs`. Outputs a Tensor in the range [-inf, inf].172 head: A `Head` instance suitable for GANs.173 add_summaries: `None`, a single `SummaryType`, or a list of `SummaryType`.174 generator_scope_name: The name of the generator scope. 
We need this to be175 the same for GANModels produced by TFGAN's `train.gan_model` and the176 manually constructed ones for predictions.177 Returns:178 `ModelFnOps`179 Raises:180 ValueError: If `labels` isn't `None` during prediction.181 """182 real_data = labels183 generator_inputs = features184 if mode == model_fn_lib.ModeKeys.TRAIN:185 gan_model = _make_train_gan_model(186 generator_fn, discriminator_fn, real_data, generator_inputs,187 generator_scope_name, add_summaries)188 elif mode == model_fn_lib.ModeKeys.EVAL:189 gan_model = _make_eval_gan_model(190 generator_fn, discriminator_fn, real_data, generator_inputs,191 generator_scope_name, add_summaries)192 else:193 if real_data is not None:194 raise ValueError('`labels` must be `None` when mode is `predict`. '195 'Instead, found %s' % real_data)196 gan_model = _make_prediction_gan_model(197 generator_inputs, generator_fn, generator_scope_name)198 return head.create_estimator_spec(199 features=None,200 mode=mode,201 logits=gan_model,202 labels=None)203def _make_gan_model(generator_fn, discriminator_fn, real_data,204 generator_inputs, generator_scope, add_summaries, mode):205 """Make a `GANModel`, and optionally pass in `mode`."""206 # If network functions have an argument `mode`, pass mode to it.207 if 'mode' in inspect.getargspec(generator_fn).args:208 generator_fn = functools.partial(generator_fn, mode=mode)209 if 'mode' in inspect.getargspec(discriminator_fn).args:210 discriminator_fn = functools.partial(discriminator_fn, mode=mode)211 gan_model = tfgan_train.gan_model(212 generator_fn,213 discriminator_fn,214 real_data,215 generator_inputs,216 generator_scope=generator_scope,217 check_shapes=False)218 if add_summaries:219 if not isinstance(add_summaries, (tuple, list)):220 add_summaries = [add_summaries]221 with ops.name_scope(None):222 for summary_type in add_summaries:223 _summary_type_map[summary_type](gan_model)224 return gan_model225def _make_train_gan_model(generator_fn, discriminator_fn, real_data,226 
generator_inputs, generator_scope, add_summaries):227 """Make a `GANModel` for training."""228 return _make_gan_model(generator_fn, discriminator_fn, real_data,229 generator_inputs, generator_scope, add_summaries,230 model_fn_lib.ModeKeys.TRAIN)231def _make_eval_gan_model(generator_fn, discriminator_fn, real_data,232 generator_inputs, generator_scope, add_summaries):233 """Make a `GANModel` for evaluation."""234 return _make_gan_model(generator_fn, discriminator_fn, real_data,235 generator_inputs, generator_scope, add_summaries,236 model_fn_lib.ModeKeys.EVAL)237def _make_prediction_gan_model(generator_inputs, generator_fn, generator_scope):238 """Make a `GANModel` from just the generator."""239 # If `generator_fn` has an argument `mode`, pass mode to it.240 if 'mode' in inspect.getargspec(generator_fn).args:241 generator_fn = functools.partial(generator_fn,242 mode=model_fn_lib.ModeKeys.PREDICT)243 with variable_scope.variable_scope(generator_scope) as gen_scope:244 generator_inputs = tfgan_train._convert_tensor_or_l_or_d(generator_inputs) # pylint:disable=protected-access245 generated_data = generator_fn(generator_inputs)246 generator_variables = variable_lib.get_trainable_variables(gen_scope)247 return tfgan_tuples.GANModel(248 generator_inputs,249 generated_data,250 generator_variables,251 gen_scope,252 generator_fn,253 real_data=None,254 discriminator_real_outputs=None,255 discriminator_gen_outputs=None,256 discriminator_variables=None,257 discriminator_scope=None,...

Full Screen

Full Screen

anchor_generator_builder.py

Source:anchor_generator_builder.py Github

copy

Full Screen

...34 'anchor_generator_pb2.AnchorGenerator')35 if anchor_generator_config.WhichOneof(36 'anchor_generator_oneof') == 'grid_anchor_generator':37 grid_anchor_generator_config = anchor_generator_config.grid_anchor_generator38 return grid_anchor_generator.GridAnchorGenerator(39 scales=[float(scale) for scale in grid_anchor_generator_config.scales],40 aspect_ratios=[float(aspect_ratio)41 for aspect_ratio42 in grid_anchor_generator_config.aspect_ratios],43 base_anchor_size=[grid_anchor_generator_config.height,44 grid_anchor_generator_config.width],45 anchor_stride=[grid_anchor_generator_config.height_stride,46 grid_anchor_generator_config.width_stride],47 anchor_offset=[grid_anchor_generator_config.height_offset,48 grid_anchor_generator_config.width_offset])49 elif anchor_generator_config.WhichOneof(50 'anchor_generator_oneof') == 'ssd_anchor_generator':51 ssd_anchor_generator_config = anchor_generator_config.ssd_anchor_generator52 anchor_strides = None53 if ssd_anchor_generator_config.height_stride:54 anchor_strides = zip(ssd_anchor_generator_config.height_stride,55 ssd_anchor_generator_config.width_stride)56 anchor_offsets = None57 if ssd_anchor_generator_config.height_offset:58 anchor_offsets = zip(ssd_anchor_generator_config.height_offset,59 ssd_anchor_generator_config.width_offset)60 return multiple_grid_anchor_generator.create_ssd_anchors(61 num_layers=ssd_anchor_generator_config.num_layers,62 min_scale=ssd_anchor_generator_config.min_scale,63 max_scale=ssd_anchor_generator_config.max_scale,64 scales=[float(scale) for scale in ssd_anchor_generator_config.scales],65 aspect_ratios=ssd_anchor_generator_config.aspect_ratios,66 interpolated_scale_aspect_ratio=(67 ssd_anchor_generator_config.interpolated_scale_aspect_ratio),68 base_anchor_size=[69 ssd_anchor_generator_config.base_anchor_height,70 ssd_anchor_generator_config.base_anchor_width71 ],72 anchor_strides=anchor_strides,73 anchor_offsets=anchor_offsets,74 reduce_boxes_in_lowest_layer=(75 
ssd_anchor_generator_config.reduce_boxes_in_lowest_layer))76 elif anchor_generator_config.WhichOneof(77 'anchor_generator_oneof') == 'multiscale_anchor_generator':78 cfg = anchor_generator_config.multiscale_anchor_generator79 return multiscale_grid_anchor_generator.MultiscaleGridAnchorGenerator(80 cfg.min_level,81 cfg.max_level,82 cfg.anchor_scale,83 [float(aspect_ratio) for aspect_ratio in cfg.aspect_ratios],84 cfg.scales_per_octave,85 cfg.normalize_coordinates86 )87 elif anchor_generator_config.WhichOneof(88 'anchor_generator_oneof') == 'flexible_grid_anchor_generator':89 cfg = anchor_generator_config.flexible_grid_anchor_generator90 base_sizes = []91 aspect_ratios = []92 strides = []93 offsets = []94 for anchor_grid in cfg.anchor_grid:95 base_sizes.append(tuple(anchor_grid.base_sizes))96 aspect_ratios.append(tuple(anchor_grid.aspect_ratios))97 strides.append((anchor_grid.height_stride, anchor_grid.width_stride))98 offsets.append((anchor_grid.height_offset, anchor_grid.width_offset))99 return flexible_grid_anchor_generator.FlexibleGridAnchorGenerator(100 base_sizes, aspect_ratios, strides, offsets, cfg.normalize_coordinates)101 else:...

Full Screen

Full Screen

doc_generator_visitor_test.py

Source:doc_generator_visitor_test.py Github

copy

Full Screen

1# Copyright 2015 The TensorFlow Authors. All Rights Reserved.2#3# Licensed under the Apache License, Version 2.0 (the "License");4# you may not use this file except in compliance with the License.5# You may obtain a copy of the License at6#7# http://www.apache.org/licenses/LICENSE-2.08#9# Unless required by applicable law or agreed to in writing, software10# distributed under the License is distributed on an "AS IS" BASIS,11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.12# See the License for the specific language governing permissions and13# limitations under the License.14# ==============================================================================15"""Tests for tools.docs.doc_generator_visitor."""16from __future__ import absolute_import17from __future__ import division18from __future__ import print_function19from tensorflow.python.platform import googletest20from tensorflow.tools.docs import doc_generator_visitor21class DocGeneratorVisitorTest(googletest.TestCase):22 def test_call_module(self):23 visitor = doc_generator_visitor.DocGeneratorVisitor()24 visitor(25 'doc_generator_visitor', doc_generator_visitor,26 [('DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor)])27 self.assertEqual({'doc_generator_visitor': ['DocGeneratorVisitor']},28 visitor.tree)29 self.assertEqual({30 'doc_generator_visitor': doc_generator_visitor,31 'doc_generator_visitor.DocGeneratorVisitor':32 doc_generator_visitor.DocGeneratorVisitor,33 }, visitor.index)34 def test_call_class(self):35 visitor = doc_generator_visitor.DocGeneratorVisitor()36 visitor(37 'DocGeneratorVisitor', doc_generator_visitor.DocGeneratorVisitor,38 [('index', doc_generator_visitor.DocGeneratorVisitor.index)])39 self.assertEqual({'DocGeneratorVisitor': ['index']},40 visitor.tree)41 self.assertEqual({42 'DocGeneratorVisitor': doc_generator_visitor.DocGeneratorVisitor,43 'DocGeneratorVisitor.index':44 doc_generator_visitor.DocGeneratorVisitor.index45 }, visitor.index)46 def 
test_call_raises(self):47 visitor = doc_generator_visitor.DocGeneratorVisitor()48 with self.assertRaises(RuntimeError):49 visitor('non_class_or_module', 'non_class_or_module_object', [])50 def test_duplicates(self):51 visitor = doc_generator_visitor.DocGeneratorVisitor()52 visitor(53 'submodule.DocGeneratorVisitor',54 doc_generator_visitor.DocGeneratorVisitor,55 [('index', doc_generator_visitor.DocGeneratorVisitor.index),56 ('index2', doc_generator_visitor.DocGeneratorVisitor.index)])57 visitor(58 'submodule2.DocGeneratorVisitor',59 doc_generator_visitor.DocGeneratorVisitor,60 [('index', doc_generator_visitor.DocGeneratorVisitor.index),61 ('index2', doc_generator_visitor.DocGeneratorVisitor.index)])62 visitor(63 'DocGeneratorVisitor2',64 doc_generator_visitor.DocGeneratorVisitor,65 [('index', doc_generator_visitor.DocGeneratorVisitor.index),66 ('index2', doc_generator_visitor.DocGeneratorVisitor.index)])67 # The shorter path should be master, or if equal, the lexicographically68 # first will be.69 self.assertEqual(70 {'DocGeneratorVisitor2': sorted(['submodule.DocGeneratorVisitor',71 'submodule2.DocGeneratorVisitor',72 'DocGeneratorVisitor2']),73 'DocGeneratorVisitor2.index': sorted([74 'submodule.DocGeneratorVisitor.index',75 'submodule.DocGeneratorVisitor.index2',76 'submodule2.DocGeneratorVisitor.index',77 'submodule2.DocGeneratorVisitor.index2',78 'DocGeneratorVisitor2.index',79 'DocGeneratorVisitor2.index2'80 ]),81 }, visitor.duplicates)82 self.assertEqual({83 'submodule.DocGeneratorVisitor': 'DocGeneratorVisitor2',84 'submodule.DocGeneratorVisitor.index': 'DocGeneratorVisitor2.index',85 'submodule.DocGeneratorVisitor.index2': 'DocGeneratorVisitor2.index',86 'submodule2.DocGeneratorVisitor': 'DocGeneratorVisitor2',87 'submodule2.DocGeneratorVisitor.index': 'DocGeneratorVisitor2.index',88 'submodule2.DocGeneratorVisitor.index2': 'DocGeneratorVisitor2.index',89 'DocGeneratorVisitor2.index2': 'DocGeneratorVisitor2.index'90 }, visitor.duplicate_of)91 
self.assertEqual({92 id(doc_generator_visitor.DocGeneratorVisitor): 'DocGeneratorVisitor2',93 id(doc_generator_visitor.DocGeneratorVisitor.index):94 'DocGeneratorVisitor2.index',95 }, visitor.reverse_index)96if __name__ == '__main__':...

Full Screen

Full Screen

__init__.py

Source:__init__.py Github

copy

Full Screen

1from conans.model import registered_generators2from conans.util.files import save, normalize3from os.path import join4from .text import TXTGenerator5from .gcc import GCCGenerator6from .cmake import CMakeGenerator7from .qmake import QmakeGenerator8from .qbs import QbsGenerator9from .visualstudio import VisualStudioGenerator10from .xcode import XCodeGenerator11from .ycm import YouCompleteMeGenerator12from .virtualenv import VirtualEnvGenerator13from conans.client.generators.env import ConanEnvGenerator14def _save_generator(name, klass):15 if name not in registered_generators:16 registered_generators.add(name, klass)17_save_generator("txt", TXTGenerator)18_save_generator("gcc", GCCGenerator)19_save_generator("cmake", CMakeGenerator)20_save_generator("qmake", QmakeGenerator)21_save_generator("qbs", QbsGenerator)22_save_generator("visual_studio", VisualStudioGenerator)23_save_generator("xcode", XCodeGenerator)24_save_generator("ycm", YouCompleteMeGenerator)25_save_generator("virtualenv", VirtualEnvGenerator)26_save_generator("env", ConanEnvGenerator)27def write_generators(conanfile, path, output):28 """ produces auxiliary files, required to build a project or a package.29 """30 for generator_name in conanfile.generators:31 if generator_name not in registered_generators:32 output.warn("Invalid generator '%s'. Available types: %s" %33 (generator_name, ", ".join(registered_generators.available)))34 else:35 generator_class = registered_generators[generator_name]36 try:37 generator = generator_class(conanfile)38 except TypeError:39 # To allow old-style generator packages to work (e.g. premake)40 output.warn("Generator %s failed with new __init__(), trying old one")41 generator = generator_class(conanfile.deps_cpp_info, conanfile.cpp_info)42 try:43 content = generator.content44 if isinstance(content, dict):45 if generator.filename:46 output.warn("Generator %s is multifile. 
Property 'filename' not used"47 % (generator_name,))48 for k, v in content.items():49 v = normalize(v)50 output.info("Generated %s created %s" % (generator_name, k))51 save(join(path, k), v)52 else:53 content = normalize(content)54 output.info("Generated %s created %s" % (generator_name, generator.filename))55 save(join(path, generator.filename), content)56 except Exception as e:57 output.error("Generator %s(file:%s) failed\n%s"...

Full Screen

Full Screen

_pickle.py

Source:_pickle.py Github

copy

Full Screen

...25 bit_generator = BitGenerators[bit_generator_name]26 else:27 raise ValueError(str(bit_generator_name) + ' is not a known '28 'BitGenerator module.')29 return Generator(bit_generator())30def __bit_generator_ctor(bit_generator_name='MT19937'):31 """32 Pickling helper function that returns a bit generator object33 Parameters34 ----------35 bit_generator_name: str36 String containing the name of the BitGenerator37 Returns38 -------39 bit_generator: BitGenerator40 BitGenerator instance41 """42 if bit_generator_name in BitGenerators:43 bit_generator = BitGenerators[bit_generator_name]...

Full Screen

Full Screen

Using AI Code Generation

copy

Full Screen

1var wptools = require('wptools');2var page = wptools.page('Albert Einstein');3page.get(function(err, data) {4 if (err) {5 console.log(err);6 } else {7 console.log(data);8 }9});10- Run the code using the command `node test.js`. You should see a JSON object with the data of the Wikipedia page of Albert Einstein. You can also check the [example.js](

Full Screen

Using AI Code Generation

copy

Full Screen

1const wptools = require('wptools');2const fs = require('fs');3const path = require('path');4const outputDir = path.join(__dirname, 'output');5const outputFile = path.join(outputDir, 'output.txt');6const output = fs.createWriteStream(outputFile);7const input = fs.createReadStream('input.txt');8const readline = require('readline');9const rl = readline.createInterface({10});11rl.on('line', (line) => {12 wptools.page(line)13 .get()14 .then(function(page) {15 console.log(page.infobox());16 output.write(page.infobox());17 })18 .catch(function(err) {19 console.log(err);20 });21});22output.write(page.infobox());

Full Screen

Using AI Code Generation

copy

Full Screen

1var wptools = require("wptools");2wptools.page('Albert Einstein').get().then(function(response) {3 console.log(response);4});5wptools.page('Albert Einstein').get().then(function(response) {6 console.log(response);7});8wptools.page('Albert Einstein').get(function(err, response) {9 console.log(response);10});11wptools.page('Albert Einstein').get().then(function(response) {12 console.log(response);13}, function(err) {14 console.log(err);15});16wptools.page('Albert Einstein').get().then(function(response) {17 console.log(response);18}, function(err) {19 console.log(err);20});21wptools.page('Albert Einstein').get(function(err, response) {22 console.log(response);23}, function(err) {24 console.log(err);25});26wptools.page('Albert Einstein').get(function(err, response) {27 console.log(response);28}, function(err) {29 console.log(err);30});31wptools.page('Albert Einstein').get().then(function(response) {32 console.log(response);33}, function(err) {34 console.log(err);35});36wptools.page('Albert Einstein').get().then(function(response) {37 console.log(response);38}, function(err) {39 console.log(err);40});41wptools.page('Albert Einstein').get(function(err, response) {42 console.log(response);43}, function(err) {44 console.log(err);45});46wptools.page('Albert Einstein').get(function(err, response) {47 console.log(response);48}, function(err) {49 console.log(err);50});51wptools.page('Albert Einstein').get().then(function(response) {52 console.log(response);53}, function(err) {

Full Screen

Using AI Code Generation

copy

Full Screen

1var wptools = require('wptools');2var fs = require('fs');3var path = require('path');4var wpt = new wptools.page('Barack Obama');5wpt.get(function(err, resp) {6 if (err) {7 console.log(err);8 }9 else {10 console.log(resp);11 }12});13var wptools = require('wptools');14var fs = require('fs');15var path = require('path');16var wpt = new wptools.page('Barack Obama');17wpt.get().then(function(resp) {18 console.log(resp);19}).catch(function(err) {20 console.log(err);21});22I am using the following code to use wptools in Node.js. But I am getting the following error: TypeError: wptools.page is not a function How can I fix this? var wptools = require('wptools'); var fs = require('fs'); var path = require('path'); var wpt = new wptools.page('Barack Obama'); wpt.get(function(err, resp) { if (err) { console.log(err); } else { console.log(resp); } });23I am using the following code to use wptools in Node.js. But I am getting the following error: TypeError: wptools.page is not a function How can I fix this? var wptools = require('wptools'); var fs = require('fs'); var path = require('path'); var wpt = new wptools.page('Barack Obama'); wpt.get().then(function(resp) { console.log(resp); }).catch(function(err) { console.log(err); });24I am using the following code to use wptools in Node.js. But I am getting the following error: TypeError: wptools.page is not a function How can I fix this? var wptools = require('wptools'); var fs = require('fs'); var path = require('path

Full Screen

Using AI Code Generation

copy

Full Screen

const wptools = require('wptools');
const generator = wptools.page('Barack Obama').getGenerator('links');
generator.then((data) => {
  console.log(data);
});

MIT © Rohit Sharma

Full Screen

Using AI Code Generation

copy

Full Screen

1const wpt = require('webpagetest');2const wptClient = wpt('www.webpagetest.org', 'A.4e4f4a4e4f4a4e4f4a4e4f4a4e4f4a4');3 })4 .then((data) => {5 console.log(data);6 })7 .catch((err) => {8 console.error(err);9 });10[MIT](LICENSE)

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with LambdaTest Learning Hub — right from setting up the prerequisites to run your first automation test, to following best practices and diving deeper into advanced test scenarios. LambdaTest Learning Hubs compile a list of step-by-step guides to help you become proficient with different test automation frameworks, e.g., Selenium, Cypress, and TestNG.

LambdaTest Learning Hubs:

YouTube

You can also refer to the video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run wpt automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now!!

Get 100 automation test minutes FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful