How to use the toBeTrue matcher in jest-extended

Best JavaScript code snippet using jest-extended

autograd.test.ts

Source: autograd.test.ts (GitHub)

copy

Full Screen

...53 (await toCPU(a)) as CPUTensor<'float32'>,54 (a: CPUTensor<'float32'>) => a.exp() as CPUTensor<'float32'>55 )56 );57 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();58 });59 it('should work with log', async () => {60 if (backend.wait !== undefined) {61 await backend.wait;62 }63 const a = backend.constructor([2, 2], [1, 2, 3, 4]);64 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);65 const v = new Variable(a);66 const res = v.log() as Variable<'float32'>;67 res.backward(ones);68 const numericalGrad = await backend.toBackend(69 numericalGradient(70 (await toCPU(a)) as CPUTensor<'float32'>,71 (a: CPUTensor<'float32'>) => a.log() as CPUTensor<'float32'>72 )73 );74 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();75 });76 it('should work with sqrt', async () => {77 if (backend.wait !== undefined) {78 await backend.wait;79 }80 const a = backend.constructor([2, 2], [1, 2, 4, 16]);81 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);82 const v = new Variable(a);83 const res = v.sqrt() as Variable<'float32'>;84 res.backward(ones);85 const numericalGrad = await backend.toBackend(86 numericalGradient(87 (await toCPU(a)) as CPUTensor<'float32'>,88 (a: CPUTensor<'float32'>) => a.sqrt() as CPUTensor<'float32'>89 )90 );91 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();92 });93 it('should work with reshape', async () => {94 if (backend.wait !== undefined) {95 await backend.wait;96 }97 const a = backend.constructor([2, 2], [1, 2, 4, 16]);98 const ones = backend.constructor([4], [1, 1, 1, 1]);99 const v = new Variable(a);100 const res = v.reshape([4]) as Variable<'float32'>;101 res.backward(ones);102 const numericalGrad = await backend.toBackend(103 numericalGradient(104 (await toCPU(a)) as CPUTensor<'float32'>,105 (a: CPUTensor<'float32'>) => a.reshape([4]) as CPUTensor<'float32'>106 )107 );108 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();109 });110 it('should work with abs', async () => {111 if 
(backend.wait !== undefined) {112 await backend.wait;113 }114 const a = backend.constructor([2, 2], [-2, -1, 0.5, 1]);115 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);116 const v = new Variable(a);117 const res = v.abs() as Variable<'float32'>;118 res.backward(ones);119 const numericalGrad = await backend.toBackend(120 numericalGradient(121 (await toCPU(a)) as CPUTensor<'float32'>,122 (a: CPUTensor<'float32'>) => a.abs() as CPUTensor<'float32'>123 )124 );125 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();126 });127 it('should work with sin', async () => {128 if (backend.wait !== undefined) {129 await backend.wait;130 }131 const a = backend.constructor([2, 2], [-2, -1, 0.5, 1]);132 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);133 const v = new Variable(a);134 const res = v.sin() as Variable<'float32'>;135 res.backward(ones);136 const numericalGrad = await backend.toBackend(137 numericalGradient(138 (await toCPU(a)) as CPUTensor<'float32'>,139 (a: CPUTensor<'float32'>) => a.sin() as CPUTensor<'float32'>140 )141 );142 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();143 });144 it('should work with cos', async () => {145 if (backend.wait !== undefined) {146 await backend.wait;147 }148 const a = backend.constructor([2, 2], [-2, -1, 0.5, 1]);149 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);150 const v = new Variable(a);151 const res = v.cos() as Variable<'float32'>;152 res.backward(ones);153 const numericalGrad = await backend.toBackend(154 numericalGradient(155 (await toCPU(a)) as CPUTensor<'float32'>,156 (a: CPUTensor<'float32'>) => a.cos() as CPUTensor<'float32'>157 )158 );159 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();160 });161 it('should work with tan', async () => {162 if (backend.wait !== undefined) {163 await backend.wait;164 }165 const a = backend.constructor([2, 2], [-0.7, -0.3, 0.5, 0.7]);166 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);167 const v = new 
Variable(a);168 const res = v.tan() as Variable<'float32'>;169 res.backward(ones);170 const numericalGrad = await backend.toBackend(171 numericalGradient(172 (await toCPU(a)) as CPUTensor<'float32'>,173 (a: CPUTensor<'float32'>) => a.tan() as CPUTensor<'float32'>174 )175 );176 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();177 });178 it('should work with asin', async () => {179 if (backend.wait !== undefined) {180 await backend.wait;181 }182 const a = backend.constructor([2, 2], [-0.5, -0.1, 0.2, 0.7]);183 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);184 const v = new Variable(a);185 const res = v.asin() as Variable<'float32'>;186 res.backward(ones);187 const numericalGrad = await backend.toBackend(188 numericalGradient(189 (await toCPU(a)) as CPUTensor<'float32'>,190 (a: CPUTensor<'float32'>) => a.asin() as CPUTensor<'float32'>191 )192 );193 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();194 });195 it('should work with acos', async () => {196 if (backend.wait !== undefined) {197 await backend.wait;198 }199 const a = backend.constructor([2, 2], [-0.5, -0.1, 0.2, 0.7]);200 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);201 const v = new Variable(a);202 const res = v.acos() as Variable<'float32'>;203 res.backward(ones);204 const numericalGrad = await backend.toBackend(205 numericalGradient(206 (await toCPU(a)) as CPUTensor<'float32'>,207 (a: CPUTensor<'float32'>) => a.acos() as CPUTensor<'float32'>208 )209 );210 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();211 });212 it('should work with atan', async () => {213 if (backend.wait !== undefined) {214 await backend.wait;215 }216 const a = backend.constructor([2, 2], [-0.7, -0.3, 0.5, 0.7]);217 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);218 const v = new Variable(a);219 const res = v.atan() as Variable<'float32'>;220 res.backward(ones);221 const numericalGrad = await backend.toBackend(222 numericalGradient(223 (await toCPU(a)) as 
CPUTensor<'float32'>,224 (a: CPUTensor<'float32'>) => a.atan() as CPUTensor<'float32'>225 )226 );227 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();228 });229 it('should work with sinh', async () => {230 if (backend.wait !== undefined) {231 await backend.wait;232 }233 const a = backend.constructor([2, 2], [-0.5, -0.1, 0.2, 0.7]);234 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);235 const v = new Variable(a);236 const res = v.sinh() as Variable<'float32'>;237 res.backward(ones);238 const numericalGrad = await backend.toBackend(239 numericalGradient(240 (await toCPU(a)) as CPUTensor<'float32'>,241 (a: CPUTensor<'float32'>) => a.sinh() as CPUTensor<'float32'>242 )243 );244 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();245 });246 it('should work with cosh', async () => {247 if (backend.wait !== undefined) {248 await backend.wait;249 }250 const a = backend.constructor([2, 2], [-0.5, -0.1, 0.2, 0.7]);251 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);252 const v = new Variable(a);253 const res = v.cosh() as Variable<'float32'>;254 res.backward(ones);255 const numericalGrad = await backend.toBackend(256 numericalGradient(257 (await toCPU(a)) as CPUTensor<'float32'>,258 (a: CPUTensor<'float32'>) => a.cosh() as CPUTensor<'float32'>259 )260 );261 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();262 });263 it('should work with tanh', async () => {264 if (backend.wait !== undefined) {265 await backend.wait;266 }267 const a = backend.constructor([2, 2], [-0.7, -0.3, 0.5, 0.7]);268 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);269 const v = new Variable(a);270 const res = v.tanh() as Variable<'float32'>;271 res.backward(ones);272 const numericalGrad = await backend.toBackend(273 numericalGradient(274 (await toCPU(a)) as CPUTensor<'float32'>,275 (a: CPUTensor<'float32'>) => a.tanh() as CPUTensor<'float32'>276 )277 );278 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();279 });280 if 
(backend.name !== 'GPU') {281 it('should work with asinh', async () => {282 if (backend.wait !== undefined) {283 await backend.wait;284 }285 const a = backend.constructor([2, 2], [-0.5, -0.1, 0.2, 0.7]);286 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);287 const v = new Variable(a);288 const res = v.asinh() as Variable<'float32'>;289 res.backward(ones);290 const numericalGrad = await backend.toBackend(291 numericalGradient(292 (await toCPU(a)) as CPUTensor<'float32'>,293 (a: CPUTensor<'float32'>) => a.asinh() as CPUTensor<'float32'>294 )295 );296 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();297 });298 it('should work with acosh', async () => {299 if (backend.wait !== undefined) {300 await backend.wait;301 }302 const a = backend.constructor([2, 2], [2.0, 2.2, 3.1, 4.5]);303 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);304 const v = new Variable(a);305 const res = v.acosh() as Variable<'float32'>;306 res.backward(ones);307 const numericalGrad = await backend.toBackend(308 numericalGradient(309 (await toCPU(a)) as CPUTensor<'float32'>,310 (a: CPUTensor<'float32'>) => a.acosh() as CPUTensor<'float32'>311 )312 );313 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();314 });315 it('should work with atanh', async () => {316 if (backend.wait !== undefined) {317 await backend.wait;318 }319 const a = backend.constructor([2, 2], [-0.7, -0.3, 0.5, 0.7]);320 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);321 const v = new Variable(a);322 const res = v.atanh() as Variable<'float32'>;323 res.backward(ones);324 const numericalGrad = await backend.toBackend(325 numericalGradient(326 (await toCPU(a)) as CPUTensor<'float32'>,327 (a: CPUTensor<'float32'>) => a.atanh() as CPUTensor<'float32'>328 )329 );330 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();331 });332 }333 it('should work with negate', async () => {334 if (backend.wait !== undefined) {335 await backend.wait;336 }337 const a = 
backend.constructor([2, 2], [-2, -1, 0.5, 1]);338 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);339 const v = new Variable(a);340 const res = v.negate() as Variable<'float32'>;341 res.backward(ones);342 const numericalGrad = await backend.toBackend(343 numericalGradient(344 (await toCPU(a)) as CPUTensor<'float32'>,345 (a: CPUTensor<'float32'>) => a.negate() as CPUTensor<'float32'>346 )347 );348 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();349 });350 it('should work with add multiply scalar', async () => {351 if (backend.wait !== undefined) {352 await backend.wait;353 }354 const a = backend.constructor([2, 2], [-2, -1, 0.5, 1]);355 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);356 const v = new Variable(a);357 const res = v.addMultiplyScalar(2.0, 5.0) as Variable<'float32'>;358 res.backward(ones);359 const numericalGrad = await backend.toBackend(360 numericalGradient(361 (await toCPU(a)) as CPUTensor<'float32'>,362 (a: CPUTensor<'float32'>) =>363 a.addMultiplyScalar(2.0, 5.0) as CPUTensor<'float32'>364 )365 );366 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();367 });368 if (backend.name !== 'GPU') {369 it('should work with power scalar', async () => {370 if (backend.wait !== undefined) {371 await backend.wait;372 }373 const a = backend.constructor([2, 2], [-2, -1, 0.5, 1]);374 const ones = backend.constructor([2, 2], [1, 1, 1, 1]);375 const v = new Variable(a);376 const res = v.powerScalar(2.0, 3.0) as Variable<'float32'>;377 res.backward(ones);378 const numericalGrad = await backend.toBackend(379 numericalGradient(380 (await toCPU(a)) as CPUTensor<'float32'>,381 (a: CPUTensor<'float32'>) =>382 a.powerScalar(2.0, 3.0) as CPUTensor<'float32'>383 )384 );385 expect(await v.grad?.compare(numericalGrad, 0.5)).toBeTrue();386 });387 }388 it('should work with matmul', async () => {389 if (backend.wait !== undefined) {390 await backend.wait;391 }392 const a = new CPUTensor([2, 3], [1, 2, 3, 4, 5, 6], 'float32');393 
const b = new CPUTensor(394 [3, 4],395 [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],396 'float32'397 );398 const ones = backend.constructor([2, 4], [1, 1, 1, 1, 1, 1, 1, 1]);399 const aBackend = await backend.toBackend(a);400 const bBackend = await backend.toBackend(b);401 const vA = new Variable(aBackend);402 const vB = new Variable(bBackend);403 const res = vA.matMul(vB) as Variable<'float32'>;404 res.backward(ones);405 const numericalGradA = numericalGradient(406 a,407 (a: CPUTensor<'float32'>) => a.matMul(b) as CPUTensor<'float32'>408 );409 const numericalGradB = numericalGradient(410 b,411 (b: CPUTensor<'float32'>) => a.matMul(b) as CPUTensor<'float32'>412 );413 expect(await vA.grad?.compare(numericalGradA, 1)).toBeTrue();414 expect(await vB.grad?.compare(numericalGradB, 1)).toBeTrue();415 });416 it('should work with concat', async () => {417 if (backend.wait !== undefined) {418 await backend.wait;419 }420 const a = new CPUTensor([2, 3], [1, 2, 3, 4, 5, 6], 'float32');421 const b = new CPUTensor([2, 4], [1, 2, 3, 4, 5, 6, 7, 8], 'float32');422 const ones = backend.constructor(423 [2, 7],424 [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]425 );426 const aBackend = await backend.toBackend(a);427 const bBackend = await backend.toBackend(b);428 const vA = new Variable(aBackend);429 const vB = new Variable(bBackend);430 const res = vA.concat(vB, 1) as Variable<'float32'>;431 res.backward(ones);432 const numericalGradA = numericalGradient(433 a,434 (a: CPUTensor<'float32'>) => a.concat(b, 1) as CPUTensor<'float32'>435 );436 const numericalGradB = numericalGradient(437 b,438 (b: CPUTensor<'float32'>) => a.concat(b, 1) as CPUTensor<'float32'>439 );440 expect(await vA.grad?.compare(numericalGradA, epsilon)).toBeTrue();441 expect(await vB.grad?.compare(numericalGradB, epsilon)).toBeTrue();442 });443 it('should work with clip', async () => {444 if (backend.wait !== undefined) {445 await backend.wait;446 }447 const a = backend.constructor([2, 3], [-2, -1, 0.5, 1, 5.5, 7]);448 const 
ones = backend.constructor([2, 3], [1, 1, 1, 1, 1, 1]);449 const v = new Variable(a);450 const res = v.clip(0, 6) as Variable<'float32'>;451 res.backward(ones);452 const numericalGrad = await backend.toBackend(453 numericalGradient(454 (await toCPU(a)) as CPUTensor<'float32'>,455 (a: CPUTensor<'float32'>) => a.clip(0, 6) as CPUTensor<'float32'>456 )457 );458 expect(await v.grad?.compare(numericalGrad, epsilon)).toBeTrue();459 });460 it('should work with repeat', async () => {461 if (backend.wait !== undefined) {462 await backend.wait;463 }464 const a = backend.constructor([2, 2], [1, 2, 3, 4]);465 const ones = backend.constructor([6, 4], new Array(24).fill(1));466 const v = new Variable(a);467 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;468 const res = v.repeat([3, 2]) as Variable<'float32'>;469 res.backward(ones);470 const numericalGrad = await backend.toBackend(471 numericalGradient(472 aCPU,473 (a: CPUTensor<'float32'>) => a.repeat([3, 2]) as CPUTensor<'float32'>474 )475 );476 expect(await v.grad?.compare(numericalGrad, 0.1)).toBeTrue();477 });478 it('should work with expand', async () => {479 if (backend.wait !== undefined) {480 await backend.wait;481 }482 const a = backend.constructor([2, 2], [1, 2, 3, 4]);483 const ones = backend.constructor([3, 2, 2], new Array(12).fill(1));484 const v = new Variable(a);485 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;486 const res = v.expand([3, 2, 2]) as Variable<'float32'>;487 res.backward(ones);488 const numericalGrad = await backend.toBackend(489 numericalGradient(490 aCPU,491 (a: CPUTensor<'float32'>) =>492 a.expand([3, 2, 2]) as CPUTensor<'float32'>493 )494 );495 expect(await v.grad?.compare(numericalGrad, 0.1)).toBeTrue();496 });497 it('should work with add', async () => {498 if (backend.wait !== undefined) {499 await backend.wait;500 }501 const a = backend.constructor([2, 2], [1, 2, 3, 4]);502 const b = backend.constructor([2, 2], [5, 6, 7, 8]);503 const ones = backend.constructor([2, 2], new 
Array(4).fill(1));504 const vA = new Variable(a);505 const vB = new Variable(b);506 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;507 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;508 const res = vA.add(vB) as Variable<'float32'>;509 res.backward(ones);510 const numericalGradA = await backend.toBackend(511 numericalGradient(512 aCPU,513 (a: CPUTensor<'float32'>) => a.add(bCPU) as CPUTensor<'float32'>514 )515 );516 const numericalGradB = await backend.toBackend(517 numericalGradient(518 bCPU,519 (b: CPUTensor<'float32'>) => aCPU.add(b) as CPUTensor<'float32'>520 )521 );522 expect(await vA.grad?.compare(numericalGradA, epsilon)).toBeTrue();523 expect(await vB.grad?.compare(numericalGradB, epsilon)).toBeTrue();524 });525 it('should work with broadcasted add', async () => {526 if (backend.wait !== undefined) {527 await backend.wait;528 }529 const a = backend.constructor([2, 2], [1, 2, 3, 4]);530 const b = backend.constructor([2], [5, 6]);531 const ones = backend.constructor([2, 2], new Array(4).fill(1));532 const vA = new Variable(a);533 const vB = new Variable(b);534 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;535 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;536 const res = vA.add(vB) as Variable<'float32'>;537 res.backward(ones);538 const numericalGradA = await backend.toBackend(539 numericalGradient(540 aCPU,541 (a: CPUTensor<'float32'>) => a.add(bCPU) as CPUTensor<'float32'>542 )543 );544 const numericalGradB = await backend.toBackend(545 numericalGradient(546 bCPU,547 (b: CPUTensor<'float32'>) => aCPU.add(b) as CPUTensor<'float32'>548 )549 );550 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();551 expect(await vB.grad?.compare(numericalGradB, 0.05)).toBeTrue();552 });553 it('should work with subtract', async () => {554 if (backend.wait !== undefined) {555 await backend.wait;556 }557 const a = backend.constructor([2, 2], [1, 2, 3, 4]);558 const b = backend.constructor([2, 2], [5, 6, 7, 8]);559 const ones = 
backend.constructor([2, 2], new Array(4).fill(1));560 const vA = new Variable(a);561 const vB = new Variable(b);562 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;563 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;564 const res = vA.subtract(vB) as Variable<'float32'>;565 res.backward(ones);566 const numericalGradA = await backend.toBackend(567 numericalGradient(568 aCPU,569 (a: CPUTensor<'float32'>) => a.subtract(bCPU) as CPUTensor<'float32'>570 )571 );572 const numericalGradB = await backend.toBackend(573 numericalGradient(574 bCPU,575 (b: CPUTensor<'float32'>) => aCPU.subtract(b) as CPUTensor<'float32'>576 )577 );578 expect(await vA.grad?.compare(numericalGradA, epsilon)).toBeTrue();579 expect(await vB.grad?.compare(numericalGradB, epsilon)).toBeTrue();580 });581 it('should work with broadcasted subtract', async () => {582 if (backend.wait !== undefined) {583 await backend.wait;584 }585 const a = backend.constructor([2, 2], [1, 2, 3, 4]);586 const b = backend.constructor([2], [5, 6]);587 const ones = backend.constructor([2, 2], new Array(4).fill(1));588 const vA = new Variable(a);589 const vB = new Variable(b);590 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;591 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;592 const res = vA.subtract(vB) as Variable<'float32'>;593 res.backward(ones);594 const numericalGradA = await backend.toBackend(595 numericalGradient(596 aCPU,597 (a: CPUTensor<'float32'>) => a.subtract(bCPU) as CPUTensor<'float32'>598 )599 );600 const numericalGradB = await backend.toBackend(601 numericalGradient(602 bCPU,603 (b: CPUTensor<'float32'>) => aCPU.subtract(b) as CPUTensor<'float32'>604 )605 );606 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();607 expect(await vB.grad?.compare(numericalGradB, 0.05)).toBeTrue();608 });609 it('should work with multiply', async () => {610 if (backend.wait !== undefined) {611 await backend.wait;612 }613 const a = backend.constructor([2, 2], [1, 2, 3, 4]);614 const b = 
backend.constructor([2, 2], [5, 6, 7, 8]);615 const ones = backend.constructor([2, 2], new Array(4).fill(1));616 const vA = new Variable(a);617 const vB = new Variable(b);618 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;619 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;620 const res = vA.multiply(vB) as Variable<'float32'>;621 res.backward(ones);622 const numericalGradA = await backend.toBackend(623 numericalGradient(624 aCPU,625 (a: CPUTensor<'float32'>) => a.multiply(bCPU) as CPUTensor<'float32'>626 )627 );628 const numericalGradB = await backend.toBackend(629 numericalGradient(630 bCPU,631 (b: CPUTensor<'float32'>) => aCPU.multiply(b) as CPUTensor<'float32'>632 )633 );634 expect(await vA.grad?.compare(numericalGradA, 0.1)).toBeTrue();635 expect(await vB.grad?.compare(numericalGradB, 0.1)).toBeTrue();636 });637 it('should work with broadcasted multiply', async () => {638 if (backend.wait !== undefined) {639 await backend.wait;640 }641 const a = backend.constructor([2, 2], [1, 2, 3, 4]);642 const b = backend.constructor([2], [5, 6]);643 const ones = backend.constructor([2, 2], new Array(4).fill(1));644 const vA = new Variable(a);645 const vB = new Variable(b);646 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;647 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;648 const res = vA.multiply(vB) as Variable<'float32'>;649 res.backward(ones);650 const numericalGradA = await backend.toBackend(651 numericalGradient(652 aCPU,653 (a: CPUTensor<'float32'>) => a.multiply(bCPU) as CPUTensor<'float32'>654 )655 );656 const numericalGradB = await backend.toBackend(657 numericalGradient(658 bCPU,659 (b: CPUTensor<'float32'>) => aCPU.multiply(b) as CPUTensor<'float32'>660 )661 );662 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();663 expect(await vB.grad?.compare(numericalGradB, 0.05)).toBeTrue();664 });665 it('should work with divide', async () => {666 if (backend.wait !== undefined) {667 await backend.wait;668 }669 const a = 
backend.constructor([2, 2], [1, 2, 3, 4]);670 const b = backend.constructor([2, 2], [5, 6, 7, 8]);671 const ones = backend.constructor([2, 2], new Array(4).fill(1));672 const vA = new Variable(a);673 const vB = new Variable(b);674 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;675 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;676 const res = vA.divide(vB) as Variable<'float32'>;677 res.backward(ones);678 const numericalGradA = await backend.toBackend(679 numericalGradient(680 aCPU,681 (a: CPUTensor<'float32'>) => a.divide(bCPU) as CPUTensor<'float32'>682 )683 );684 const numericalGradB = await backend.toBackend(685 numericalGradient(686 bCPU,687 (b: CPUTensor<'float32'>) => aCPU.divide(b) as CPUTensor<'float32'>688 )689 );690 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();691 expect(await vB.grad?.compare(numericalGradB, 0.05)).toBeTrue();692 });693 it('should work with broadcasted divide', async () => {694 if (backend.wait !== undefined) {695 await backend.wait;696 }697 const a = backend.constructor([2, 2], [1, 2, 3, 4]);698 const b = backend.constructor([2], [5, 6]);699 const ones = backend.constructor([2, 2], new Array(4).fill(1));700 const vA = new Variable(a);701 const vB = new Variable(b);702 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;703 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;704 const res = vA.divide(vB) as Variable<'float32'>;705 res.backward(ones);706 const numericalGradA = await backend.toBackend(707 numericalGradient(708 aCPU,709 (a: CPUTensor<'float32'>) => a.divide(bCPU) as CPUTensor<'float32'>710 )711 );712 const numericalGradB = await backend.toBackend(713 numericalGradient(714 bCPU,715 (b: CPUTensor<'float32'>) => aCPU.divide(b) as CPUTensor<'float32'>716 )717 );718 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();719 expect(await vB.grad?.compare(numericalGradB, 0.05)).toBeTrue();720 });721 it('should work with power', async () => {722 if (backend.wait !== undefined) {723 
await backend.wait;724 }725 const a = backend.constructor([2, 2], [1, 2, 3, 4]);726 const b = backend.constructor([2, 2], [1.1, 2.2, 3.3, 2.5]);727 const ones = backend.constructor([2, 2], new Array(4).fill(1));728 const vA = new Variable(a);729 const vB = new Variable(b);730 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;731 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;732 const res = vA.power(vB) as Variable<'float32'>;733 res.backward(ones);734 const numericalGradA = await backend.toBackend(735 numericalGradient(736 aCPU,737 (a: CPUTensor<'float32'>) => a.power(bCPU) as CPUTensor<'float32'>738 )739 );740 const numericalGradB = await backend.toBackend(741 numericalGradient(742 bCPU,743 (b: CPUTensor<'float32'>) => aCPU.power(b) as CPUTensor<'float32'>744 )745 );746 expect(await vA.grad?.compare(numericalGradA, 0.5)).toBeTrue();747 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();748 });749 it('should work with broadcasted power', async () => {750 if (backend.wait !== undefined) {751 await backend.wait;752 }753 const a = backend.constructor([2, 2], [1, 2, 3, 4]);754 const b = backend.constructor([2], [1.5, 2.5]);755 const ones = backend.constructor([2, 2], new Array(4).fill(1));756 const vA = new Variable(a);757 const vB = new Variable(b);758 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;759 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;760 const res = vA.power(vB) as Variable<'float32'>;761 res.backward(ones);762 const numericalGradA = await backend.toBackend(763 numericalGradient(764 aCPU,765 (a: CPUTensor<'float32'>) => a.power(bCPU) as CPUTensor<'float32'>766 )767 );768 const numericalGradB = await backend.toBackend(769 numericalGradient(770 bCPU,771 (b: CPUTensor<'float32'>) => aCPU.power(b) as CPUTensor<'float32'>772 )773 );774 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();775 expect(await vB.grad?.compare(numericalGradB, 0.05)).toBeTrue();776 });777 it('should work with convolution', async () 
=> {778 if (backend.wait !== undefined) {779 await backend.wait;780 }781 const x = backend.constructor([1, 1, 3, 3], [1, 2, 3, 4, 5, 6, 7, 8, 9]);782 const w = backend.constructor([1, 1, 2, 2], [1, 2, 3, 4]);783 const b = backend.constructor([1], [5]);784 const ones = backend.constructor([1, 1, 2, 2], new Array(4).fill(1));785 const vX = new Variable(x);786 const vW = new Variable(w);787 const vB = new Variable(b);788 const xCPU = (await toCPU(x)) as CPUTensor<'float32'>;789 const wCPU = (await toCPU(w)) as CPUTensor<'float32'>;790 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;791 const res = vX.conv(vW, vB) as Variable<'float32'>;792 res.backward(ones);793 const numericalGradX = await backend.toBackend(794 numericalGradient(795 xCPU,796 (x: CPUTensor<'float32'>) =>797 x.conv(wCPU, bCPU) as CPUTensor<'float32'>798 )799 );800 const numericalGradW = await backend.toBackend(801 numericalGradient(802 wCPU,803 (w: CPUTensor<'float32'>) =>804 xCPU.conv(w, bCPU) as CPUTensor<'float32'>805 )806 );807 const numericalGradB = await backend.toBackend(808 numericalGradient(809 bCPU,810 (b: CPUTensor<'float32'>) =>811 xCPU.conv(wCPU, b) as CPUTensor<'float32'>812 )813 );814 expect(await vX.grad?.compare(numericalGradX, 0.5)).toBeTrue();815 expect(await vW.grad?.compare(numericalGradW, 0.5)).toBeTrue();816 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();817 });818 it('should work with strided convolution', async () => {819 if (backend.wait !== undefined) {820 await backend.wait;821 }822 const x = backend.constructor(823 [1, 1, 4, 4],824 [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]825 );826 const w = backend.constructor([1, 1, 2, 2], [1, 2, 3, 4]);827 const b = backend.constructor([1], [5]);828 const ones = backend.constructor([1, 1, 2, 2], new Array(4).fill(1));829 const vX = new Variable(x);830 const vW = new Variable(w);831 const vB = new Variable(b);832 const xCPU = (await toCPU(x)) as CPUTensor<'float32'>;833 const wCPU = (await toCPU(w)) as 
CPUTensor<'float32'>;834 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;835 const res = vX.conv(vW, vB, undefined, undefined, undefined, [836 2,837 2,838 ]) as Variable<'float32'>;839 res.backward(ones);840 const numericalGradX = await backend.toBackend(841 numericalGradient(842 xCPU,843 (x: CPUTensor<'float32'>) =>844 x.conv(wCPU, bCPU, undefined, undefined, undefined, [845 2,846 2,847 ]) as CPUTensor<'float32'>848 )849 );850 const numericalGradW = await backend.toBackend(851 numericalGradient(852 wCPU,853 (w: CPUTensor<'float32'>) =>854 xCPU.conv(w, bCPU, undefined, undefined, undefined, [855 2,856 2,857 ]) as CPUTensor<'float32'>858 )859 );860 const numericalGradB = await backend.toBackend(861 numericalGradient(862 bCPU,863 (b: CPUTensor<'float32'>) =>864 xCPU.conv(wCPU, b, undefined, undefined, undefined, [865 2,866 2,867 ]) as CPUTensor<'float32'>868 )869 );870 expect(await vX.grad?.compare(numericalGradX, 0.5)).toBeTrue();871 expect(await vW.grad?.compare(numericalGradW, 0.5)).toBeTrue();872 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();873 });874 it('should work with dilated convolution', async () => {875 if (backend.wait !== undefined) {876 await backend.wait;877 }878 const x = backend.constructor(879 [1, 1, 4, 4],880 [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]881 );882 const w = backend.constructor([1, 1, 2, 2], [1, 2, 3, 4]);883 const b = backend.constructor([1], [5]);884 const ones = backend.constructor([1, 1, 2, 2], new Array(4).fill(1));885 const vX = new Variable(x);886 const vW = new Variable(w);887 const vB = new Variable(b);888 const xCPU = (await toCPU(x)) as CPUTensor<'float32'>;889 const wCPU = (await toCPU(w)) as CPUTensor<'float32'>;890 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;891 const res = vX.conv(vW, vB, [2, 2]) as Variable<'float32'>;892 res.backward(ones);893 const numericalGradX = await backend.toBackend(894 numericalGradient(895 xCPU,896 (x: CPUTensor<'float32'>) =>897 x.conv(wCPU, bCPU, 
[2, 2]) as CPUTensor<'float32'>898 )899 );900 const numericalGradW = await backend.toBackend(901 numericalGradient(902 wCPU,903 (w: CPUTensor<'float32'>) =>904 xCPU.conv(w, bCPU, [2, 2]) as CPUTensor<'float32'>905 )906 );907 const numericalGradB = await backend.toBackend(908 numericalGradient(909 bCPU,910 (b: CPUTensor<'float32'>) =>911 xCPU.conv(wCPU, b, [2, 2]) as CPUTensor<'float32'>912 )913 );914 expect(await vX.grad?.compare(numericalGradX, 0.5)).toBeTrue();915 expect(await vW.grad?.compare(numericalGradW, 0.5)).toBeTrue();916 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();917 });918 it('should work with padded convolution', async () => {919 if (backend.wait !== undefined) {920 await backend.wait;921 }922 const x = backend.constructor([1, 1, 3, 3], [1, 2, 3, 4, 5, 6, 7, 8, 9]);923 const w = backend.constructor([1, 1, 2, 2], [1, 2, 3, 4]);924 const b = backend.constructor([1], [5]);925 const ones = backend.constructor([1, 1, 4, 4], new Array(16).fill(1));926 const vX = new Variable(x);927 const vW = new Variable(w);928 const vB = new Variable(b);929 const xCPU = (await toCPU(x)) as CPUTensor<'float32'>;930 const wCPU = (await toCPU(w)) as CPUTensor<'float32'>;931 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;932 const res = vX.conv(vW, vB, undefined, undefined, [933 1,934 1,935 1,936 1,937 ]) as Variable<'float32'>;938 res.backward(ones);939 const numericalGradX = await backend.toBackend(940 numericalGradient(941 xCPU,942 (x: CPUTensor<'float32'>) =>943 x.conv(wCPU, bCPU, undefined, undefined, [944 1,945 1,946 1,947 1,948 ]) as CPUTensor<'float32'>949 )950 );951 const numericalGradW = await backend.toBackend(952 numericalGradient(953 wCPU,954 (w: CPUTensor<'float32'>) =>955 xCPU.conv(w, bCPU, undefined, undefined, [956 1,957 1,958 1,959 1,960 ]) as CPUTensor<'float32'>961 )962 );963 const numericalGradB = await backend.toBackend(964 numericalGradient(965 bCPU,966 (b: CPUTensor<'float32'>) =>967 xCPU.conv(wCPU, b, undefined, undefined, 
[968 1,969 1,970 1,971 1,972 ]) as CPUTensor<'float32'>973 )974 );975 expect(await vX.grad?.compare(numericalGradX, 0.5)).toBeTrue();976 expect(await vW.grad?.compare(numericalGradW, 0.8)).toBeTrue();977 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();978 });979 it('should work with padded strided dilated convolution', async () => {980 if (backend.wait !== undefined) {981 await backend.wait;982 }983 const x = backend.constructor([1, 1, 3, 3], [1, 2, 3, 4, 5, 6, 7, 8, 9]);984 const w = backend.constructor([1, 1, 2, 2], [1, 2, 3, 4]);985 const b = backend.constructor([1], [5]);986 const ones = backend.constructor([1, 1, 2, 2], new Array(4).fill(1));987 const vX = new Variable(x);988 const vW = new Variable(w);989 const vB = new Variable(b);990 const xCPU = (await toCPU(x)) as CPUTensor<'float32'>;991 const wCPU = (await toCPU(w)) as CPUTensor<'float32'>;992 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;993 const dil = [2, 2];994 const pads = [1, 1, 1, 1];995 const strd = [2, 2];996 const res = vX.conv(997 vW,998 vB,999 dil,1000 undefined,1001 pads,1002 strd1003 ) as Variable<'float32'>;1004 res.backward(ones);1005 const numericalGradX = await backend.toBackend(1006 numericalGradient(1007 xCPU,1008 (x: CPUTensor<'float32'>) =>1009 x.conv(1010 wCPU,1011 bCPU,1012 dil,1013 undefined,1014 pads,1015 strd1016 ) as CPUTensor<'float32'>1017 )1018 );1019 const numericalGradW = await backend.toBackend(1020 numericalGradient(1021 wCPU,1022 (w: CPUTensor<'float32'>) =>1023 xCPU.conv(1024 w,1025 bCPU,1026 dil,1027 undefined,1028 pads,1029 strd1030 ) as CPUTensor<'float32'>1031 )1032 );1033 const numericalGradB = await backend.toBackend(1034 numericalGradient(1035 bCPU,1036 (b: CPUTensor<'float32'>) =>1037 xCPU.conv(1038 wCPU,1039 b,1040 dil,1041 undefined,1042 pads,1043 strd1044 ) as CPUTensor<'float32'>1045 )1046 );1047 expect(await vX.grad?.compare(numericalGradX, 0.5)).toBeTrue();1048 expect(await vW.grad?.compare(numericalGradW, 0.5)).toBeTrue();1049 
expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();1050 });1051 it('should work gemm', async () => {1052 if (backend.wait !== undefined) {1053 await backend.wait;1054 }1055 const a = backend.constructor([2, 2, 2], [1, 2, 3, 4, 5, 6, 7, 8]);1056 const b = backend.constructor(1057 [2, 2, 3],1058 [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]1059 );1060 const c = backend.constructor([3], [5, 6, 7]);1061 const ones = backend.constructor([2, 2, 3], new Array(12).fill(1));1062 const vA = new Variable(a);1063 const vB = new Variable(b);1064 const vC = new Variable(c);1065 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1066 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;1067 const cCPU = (await toCPU(c)) as CPUTensor<'float32'>;1068 const res = vA.gemm(vB, false, false, 1, vC, 1) as Variable<'float32'>;1069 res.backward(ones);1070 const numericalGradA = await backend.toBackend(1071 numericalGradient(1072 aCPU,1073 (a: CPUTensor<'float32'>) =>1074 a.gemm(bCPU, false, false, 1, cCPU, 1) as CPUTensor<'float32'>1075 )1076 );1077 const numericalGradB = await backend.toBackend(1078 numericalGradient(1079 bCPU,1080 (b: CPUTensor<'float32'>) =>1081 aCPU.gemm(b, false, false, 1, cCPU, 1) as CPUTensor<'float32'>1082 )1083 );1084 const numericalGradC = await backend.toBackend(1085 numericalGradient(1086 cCPU,1087 (c: CPUTensor<'float32'>) =>1088 aCPU.gemm(bCPU, false, false, 1, c, 1) as CPUTensor<'float32'>1089 )1090 );1091 expect(await vA.grad?.compare(numericalGradA, 0.5)).toBeTrue();1092 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();1093 expect(await vC.grad?.compare(numericalGradC, 0.1)).toBeTrue();1094 });1095 it('should work gemm a transposed', async () => {1096 if (backend.wait !== undefined) {1097 await backend.wait;1098 }1099 const a = backend.constructor([2, 2, 2], [1, 2, 3, 4, 5, 6, 7, 8]);1100 const b = backend.constructor(1101 [2, 2, 3],1102 [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]1103 );1104 const c = backend.constructor([3], [5, 6, 7]);1105 
const ones = backend.constructor([2, 2, 3], new Array(12).fill(1));1106 const vA = new Variable(a);1107 const vB = new Variable(b);1108 const vC = new Variable(c);1109 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1110 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;1111 const cCPU = (await toCPU(c)) as CPUTensor<'float32'>;1112 const res = vA.gemm(vB, true, false, 1, vC, 1) as Variable<'float32'>;1113 res.backward(ones);1114 const numericalGradA = await backend.toBackend(1115 numericalGradient(1116 aCPU,1117 (a: CPUTensor<'float32'>) =>1118 a.gemm(bCPU, true, false, 1, cCPU, 1) as CPUTensor<'float32'>1119 )1120 );1121 const numericalGradB = await backend.toBackend(1122 numericalGradient(1123 bCPU,1124 (b: CPUTensor<'float32'>) =>1125 aCPU.gemm(b, true, false, 1, cCPU, 1) as CPUTensor<'float32'>1126 )1127 );1128 const numericalGradC = await backend.toBackend(1129 numericalGradient(1130 cCPU,1131 (c: CPUTensor<'float32'>) =>1132 aCPU.gemm(bCPU, true, false, 1, c, 1) as CPUTensor<'float32'>1133 )1134 );1135 expect(await vA.grad?.compare(numericalGradA, 0.5)).toBeTrue();1136 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();1137 expect(await vC.grad?.compare(numericalGradC, 0.1)).toBeTrue();1138 });1139 it('should work gemm b transposed', async () => {1140 if (backend.wait !== undefined) {1141 await backend.wait;1142 }1143 const a = backend.constructor([2, 2, 2], [1, 2, 3, 4, 5, 6, 7, 8]);1144 const b = backend.constructor(1145 [2, 3, 2],1146 [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]1147 );1148 const c = backend.constructor([3], [5, 6, 7]);1149 const ones = backend.constructor([2, 2, 3], new Array(12).fill(1));1150 const vA = new Variable(a);1151 const vB = new Variable(b);1152 const vC = new Variable(c);1153 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1154 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;1155 const cCPU = (await toCPU(c)) as CPUTensor<'float32'>;1156 const res = vA.gemm(vB, false, true, 1, vC, 1) as 
Variable<'float32'>;1157 res.backward(ones);1158 const numericalGradA = await backend.toBackend(1159 numericalGradient(1160 aCPU,1161 (a: CPUTensor<'float32'>) =>1162 a.gemm(bCPU, false, true, 1, cCPU, 1) as CPUTensor<'float32'>1163 )1164 );1165 const numericalGradB = await backend.toBackend(1166 numericalGradient(1167 bCPU,1168 (b: CPUTensor<'float32'>) =>1169 aCPU.gemm(b, false, true, 1, cCPU, 1) as CPUTensor<'float32'>1170 )1171 );1172 const numericalGradC = await backend.toBackend(1173 numericalGradient(1174 cCPU,1175 (c: CPUTensor<'float32'>) =>1176 aCPU.gemm(bCPU, false, true, 1, c, 1) as CPUTensor<'float32'>1177 )1178 );1179 expect(await vA.grad?.compare(numericalGradA, 0.5)).toBeTrue();1180 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();1181 expect(await vC.grad?.compare(numericalGradC, 0.1)).toBeTrue();1182 });1183 it('should work gemm a and b transposed', async () => {1184 if (backend.wait !== undefined) {1185 await backend.wait;1186 }1187 const a = backend.constructor([2, 2, 2], [1, 2, 3, 4, 5, 6, 7, 8]);1188 const b = backend.constructor(1189 [2, 3, 2],1190 [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]1191 );1192 const c = backend.constructor([3], [5, 6, 7]);1193 const ones = backend.constructor([2, 2, 3], new Array(12).fill(1));1194 const vA = new Variable(a);1195 const vB = new Variable(b);1196 const vC = new Variable(c);1197 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1198 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;1199 const cCPU = (await toCPU(c)) as CPUTensor<'float32'>;1200 const res = vA.gemm(vB, true, true, 1, vC, 1) as Variable<'float32'>;1201 res.backward(ones);1202 const numericalGradA = await backend.toBackend(1203 numericalGradient(1204 aCPU,1205 (a: CPUTensor<'float32'>) =>1206 a.gemm(bCPU, true, true, 1, cCPU, 1) as CPUTensor<'float32'>1207 )1208 );1209 const numericalGradB = await backend.toBackend(1210 numericalGradient(1211 bCPU,1212 (b: CPUTensor<'float32'>) =>1213 aCPU.gemm(b, true, true, 1, cCPU, 1) as 
CPUTensor<'float32'>1214 )1215 );1216 const numericalGradC = await backend.toBackend(1217 numericalGradient(1218 cCPU,1219 (c: CPUTensor<'float32'>) =>1220 aCPU.gemm(bCPU, true, true, 1, c, 1) as CPUTensor<'float32'>1221 )1222 );1223 expect(await vA.grad?.compare(numericalGradA, 0.5)).toBeTrue();1224 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();1225 expect(await vC.grad?.compare(numericalGradC, 0.1)).toBeTrue();1226 });1227 it('should work gemm alpha=0.5', async () => {1228 if (backend.wait !== undefined) {1229 await backend.wait;1230 }1231 const a = backend.constructor([2, 2, 2], [1, 2, 3, 4, 5, 6, 7, 8]);1232 const b = backend.constructor(1233 [2, 3, 2],1234 [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]1235 );1236 const c = backend.constructor([3], [5, 6, 7]);1237 const ones = backend.constructor([2, 2, 3], new Array(12).fill(1));1238 const vA = new Variable(a);1239 const vB = new Variable(b);1240 const vC = new Variable(c);1241 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1242 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;1243 const cCPU = (await toCPU(c)) as CPUTensor<'float32'>;1244 const res = vA.gemm(vB, true, true, 0.5, vC, 1) as Variable<'float32'>;1245 res.backward(ones);1246 const numericalGradA = await backend.toBackend(1247 numericalGradient(1248 aCPU,1249 (a: CPUTensor<'float32'>) =>1250 a.gemm(bCPU, true, true, 0.5, cCPU, 1) as CPUTensor<'float32'>1251 )1252 );1253 const numericalGradB = await backend.toBackend(1254 numericalGradient(1255 bCPU,1256 (b: CPUTensor<'float32'>) =>1257 aCPU.gemm(b, true, true, 0.5, cCPU, 1) as CPUTensor<'float32'>1258 )1259 );1260 const numericalGradC = await backend.toBackend(1261 numericalGradient(1262 cCPU,1263 (c: CPUTensor<'float32'>) =>1264 aCPU.gemm(bCPU, true, true, 0.5, c, 1) as CPUTensor<'float32'>1265 )1266 );1267 expect(await vA.grad?.compare(numericalGradA, 0.5)).toBeTrue();1268 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();1269 expect(await 
vC.grad?.compare(numericalGradC, 0.1)).toBeTrue();1270 });1271 it('should work gemm beta=0.5', async () => {1272 if (backend.wait !== undefined) {1273 await backend.wait;1274 }1275 const a = backend.constructor([2, 2, 2], [1, 2, 3, 4, 5, 6, 7, 8]);1276 const b = backend.constructor(1277 [2, 3, 2],1278 [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]1279 );1280 const c = backend.constructor([3], [5, 6, 7]);1281 const ones = backend.constructor([2, 2, 3], new Array(12).fill(1));1282 const vA = new Variable(a);1283 const vB = new Variable(b);1284 const vC = new Variable(c);1285 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1286 const bCPU = (await toCPU(b)) as CPUTensor<'float32'>;1287 const cCPU = (await toCPU(c)) as CPUTensor<'float32'>;1288 const res = vA.gemm(vB, true, true, 1, vC, 0.5) as Variable<'float32'>;1289 res.backward(ones);1290 const numericalGradA = await backend.toBackend(1291 numericalGradient(1292 aCPU,1293 (a: CPUTensor<'float32'>) =>1294 a.gemm(bCPU, true, true, 1, cCPU, 0.5) as CPUTensor<'float32'>1295 )1296 );1297 const numericalGradB = await backend.toBackend(1298 numericalGradient(1299 bCPU,1300 (b: CPUTensor<'float32'>) =>1301 aCPU.gemm(b, true, true, 1, cCPU, 0.5) as CPUTensor<'float32'>1302 )1303 );1304 const numericalGradC = await backend.toBackend(1305 numericalGradient(1306 cCPU,1307 (c: CPUTensor<'float32'>) =>1308 aCPU.gemm(bCPU, true, true, 1, c, 0.5) as CPUTensor<'float32'>1309 )1310 );1311 expect(await vA.grad?.compare(numericalGradA, 0.5)).toBeTrue();1312 expect(await vB.grad?.compare(numericalGradB, 0.5)).toBeTrue();1313 expect(await vC.grad?.compare(numericalGradC, 0.3)).toBeTrue();1314 });1315 it('should work with transpose', async () => {1316 if (backend.wait !== undefined) {1317 await backend.wait;1318 }1319 const a = backend.constructor([2, 3, 4], new Array(24).fill(5));1320 const ones = backend.constructor([4, 2, 3], new Array(24).fill(1));1321 const vA = new Variable(a);1322 const aCPU = (await toCPU(a)) as 
CPUTensor<'float32'>;1323 const permutation = [2, 0, 1];1324 const res = vA.transpose(permutation) as Variable<'float32'>;1325 res.backward(ones);1326 const numericalGradA = await backend.toBackend(1327 numericalGradient(1328 aCPU,1329 (a: CPUTensor<'float32'>) =>1330 a.transpose(permutation) as CPUTensor<'float32'>1331 )1332 );1333 expect(await vA.grad?.compare(numericalGradA, 0.01)).toBeTrue();1334 });1335 it('should work with sum axis 1', async () => {1336 if (backend.wait !== undefined) {1337 await backend.wait;1338 }1339 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1340 const ones = backend.constructor([2], new Array(2).fill(1));1341 const vA = new Variable(a);1342 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1343 const res = vA.sum(1) as Variable<'float32'>;1344 res.backward(ones);1345 const numericalGradA = await backend.toBackend(1346 numericalGradient(1347 aCPU,1348 (a: CPUTensor<'float32'>) => a.sum(1) as CPUTensor<'float32'>1349 )1350 );1351 expect(await vA.grad?.compare(numericalGradA, 0.01)).toBeTrue();1352 });1353 it('should work with sum axis 0', async () => {1354 if (backend.wait !== undefined) {1355 await backend.wait;1356 }1357 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1358 const ones = backend.constructor([3], new Array(3).fill(1));1359 const vA = new Variable(a);1360 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1361 const res = vA.sum(0) as Variable<'float32'>;1362 res.backward(ones);1363 const numericalGradA = await backend.toBackend(1364 numericalGradient(1365 aCPU,1366 (a: CPUTensor<'float32'>) => a.sum(0) as CPUTensor<'float32'>1367 )1368 );1369 expect(await vA.grad?.compare(numericalGradA, 0.01)).toBeTrue();1370 });1371 it('should work with sum across all axes', async () => {1372 if (backend.wait !== undefined) {1373 await backend.wait;1374 }1375 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1376 const ones = backend.constructor([1], new Array(1).fill(1));1377 const vA = new 
Variable(a);1378 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1379 const res = vA.sum() as Variable<'float32'>;1380 res.backward(ones);1381 const numericalGradA = await backend.toBackend(1382 numericalGradient(1383 aCPU,1384 (a: CPUTensor<'float32'>) => a.sum() as CPUTensor<'float32'>1385 )1386 );1387 expect(await vA.grad?.compare(numericalGradA, 0.01)).toBeTrue();1388 });1389 it('should work with sum keepDims = True', async () => {1390 if (backend.wait !== undefined) {1391 await backend.wait;1392 }1393 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1394 const ones = backend.constructor([1, 1], new Array(1).fill(1));1395 const vA = new Variable(a);1396 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1397 const res = vA.sum(undefined, true) as Variable<'float32'>;1398 res.backward(ones);1399 const numericalGradA = await backend.toBackend(1400 numericalGradient(1401 aCPU,1402 (a: CPUTensor<'float32'>) =>1403 a.sum(undefined, true) as CPUTensor<'float32'>1404 )1405 );1406 expect(await vA.grad?.compare(numericalGradA, 0.01)).toBeTrue();1407 });1408 it('should work with sum square axis 1', async () => {1409 if (backend.wait !== undefined) {1410 await backend.wait;1411 }1412 const a = backend.constructor([2, 3], [1, -2, 3, -4, -5, 6]);1413 const ones = backend.constructor([2], new Array(2).fill(1));1414 const vA = new Variable(a);1415 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1416 const res = vA.sumSquare(1) as Variable<'float32'>;1417 res.backward(ones);1418 const numericalGradA = await backend.toBackend(1419 numericalGradient(1420 aCPU,1421 (a: CPUTensor<'float32'>) => a.sumSquare(1) as CPUTensor<'float32'>1422 )1423 );1424 expect(await vA.grad?.compare(numericalGradA, 0.1)).toBeTrue();1425 });1426 it('should work with sum square axis 0', async () => {1427 if (backend.wait !== undefined) {1428 await backend.wait;1429 }1430 const a = backend.constructor([2, 3], [1, -2, 3, -4, -5, 6]);1431 const ones = backend.constructor([3], new 
Array(3).fill(1));1432 const vA = new Variable(a);1433 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1434 const res = vA.sumSquare(0) as Variable<'float32'>;1435 res.backward(ones);1436 const numericalGradA = await backend.toBackend(1437 numericalGradient(1438 aCPU,1439 (a: CPUTensor<'float32'>) => a.sumSquare(0) as CPUTensor<'float32'>1440 )1441 );1442 expect(await vA.grad?.compare(numericalGradA, 0.1)).toBeTrue();1443 });1444 it('should work with sum square across all axes', async () => {1445 if (backend.wait !== undefined) {1446 await backend.wait;1447 }1448 const a = backend.constructor([2, 3], [1, -2, 3, -4, -5, 6]);1449 const ones = backend.constructor([1], new Array(1).fill(1));1450 const vA = new Variable(a);1451 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1452 const res = vA.sumSquare() as Variable<'float32'>;1453 res.backward(ones);1454 const numericalGradA = await backend.toBackend(1455 numericalGradient(1456 aCPU,1457 (a: CPUTensor<'float32'>) => a.sumSquare() as CPUTensor<'float32'>1458 )1459 );1460 expect(await vA.grad?.compare(numericalGradA, 0.1)).toBeTrue();1461 });1462 it('should work with sum square keepDims = True', async () => {1463 if (backend.wait !== undefined) {1464 await backend.wait;1465 }1466 const a = backend.constructor([2, 3], [1, -2, 3, -4, -5, 6]);1467 const ones = backend.constructor([1, 1], new Array(1).fill(1));1468 const vA = new Variable(a);1469 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1470 const res = vA.sumSquare(undefined, true) as Variable<'float32'>;1471 res.backward(ones);1472 const numericalGradA = await backend.toBackend(1473 numericalGradient(1474 aCPU,1475 (a: CPUTensor<'float32'>) =>1476 a.sumSquare(undefined, true) as CPUTensor<'float32'>1477 )1478 );1479 expect(await vA.grad?.compare(numericalGradA, 0.1)).toBeTrue();1480 });1481 it('should work with mean axis 1', async () => {1482 if (backend.wait !== undefined) {1483 await backend.wait;1484 }1485 const a = backend.constructor([2, 3], 
[1, 2, 3, 4, 5, 6]);1486 const ones = backend.constructor([2], new Array(2).fill(1));1487 const vA = new Variable(a);1488 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1489 const res = vA.reduceMean(1) as Variable<'float32'>;1490 res.backward(ones);1491 const numericalGradA = await backend.toBackend(1492 numericalGradient(1493 aCPU,1494 (a: CPUTensor<'float32'>) => a.reduceMean(1) as CPUTensor<'float32'>1495 )1496 );1497 expect(await vA.grad?.compare(numericalGradA, 0.01)).toBeTrue();1498 });1499 it('should work with mean axis 0', async () => {1500 if (backend.wait !== undefined) {1501 await backend.wait;1502 }1503 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1504 const ones = backend.constructor([3], new Array(3).fill(1));1505 const vA = new Variable(a);1506 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1507 const res = vA.reduceMean(0) as Variable<'float32'>;1508 res.backward(ones);1509 const numericalGradA = await backend.toBackend(1510 numericalGradient(1511 aCPU,1512 (a: CPUTensor<'float32'>) => a.reduceMean(0) as CPUTensor<'float32'>1513 )1514 );1515 expect(await vA.grad?.compare(numericalGradA, 0.01)).toBeTrue();1516 });1517 it('should work with mean across all axes', async () => {1518 if (backend.wait !== undefined) {1519 await backend.wait;1520 }1521 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1522 const ones = backend.constructor([1], new Array(1).fill(1));1523 const vA = new Variable(a);1524 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1525 const res = vA.reduceMean() as Variable<'float32'>;1526 res.backward(ones);1527 const numericalGradA = await backend.toBackend(1528 numericalGradient(1529 aCPU,1530 (a: CPUTensor<'float32'>) => a.reduceMean() as CPUTensor<'float32'>1531 )1532 );1533 expect(await vA.grad?.compare(numericalGradA, 0.01)).toBeTrue();1534 });1535 it('should work with mean square axis 1', async () => {1536 if (backend.wait !== undefined) {1537 await backend.wait;1538 }1539 const a = 
backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1540 const ones = backend.constructor([2], new Array(2).fill(1));1541 const vA = new Variable(a);1542 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1543 const res = vA.reduceMeanSquare(1) as Variable<'float32'>;1544 res.backward(ones);1545 const numericalGradA = await backend.toBackend(1546 numericalGradient(1547 aCPU,1548 (a: CPUTensor<'float32'>) =>1549 a.reduceMeanSquare(1) as CPUTensor<'float32'>1550 )1551 );1552 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1553 });1554 it('should work with mean square axis 0', async () => {1555 if (backend.wait !== undefined) {1556 await backend.wait;1557 }1558 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1559 const ones = backend.constructor([3], new Array(3).fill(1));1560 const vA = new Variable(a);1561 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1562 const res = vA.reduceMeanSquare(0) as Variable<'float32'>;1563 res.backward(ones);1564 const numericalGradA = await backend.toBackend(1565 numericalGradient(1566 aCPU,1567 (a: CPUTensor<'float32'>) =>1568 a.reduceMeanSquare(0) as CPUTensor<'float32'>1569 )1570 );1571 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1572 });1573 it('should work with mean square across all axes', async () => {1574 if (backend.wait !== undefined) {1575 await backend.wait;1576 }1577 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1578 const ones = backend.constructor([1], new Array(1).fill(1));1579 const vA = new Variable(a);1580 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1581 const res = vA.reduceMeanSquare() as Variable<'float32'>;1582 res.backward(ones);1583 const numericalGradA = await backend.toBackend(1584 numericalGradient(1585 aCPU,1586 (a: CPUTensor<'float32'>) =>1587 a.reduceMeanSquare() as CPUTensor<'float32'>1588 )1589 );1590 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1591 });1592 it('should work with log sum axis 1', async () => 
{1593 if (backend.wait !== undefined) {1594 await backend.wait;1595 }1596 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1597 const ones = backend.constructor([2], new Array(2).fill(1));1598 const vA = new Variable(a);1599 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1600 const res = vA.reduceLogSum(1) as Variable<'float32'>;1601 res.backward(ones);1602 const numericalGradA = await backend.toBackend(1603 numericalGradient(1604 aCPU,1605 (a: CPUTensor<'float32'>) => a.reduceLogSum(1) as CPUTensor<'float32'>1606 )1607 );1608 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1609 });1610 it('should work with log sum axis 0', async () => {1611 if (backend.wait !== undefined) {1612 await backend.wait;1613 }1614 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1615 const ones = backend.constructor([3], new Array(3).fill(1));1616 const vA = new Variable(a);1617 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1618 const res = vA.reduceLogSum(0) as Variable<'float32'>;1619 res.backward(ones);1620 const numericalGradA = await backend.toBackend(1621 numericalGradient(1622 aCPU,1623 (a: CPUTensor<'float32'>) => a.reduceLogSum(0) as CPUTensor<'float32'>1624 )1625 );1626 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1627 });1628 it('should work with log sum across all axes', async () => {1629 if (backend.wait !== undefined) {1630 await backend.wait;1631 }1632 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1633 const ones = backend.constructor([1], new Array(1).fill(1));1634 const vA = new Variable(a);1635 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1636 const res = vA.reduceLogSum() as Variable<'float32'>;1637 res.backward(ones);1638 const numericalGradA = await backend.toBackend(1639 numericalGradient(1640 aCPU,1641 (a: CPUTensor<'float32'>) => a.reduceLogSum() as CPUTensor<'float32'>1642 )1643 );1644 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1645 });1646 it('should 
work with log sum exp axis 1', async () => {1647 if (backend.wait !== undefined) {1648 await backend.wait;1649 }1650 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1651 const ones = backend.constructor([2], new Array(2).fill(1));1652 const vA = new Variable(a);1653 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1654 const res = vA.reduceLogSumExp(1) as Variable<'float32'>;1655 res.backward(ones);1656 const numericalGradA = await backend.toBackend(1657 numericalGradient(1658 aCPU,1659 (a: CPUTensor<'float32'>) =>1660 a.reduceLogSumExp(1) as CPUTensor<'float32'>1661 )1662 );1663 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1664 });1665 it('should work with log sum exp axis 0', async () => {1666 if (backend.wait !== undefined) {1667 await backend.wait;1668 }1669 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1670 const ones = backend.constructor([3], new Array(3).fill(1));1671 const vA = new Variable(a);1672 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1673 const res = vA.reduceLogSumExp(0) as Variable<'float32'>;1674 res.backward(ones);1675 const numericalGradA = await backend.toBackend(1676 numericalGradient(1677 aCPU,1678 (a: CPUTensor<'float32'>) =>1679 a.reduceLogSumExp(0) as CPUTensor<'float32'>1680 )1681 );1682 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1683 });1684 it('should work with log sum exp across all axes', async () => {1685 if (backend.wait !== undefined) {1686 await backend.wait;1687 }1688 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1689 const ones = backend.constructor([1], new Array(1).fill(1));1690 const vA = new Variable(a);1691 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1692 const res = vA.reduceLogSumExp() as Variable<'float32'>;1693 res.backward(ones);1694 const numericalGradA = await backend.toBackend(1695 numericalGradient(1696 aCPU,1697 (a: CPUTensor<'float32'>) =>1698 a.reduceLogSumExp() as CPUTensor<'float32'>1699 )1700 );1701 
expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1702 });1703 it('should work with slice', async () => {1704 if (backend.wait !== undefined) {1705 await backend.wait;1706 }1707 const a = backend.constructor([2, 3, 4], new Array(24).fill(5));1708 const ones = backend.constructor([2, 1, 3], new Array(6).fill(1));1709 const vA = new Variable(a);1710 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1711 const res = vA.slice([2, 0], [3, 3], [1, 2]) as Variable<'float32'>;1712 res.backward(ones);1713 const numericalGradA = await backend.toBackend(1714 numericalGradient(1715 aCPU,1716 (a: CPUTensor<'float32'>) =>1717 a.slice([2, 0], [3, 3], [1, 2]) as CPUTensor<'float32'>1718 )1719 );1720 expect(await vA.grad?.compare(numericalGradA, 0.01)).toBeTrue();1721 });1722 it('should work with product axis 1', async () => {1723 if (backend.wait !== undefined) {1724 await backend.wait;1725 }1726 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1727 const ones = backend.constructor([2], new Array(2).fill(1));1728 const vA = new Variable(a);1729 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1730 const res = vA.product(1) as Variable<'float32'>;1731 res.backward(ones);1732 const numericalGradA = await backend.toBackend(1733 numericalGradient(1734 aCPU,1735 (a: CPUTensor<'float32'>) => a.product(1) as CPUTensor<'float32'>1736 )1737 );1738 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1739 });1740 it('should work with product axis 0', async () => {1741 if (backend.wait !== undefined) {1742 await backend.wait;1743 }1744 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1745 const ones = backend.constructor([3], new Array(3).fill(1));1746 const vA = new Variable(a);1747 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1748 const res = vA.product(0) as Variable<'float32'>;1749 res.backward(ones);1750 const numericalGradA = await backend.toBackend(1751 numericalGradient(1752 aCPU,1753 (a: CPUTensor<'float32'>) => 
a.product(0) as CPUTensor<'float32'>1754 )1755 );1756 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1757 });1758 it('should work with product across all axes', async () => {1759 if (backend.wait !== undefined) {1760 await backend.wait;1761 }1762 const a = backend.constructor([2, 3], [1, 2, 3, 4, 5, 6]);1763 const ones = backend.constructor([1], new Array(1).fill(1));1764 const vA = new Variable(a);1765 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1766 const res = vA.product() as Variable<'float32'>;1767 res.backward(ones);1768 const numericalGradA = await backend.toBackend(1769 numericalGradient(1770 aCPU,1771 (a: CPUTensor<'float32'>) => a.product() as CPUTensor<'float32'>1772 )1773 );1774 expect(await vA.grad?.compare(numericalGradA, 0.8)).toBeTrue();1775 });1776 it('should work with sigmoid', async () => {1777 if (backend.wait !== undefined) {1778 await backend.wait;1779 }1780 const a = backend.constructor([2, 3], [-1, 2, -3, 4, -5, 6]);1781 const ones = backend.constructor([2, 3], new Array(6).fill(1));1782 const vA = new Variable(a);1783 const aCPU = (await toCPU(a)) as CPUTensor<'float32'>;1784 const res = vA.sigmoid() as Variable<'float32'>;1785 res.backward(ones);1786 const numericalGradA = await backend.toBackend(1787 numericalGradient(1788 aCPU,1789 (a: CPUTensor<'float32'>) => a.sigmoid() as CPUTensor<'float32'>1790 )1791 );1792 expect(await vA.grad?.compare(numericalGradA, 0.05)).toBeTrue();1793 });1794 it('should work for bce', async () => {1795 if (backend.wait !== undefined) {1796 await backend.wait;1797 }1798 const x = backend.constructor([2, 2], [0.4, 0.5, 0.6, 0.7]);1799 const y = backend.constructor([2, 2], [1, 0, 1, 0]);1800 const ones = backend.constructor([2, 2], new Array(4).fill(1));1801 const vX = new Variable(x);1802 const vY = new Variable(y);1803 const xCPU = (await toCPU(x)) as CPUTensor<'float32'>;1804 const yCPU = (await toCPU(y)) as CPUTensor<'float32'>;1805 const res = bce(vX, vY) as 
Variable<'float32'>;1806 res.backward(ones);1807 const numericalGradX = await backend.toBackend(1808 numericalGradient(1809 xCPU,1810 (x: CPUTensor<'float32'>) => bce(x, yCPU) as CPUTensor<'float32'>1811 )1812 );1813 expect(await vX.grad?.compare(numericalGradX, 0.01)).toBeTrue();1814 });1815 });...

Full Screen

Full Screen

inview-spec.js

Source:inview-spec.js Github

copy

Full Screen

...15 callbackContext = this16 hits[key] = true17 }18 }19 function toBeTrue(key) {20 return function() {21 return hits[key]22 }23 }24 beforeEach(function() {25 Waypoint.Adapter = adapter.Adapter26 loadFixtures('standard.html')27 $target = $('#near2')28 hits = {}29 })30 afterEach(function() {31 waypoint.destroy()32 $scroller.scrollTop(0).scrollLeft(0)33 })34 describe('vertical', function() {35 beforeEach(function() {36 waypoint = new Waypoint.Inview({37 element: $target[0],38 enter: setsTrue('enter'),39 entered: setsTrue('entered'),40 exit: setsTrue('exit'),41 exited: setsTrue('exited')42 })43 })44 describe('enter callback', function() {45 it('triggers when element starts entering from below', function() {46 runs(function() {47 var top = $target.offset().top48 $scroller.scrollTop(top - Waypoint.viewportHeight())49 })50 waitsFor(toBeTrue('enter'), 'enter to trigger')51 runs(function() {52 expect(callbackContext).toEqual(waypoint)53 })54 })55 it('triggers when element starts entering from above', function() {56 runs(function() {57 $scroller.scrollTop($target.offset().top + $target.outerHeight())58 })59 waits(standard)60 runs(function() {61 hits.enter = false62 $scroller.scrollTop($scroller.scrollTop() - 1)63 })64 waitsFor(toBeTrue('enter'), 'enter to trigger')65 })66 })67 describe('entered callback', function() {68 it('triggers when element finishes entering from below', function() {69 runs(function() {70 var top = $target.offset().top71 var viewportHeight = Waypoint.viewportHeight()72 var elementHeight = $target.outerHeight()73 $scroller.scrollTop(top - viewportHeight + elementHeight)74 })75 waitsFor(toBeTrue('entered'), 'entered to trigger')76 runs(function() {77 expect(callbackContext).toEqual(waypoint)78 })79 })80 it('triggers when element finishes entering from above', function() {81 runs(function() {82 $scroller.scrollTop($target.offset().top)83 })84 waits(standard)85 runs(function() {86 hits.entered = false87 $scroller.scrollTop($scroller.scrollTop() - 1)88 
})89 waitsFor(toBeTrue('entered'), 'entered to trigger')90 })91 })92 describe('exit callback', function() {93 it('triggers when element starts leaving below', function() {94 runs(function() {95 var top = $target.offset().top96 var viewportHeight = Waypoint.viewportHeight()97 var elementHeight = $target.outerHeight()98 $scroller.scrollTop(top - viewportHeight + elementHeight)99 })100 waits(standard)101 runs(function() {102 expect(hits.exit).toBeFalsy()103 $scroller.scrollTop($scroller.scrollTop() - 1)104 })105 waitsFor(toBeTrue('exit'), 'exit to trigger')106 })107 it('triggers when element starts leaving above', function() {108 runs(function() {109 $scroller.scrollTop($target.offset().top)110 })111 waitsFor(toBeTrue('exit'), 'exit to trigger')112 runs(function() {113 expect(callbackContext).toEqual(waypoint)114 })115 })116 })117 describe('exited callback', function() {118 it('triggers when element finishes exiting below', function() {119 runs(function() {120 var top = $target.offset().top121 $scroller.scrollTop(top - Waypoint.viewportHeight())122 })123 waits(standard)124 runs(function() {125 $scroller.scrollTop($scroller.scrollTop() - 1)126 })127 waitsFor(toBeTrue('exited'), 'exited to trigger')128 })129 it('triggers when element finishes exiting above', function() {130 runs(function() {131 $scroller.scrollTop($target.offset().top + $target.outerHeight())132 })133 waitsFor(toBeTrue('exited'), 'exited to trigger')134 runs(function() {135 expect(callbackContext).toEqual(waypoint)136 })137 })138 })139 })140 describe('horizontal', function() {141 beforeEach(function() {142 waypoint = new Waypoint.Inview({143 horizontal: true,144 element: $target[0],145 enter: setsTrue('enter'),146 entered: setsTrue('entered'),147 exit: setsTrue('exit'),148 exited: setsTrue('exited')149 })150 })151 describe('enter callback', function() {152 it('triggers when element starts entering from right', function() {153 runs(function() {154 $scroller.scrollLeft($target.offset().left - 
$scroller.width())155 })156 waitsFor(toBeTrue('enter'), 'enter to trigger')157 })158 it('triggers when element starts entering from left', function() {159 runs(function() {160 var left = $target.offset().left161 $scroller.scrollLeft(left + $target.outerWidth())162 })163 waits(standard)164 runs(function() {165 hits.enter = false166 $scroller.scrollLeft($scroller.scrollLeft() - 1)167 })168 waitsFor(toBeTrue('enter'), 'enter to trigger')169 })170 })171 describe('entered callback', function() {172 it('triggers when element finishes entering from right', function() {173 runs(function() {174 var left = $target.offset().left175 var viewportWidth = $scroller.width()176 var elementWidth = $target.outerWidth()177 $scroller.scrollLeft(left - viewportWidth + elementWidth)178 })179 waitsFor(toBeTrue('entered'), 'entered to trigger')180 })181 it('triggers when element finishes entering from left', function() {182 runs(function() {183 $scroller.scrollLeft($target.offset().left)184 })185 waits(standard)186 runs(function() {187 hits.entered = false188 $scroller.scrollLeft($scroller.scrollLeft() - 1)189 })190 waitsFor(toBeTrue('entered'), 'entered to trigger')191 })192 })193 describe('exit callback', function() {194 it('triggers when element starts leaving on the right', function() {195 runs(function() {196 var left = $target.offset().left197 var viewportWidth = $scroller.width()198 var elementWidth = $target.outerWidth()199 $scroller.scrollLeft(left - viewportWidth + elementWidth)200 })201 waits(standard)202 runs(function() {203 expect(hits.exit).toBeFalsy()204 $scroller.scrollLeft($scroller.scrollLeft() - 1)205 })206 waitsFor(toBeTrue('exit'), 'exit to trigger')207 })208 it('triggers when element starts leaving on the left', function() {209 runs(function() {210 $scroller.scrollLeft($target.offset().left)211 })212 waitsFor(toBeTrue('exit'), 'exit to trigger')213 })214 })215 describe('exited callback', function() {216 it('triggers when element finishes exiting to the right', 
function() {217 runs(function() {218 var left = $target.offset().left219 $scroller.scrollLeft(left - $scroller.width())220 })221 waitsFor(toBeTrue('enter'), 'enter to trigger')222 runs(function() {223 $scroller.scrollLeft($scroller.scrollLeft() - 1)224 })225 waitsFor(toBeTrue('exited'), 'exited to trigger')226 })227 it('triggers when element finishes exiting to the left', function() {228 runs(function() {229 var left = $target.offset().left230 $scroller.scrollLeft(left + $target.outerWidth())231 })232 waitsFor(toBeTrue('exited'), 'exited to trigger')233 })234 })235 })236 describe('disabled', function() {237 beforeEach(function() {238 waypoint = new Waypoint.Inview({239 element: $target[0],240 enabled: false,241 enter: setsTrue('enter'),242 entered: setsTrue('entered'),243 exit: setsTrue('exit'),244 exited: setsTrue('exited')245 })246 })...

Full Screen

Full Screen

Using AI Code Generation

copy

Full Screen

1expect(true).toBeTrue();2expect(false).toBeFalse();3expect(true).toBeBoolean();4expect(false).toBeBoolean();5expect(1).toBeNumber();6expect(1.1).toBeNumber();7expect('hello').toBeString();8expect([1,2,3]).toBeArray();9expect({name:'John',age:25}).toBeObject();10expect(() => {}).toBeFunction();11expect([]).toBeEmpty();12expect('').toBeEmpty();13expect({}).toBeEmpty();14expect([]).toBeNonEmptyArray();15expect('').toBeNonEmptyString();16expect({}).toBeNonEmptyObject();17expect({}).toBeEmptyObject();18expect([]).toBeEmptyArray();19expect('').toBeEmptyString();20expect(true).toBeTrue();21expect(false).toBeFalse();22expect(true).toBeBoolean();23expect(false).toBeBoolean();24expect(1).toBeNumber();25expect(1.1).toBeNumber();26expect('hello').toBeString();27expect([1,2,3]).toBeArray();28expect({name:'John',age:25}).toBeObject();

Full Screen

Using AI Code Generation

copy

Full Screen

1const { toBeTrue } = require('jest-extended');2expect.extend({ toBeTrue });3expect(true).toBeTrue();4const { toBeTrue } = require('jest-extended');5expect.extend({ toBeTrue });6expect(true).toBeTrue();7const { toBeTrue } = require('jest-extended');8expect.extend({ toBeTrue });9expect(true).toBeTrue();10const { toBeTrue } = require('jest-extended');11expect.extend({ toBeTrue });12expect(true).toBeTrue();13const { toBeTrue } = require('jest-extended');14expect.extend({ toBeTrue });15expect(true).toBeTrue();16const { toBeTrue } = require('jest-extended');17expect.extend({ toBeTrue });18expect(true).toBeTrue();19const { toBeTrue } = require('jest-extended');20expect.extend({ toBeTrue });21expect(true).toBeTrue();22const { toBeTrue } = require('jest-extended');23expect.extend({ toBeTrue });24expect(true).toBeTrue();25const { toBeTrue } = require('jest-extended');26expect.extend({ toBeTrue });27expect(true).toBeTrue();28const { toBeTrue } = require('jest-extended');29expect.extend({ toBeTrue });30expect(true).toBeTrue();31const { toBeTrue } = require('jest-extended');32expect.extend({ toBeTrue });33expect(true).toBeTrue();34const { toBeTrue } = require('jest-extended');35expect.extend({ toBeTrue });36expect(true).toBeTrue();37const { toBeTrue } = require('jest-extended');38expect.extend({ toBeTrue });39expect(true).toBeTrue();

Full Screen

Using AI Code Generation

copy

Full Screen

1const { toBeTrue } = require('jest-extended');2expect.extend({ toBeTrue });3test('true is true', () => {4 expect(true).toBeTrue();5});6const { toBeFalse } = require('jest-extended');7expect.extend({ toBeFalse });8test('false is false', () => {9 expect(false).toBeFalse();10});11const { toBeBoolean } = require('jest-extended');12expect.extend({ toBeBoolean });13test('true is boolean', () => {14 expect(true).toBeBoolean();15});16const { toBeNumber } = require('jest-extended');17expect.extend({ toBeNumber });18test('1 is number', () => {19 expect(1).toBeNumber();20});21const { toBeString } = require('jest-extended');22expect.extend({ toBeString });23test('hello is string', () => {24 expect('hello').toBeString();25});26const { toBeObject } = require('jest-extended');27expect.extend({ toBeObject });28test('object is object', () => {29 expect({}).toBeObject();30});31const { toBeArray } = require('jest-extended');32expect.extend({ toBeArray });33test('[] is array', () => {34 expect([]).toBeArray();35});36const { toBeArrayOfSize } = require('jest-extended');37expect.extend({ toBeArrayOfSize });38test('[] is array of size 0', () => {39 expect([]).toBeArrayOfSize(0);40});41const { toBeEmpty } = require('jest-extended');42expect.extend({ toBeEmpty });43test('[] is empty', () => {44 expect([]).toBeEmpty();45});46const { toBeEmptyString } = require('jest-extended');47expect.extend({ toBeEmptyString });48test('"" is empty string', () => {49 expect('').toBe

Full Screen

Using AI Code Generation

copy

Full Screen

1const { toBeTrue } = require('jest-extended');2expect.extend({ toBeTrue });3test('true is true', () => {4 expect(true).toBeTrue();5});6const { toBeFalse } = require('jest-extended');7expect.extend({ toBeFalse });8test('false is false', () => {9 expect(false).toBeFalse();10});11 ✓ true is true (2ms)12 ✓ false is false (1ms)13const { toBeEmpty } = require('jest-extended');14expect.extend({ toBeEmpty });15test('empty string is empty', () => {16 expect('').toBeEmpty();17});18const { toBeFalse } = require('jest-extended');19expect.extend({ toBeFalse });20test('false is false', () => {21 expect(false).toBeFalse();22});23 ✓ empty string is empty (1ms)24 ✓ false is false (1ms)

Full Screen

Using AI Code Generation

copy

Full Screen

1const { toBeTrue } = require('jest-extended');2expect.extend({ toBeTrue });3test('true is true', () => {4 expect(true).toBeTrue();5});6const { toBeTrue } = require('jest-extended');7expect.extend({ toBeTrue });8test('true is true', () => {9 expect(true).toBeTrue();10});11const { toBeTrue } = require('jest-extended');12expect.extend({ toBeTrue });13test('true is true', () => {14 expect(true).toBeTrue();15});16const { toBeTrue } = require('jest-extended');17expect.extend({ toBeTrue });18test('true is true', () => {19 expect(true).toBeTrue();20});21const { toBeTrue } = require('jest-extended');22expect.extend({ toBeTrue });23test('true is true', () => {24 expect(true).toBeTrue();25});26const { toBeTrue } = require('jest-extended');27expect.extend({ toBeTrue });28test('true is true', () => {29 expect(true).toBeTrue();30});31const { toBeTrue } = require('jest-extended');32expect.extend({ toBeTrue });33test('true is true', () => {34 expect(true).toBeTrue();35});36const { toBeTrue } = require('jest-extended');37expect.extend({ toBeTrue });38test('true is true', () => {39 expect(true).toBeTrue();40});41const { toBeTrue } = require('jest-extended');42expect.extend({ toBeTrue });43test('true is true', () => {44 expect(true).toBeTrue();45});

Full Screen

Using AI Code Generation

copy

Full Screen

1const { toBeTrue } = require("jest-extended");2expect.extend({ toBeTrue });3expect(true).toBeTrue();4const { toBeTrue } = require("jest-extended");5expect.extend({ toBeTrue });6expect(true).toBeTrue();7const { toBeTrue } = require("jest-extended");8expect.extend({ toBeTrue });9expect(true).toBeTrue();10const { toBeTrue } = require("jest-extended");11expect.extend({ toBeTrue });12expect(true).toBeTrue();13const { toBeTrue } = require("jest-extended");14expect.extend({ toBeTrue });15expect(true).toBeTrue();16const { toBeTrue } = require("jest-extended");17expect.extend({ toBeTrue });18expect(true).toBeTrue();19const { toBeTrue } = require("jest-extended");20expect.extend({ toBeTrue });21expect(true).toBeTrue();22const { toBeTrue } = require("jest-extended");23expect.extend({ toBeTrue });24expect(true).toBeTrue();25const { toBeTrue } = require("jest-extended");26expect.extend({ toBeTrue });27expect(true).toBeTrue();28const { toBeTrue } = require("jest-extended");29expect.extend({ toBeTrue });30expect(true).toBeTrue();31const { toBeTrue } = require("jest-extended");32expect.extend({ toBeTrue });33expect(true).toBeTrue();34const { toBeTrue } = require("jest-extended");35expect.extend({ toBeTrue });36expect(true).toBeTrue();37const { toBeTrue } = require("jest-extended");38expect.extend({ toBe

Full Screen

Using AI Code Generation

copy

Full Screen

1const { toBeTrue } = require('jest-extended');2expect.extend({ toBeTrue });3describe('Jest-extended', () => {4 it('toBeTrue', () => {5 expect(true).toBeTrue();6 });7});8toBeTrue (4ms)9const { toBeTrue } = require('jest-extended');10expect.extend({ toBeTrue });11describe('Jest-extended', () => {12 it('toBeTrue', () => {13 expect(true).toBeTrue();14 });15});16toBeTrue (4ms)17const { toBeTrue } = require('jest-extended');18expect.extend({ toBeTrue });19describe('Jest-extended', () => {20 it('toBeTrue', () => {21 expect(true).toBeTrue();22 });23});24toBeTrue (4ms)25const { toBeTrue } = require('jest-extended');26expect.extend({ toBeTrue });27describe('Jest-extended', () => {28 it('toBeTrue', () => {29 expect(true).toBeTrue();30 });31});32toBeTrue (4ms)

Full Screen

Using AI Code Generation

copy

Full Screen

1const extend = require("jest-extended");2expect.extend(extend);3expect(true).toBeTrue();4const extend = require("jest-extended");5expect.extend(extend);6expect(true).toBeTrue();7const extend = require("jest-extended");8expect.extend(extend);9expect(true).toBeTrue();10const extend = require("jest-extended");11expect.extend(extend);12expect(true).toBeTrue();13const extend = require("jest-extended");14expect.extend(extend);15expect(true).toBeTrue();16const extend = require("jest-extended");17expect.extend(extend);18expect(true).toBeTrue();19const extend = require("jest-extended");20expect.extend(extend);21expect(true).toBeTrue();22const extend = require("jest-extended");23expect.extend(extend);24expect(true).toBeTrue();25const extend = require("jest-extended");26expect.extend(extend);27expect(true).toBeTrue();28const extend = require("jest-extended");29expect.extend(extend);30expect(true).toBeTrue();31const extend = require("jest-extended");32expect.extend(extend);33expect(true).toBeTrue();34const extend = require("jest-extended");35expect.extend(extend);36expect(true).toBeTrue();37const extend = require("jest-extended");38expect.extend(extend);39expect(true).toBeTrue();

Full Screen

Using AI Code Generation

copy

Full Screen

1const { toBeTrue } = require('jest-extended');2expect.extend({ toBeTrue });3test('true', () => {4 expect(true).toBeTrue();5});6 ✓ true (1ms)7Jest toBeTruthy() Method8Jest toBeFalsy() Method9Jest toBe() Method10Jest toContain() Method11Jest toBeLessThan() Method12Jest toBeLessThanOrEqual() Method13Jest toBeGreaterThan() Method14Jest toBeGreaterThanOrEqual() Method15Jest toBeCloseTo() Method16Jest toBeUndefined() Method17Jest toBeDefined() Method18Jest toBeNull() Method19Jest toBeNaN() Method20Jest toBeInstanceOf() Method21Jest toHaveLength() Method22Jest toHaveProperty() Method23Jest toHaveBeenCalled() Method24Jest toHaveBeenCalledTimes() Method25Jest toHaveBeenCalledWith() Method26Jest toHaveBeenLastCalledWith() Method27Jest toHaveBeenNthCalledWith() Method28Jest toHaveReturned() Method29Jest toHaveReturnedTimes() Method30Jest toHaveReturnedWith() Method31Jest toHaveLastReturnedWith() Method32Jest toHaveNthReturnedWith() Method33Jest toHaveBeenCalledBefore() Method34Jest toHaveBeenCalledAfter() Method35Jest toHaveBeenCalledImmediatelyBefore() Method36Jest toHaveBeenCalledImmediatelyAfter() Method37Jest toHaveBeenWarned() Method38Jest toHaveBeenWarnedTimes() Method39Jest toHaveBeenWarnedWith() Method

Full Screen

Using AI Code Generation

copy

Full Screen

1const { toBeTrue } = require('jest-extended');2expect.extend({ toBeTrue });3test('true is true', () => {4 expect(true).toBeTrue();5});6test('true is true', () => {7 expect(true).toBe(true);8});9test('true is true', () => {10 expect(true).toBe(false);11});12test('true is true', () => {13 expect(true).toBeNull();14});15test('true is true', () => {16 expect(true).not.toBeNull();17});18test('true is true', () => {19 expect(true).not.toBe(false);20});21test('true is true', () => {22 expect(true).not.toBe(true);23});24test('true is true', () => {25 expect(true).toBeUndefined();26});27test('true is true', () => {28 expect(true).not.toBeUndefined();29});30test('true is true', () => {31 expect(true).toBeDefined();32});33test('true is true', () => {34 expect(true).not.toBeDefined();35});36test('true is true', () => {37 expect(true).toBeNaN();38});39test('true is true', () => {40 expect(true).not.toBeNaN();41});42test('true is true', () => {43 expect(true).toBeInstanceOf(Object);44});45test('true is true', () => {46 expect(true).not.toBeInstanceOf(Object);47});48test('true is true',

Full Screen

Automation Testing Tutorials

Learn to execute automation testing from scratch with the LambdaTest Learning Hub. It covers everything from setting up the prerequisites and running your first automation test to following best practices and diving deeper into advanced test scenarios. The LambdaTest Learning Hub compiles a list of step-by-step guides to help you become proficient with different test automation frameworks, i.e., Selenium, Cypress, TestNG, etc.

LambdaTest Learning Hubs:

YouTube

You can also refer to video tutorials on the LambdaTest YouTube channel to get step-by-step demonstrations from industry experts.

Run jest-extended automation tests on LambdaTest cloud grid

Perform automation testing on 3000+ real desktop and mobile devices online.

Try LambdaTest Now !!

Get 100 minutes of automation testing FREE!!

Next-Gen App & Browser Testing Cloud

Was this article helpful?

Helpful

Not Helpful