Test MatMul gradCheck and CPU/CUDA equality
foreach (i; [2, 3, 4]) {
    foreach (j; [2, 3, 4]) {
        import std.typecons : tuple;
        import numir : uniform;
        import mir.ndslice : slice;
        import grain.testing;

        auto k = 3;
        auto a = uniform!float(i, k).slice.variable;
        auto b = uniform!float(k, j).slice.variable;
        auto gc = uniform!float(i, j).slice.variable;
        MatMul!float func;
        // numerical gradient check on the CPU implementation
        gradCheck(func, tuple(a, b), gc, 1e-3, 1e-3, 1e-3);

        // when built with CUDA support, check that host and device results agree
        version (grain_cuda) {
            import numir.testing;
            MatMul!float func2;
            auto hc = func.forward(a, b);
            auto dc = func2.forward(a.to!DeviceStorage, b.to!DeviceStorage);
            assert(approxEqual(dc.to!HostStorage.sliced, hc.sliced));
            auto hgab = func.backward(gc);
            auto dgab = func2.backward(gc.to!DeviceStorage);
            // writefln!"%s vs %s"(dgab[0].to!HostStorage.sliced, hgab[0].sliced);
            assert(approxEqual(dgab[0].to!HostStorage.sliced, hgab[0].sliced));
            assert(approxEqual(dgab[1].to!HostStorage.sliced, hgab[1].sliced));
        }
    }
}
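For intuition, a gradient check like this compares the analytic backward pass against a numerical estimate of the gradient. Below is a minimal sketch of the underlying central-difference idea on a toy scalar function; it is an illustration only, not grain's gradCheck implementation, and f and dfdx are made-up names.

// Sketch: central-difference gradient check on a toy scalar function.
// gradCheck applies the same idea elementwise to the inputs of a function
// and compares the result with its backward() within the given tolerances.
import std.math : abs;

double f(double x) { return x * x; }       // toy forward
double dfdx(double x) { return 2.0 * x; }  // analytic gradient

auto x = 1.5;
auto eps = 1e-3;
auto numeric = (f(x + eps) - f(x - eps)) / (2.0 * eps); // central difference
assert(abs(numeric - dfdx(x)) < 1e-6);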
MatMul with Variable.backward
import std.typecons;
import numir;
import mir.ndslice;
static import grain.config;

grain.config.backprop = true; // record the computation graph for autograd
auto func = new MatMul!float;
auto a = uniform!float(3, 4).slice.variable(true); // requires grad
auto b = uniform!float(4, 2).slice.variable(true); // requires grad
auto c = func.applyForward(a, b);
auto gc = uniform!float(3, 2).slice.variable; // upstream gradient for c
auto ugc = UntypedVariable(gc);
c.backward(&ugc); // backprop through the recorded graph
// gradients accumulated on a and b match the analytic backward pass
auto gab = func.backward(gc);
assert(a.gradSlice == gab[0].sliced);
assert(b.gradSlice == gab[1].sliced);
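For reference (standard matrix calculus, not quoted from the grain source), the gradients being compared here, for C = A B with upstream gradient ∂L/∂C = gc, are

\frac{\partial L}{\partial A} = \frac{\partial L}{\partial C}\, B^{\top},
\qquad
\frac{\partial L}{\partial B} = A^{\top}\, \frac{\partial L}{\partial C}

so gab[0] has the 3×4 shape of a and gab[1] the 4×2 shape of b.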
Matrix-Matrix multiplication (using cuBLAS)