test LogSoftmax: simple forward case, gradcheck, and CPU/CUDA equality
import grain.testing;
import std.typecons;
import numir;
import mir.ndslice;
import mir.math;

// forward on a fixed input: every row of xs is [-1, 2, 3], so the expected
// log-softmax output is xs - e, where e is the log-sum-exp of one row
auto e = log(exp(-1.0) + exp(2.0) + exp(3.0));
auto xs = [[-1.0f, 2.0f, 3.0f], [-1.0f, 2.0f, 3.0f], [-1.0f, 2.0f, 3.0f]]
    .nparray;
LogSoftmax!float hfunc;
auto _hx = xs.variable;
auto _hy = hfunc.forward(_hx);
assert(approxEqual(_hy.sliced, xs - e));

// numerical gradient check on random input
auto hx = uniform!float(2, 2).slice.variable;
auto hy = hfunc.forward(hx);
auto hgy = uniform!float(2, 2).slice.variable;
auto hgx = hfunc.backward(hgy);
gradCheck(hfunc, hx, hgy, 1e-3, 1e-3, 1e-3);

// CUDA forward/backward must match the CPU results
version (grain_cuda) {
    alias Storage = DeviceStorage;
    auto func = LogSoftmax!float();
    auto dx = hx.to!Storage;
    auto dy = func.forward(dx);
    assert(approxEqual(dy.to!HostStorage.sliced, hy.sliced));
    auto dgy = hgy.to!Storage;
    auto dgx = func.backward(dgy);
    assert(approxEqual(dgx.to!HostStorage.sliced, hgx.sliced));
}
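The first assertion relies on the identity logsoftmax(x)[i] = x[i] - log(sum_j exp(x[j])): since every row of xs is [-1, 2, 3], each expected output row is the input minus e, the log-sum-exp of that row. A minimal standalone sketch of that reference computation in plain D follows; the local names x, lse, y, and total are illustrative only and not part of grain.

unittest {
    import std.math : exp, log, approxEqual;

    // log-softmax of a row x: y[i] = x[i] - log(sum_j exp(x[j]))
    double[] x = [-1.0, 2.0, 3.0];
    double lse = 0;
    foreach (v; x) lse += exp(v);
    lse = log(lse);                  // same value as `e` in the test above

    double[] y;
    foreach (v; x) y ~= v - lse;     // expected output row, i.e. xs - e

    // exponentiating a log-softmax row gives probabilities that sum to 1
    double total = 0;
    foreach (v; y) total += exp(v);
    assert(approxEqual(total, 1.0));
}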