import std.typecons;
import grain.testing;
import numir;
import mir.ndslice;
// assumed module paths for the grain types used below (may differ by version):
import grain.autograd;   // Variable, variable, HostStorage, DeviceStorage
import grain.functions;  // AddBias

AddBias!float func;
auto hx = [[0f, 1f], [2f, 3f], [4f, 5f]].variable; // 3x2
auto hb = [-1f, 1f].variable; // 2
auto hy = func.forward(hx, hb); // y[i, j] = x[i, j] + b[j]
assert(hy.sliced == [[-1f, 2f], [1f, 4f], [3f, 6f]]);

auto hgy = uniform!float(hy.shape.castArray!size_t).slice.variable;
auto hgxb = func.backward(hgy);
// the gradient w.r.t. x passes gy through unchanged
assert(hgxb[0].sliced == hgy.sliced);
// the gradient w.r.t. b is the column-wise sum of gy
assert(hgxb[1].sliced == [hgy.sliced[0, 0] + hgy.sliced[1, 0] + hgy.sliced[2, 0],
                          hgy.sliced[0, 1] + hgy.sliced[1, 1] + hgy.sliced[2, 1]]);
gradCheck(func, tuple(hx, hb), hgy); // numerical gradient check

version (grain_cuda) {
    // the same ops on device must reproduce the host results
    auto dx = hx.to!DeviceStorage;
    auto db = hb.to!DeviceStorage;
    auto dy = func.forward(dx, db);
    assert(dy.to!HostStorage.sliced == [[-1f, 2f], [1f, 4f], [3f, 6f]]);
    auto dgy = hgy.to!DeviceStorage;
    auto dgxb = func.backward(dgy);
    assert(dgxb[0].to!HostStorage.sliced == hgxb[0].sliced);
    assert(dgxb[1].to!HostStorage.sliced == hgxb[1].sliced);
}
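The backward assertions encode the calculus of the forward rule y[i, j] = x[i, j] + b[j]: since b is broadcast over rows, its gradient accumulates gy over the row (batch) axis, while x receives gy unchanged. A minimal standalone sketch of the same rule in plain D, using built-in arrays instead of grain variables (addBiasForward and addBiasBackwardBias are illustrative names, not grain API):

import std.stdio;

// forward: y[i][j] = x[i][j] + b[j], broadcasting the bias along rows
float[][] addBiasForward(const float[][] x, const float[] b)
{
    auto y = new float[][](x.length, b.length);
    foreach (i, row; x)
        foreach (j, v; row)
            y[i][j] = v + b[j];
    return y;
}

// backward for b: gb[j] = sum_i gy[i][j], the column-wise sum checked above
float[] addBiasBackwardBias(const float[][] gy)
{
    auto gb = new float[](gy[0].length);
    gb[] = 0f;
    foreach (row; gy)
        foreach (j, v; row)
            gb[j] += v;
    return gb;
}

void main()
{
    writeln(addBiasForward([[0f, 1f], [2f, 3f], [4f, 5f]], [-1f, 1f]));
    // [[-1, 2], [1, 4], [3, 6]]
    writeln(addBiasBackwardBias([[1f, 2f], [3f, 4f], [5f, 6f]]));
    // [9, 12]
}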
Add a bias vector to each row of a matrix; used inside grain.chain.Linear. TODO: generalize to broadcastable addition (use cudnnAddTensor).
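As a point of reference for the TODO, cudnnAddTensor-style broadcasting repeats any axis of size 1 in the bias tensor up to the destination's extent. A hypothetical rank-2 sketch of that rule in plain D (broadcastAdd is an illustrative name, not grain or cuDNN API):

float[][] broadcastAdd(const float[][] x, const float[][] b)
{
    auto y = new float[][](x.length, x[0].length);
    foreach (i, row; x)
        foreach (j, v; row)
            // repeat an axis of b when its size is 1, cuDNN-style
            y[i][j] = v + b[b.length == 1 ? 0 : i][b[0].length == 1 ? 0 : j];
    return y;
}

With b == [[-1f, 1f]] this reproduces the AddBias example above; a 3x1 b would instead add a per-row offset.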