import std.stdio;
import numir;
import grain.autograd; // : Variable, HostStorage;

// CPU (HostStorage) path: zeroGrad clears gradients, update applies w -= lr * grad.
{
    auto model = MLP!(float, HostStorage)(3);

    // zeroGrad must reset a previously written gradient entry
    model.fc1.weight.grad[0] = 1.0;
    model.zeroGrad();
    assert(model.fc1.weight.grad[0] == 0.0);

    // one SGD step with lr = 0.5 on zeroed weights
    auto optimizer = SGD!(typeof(model))(model, 0.5);
    model.fc1.weight.data.zero_();
    model.fc1.weight.grad = [[1.0f, 0.0f, 0.0f],
                             [0.0f, 0.0f, 0.0f]].variable.data;
    optimizer.update();
    assert(model.fc1.weight.sliced == [[-0.5, 0.0, 0.0], [0.0, 0.0, 0.0]]);
}
// GPU (DeviceStorage) path: same contract, only compiled with CUDA support.
version (grain_cuda) {
    auto model = MLP!(float, DeviceStorage)(3);

    // zeroGrad must clear a device-side gradient
    model.fc1.weight.grad = [[1.0f, 0.0f, 0.0f],
                             [0.0f, 0.0f, 0.0f]].variable.to!DeviceStorage.data;
    model.zeroGrad();
    assert(model.fc1.weight.to!HostStorage.gradSliced
           == [[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]);

    // one SGD step with lr = 0.5 on zeroed device weights
    auto optimizer = SGD!(typeof(model))(model, 0.5);
    model.fc1.weight.data.zero_();
    model.fc1.weight.grad = [[1.0f, 0.0f, 0.0f],
                             [0.0f, 0.0f, 0.0f]].variable.to!DeviceStorage.data;
    optimizer.update();
    assert(model.fc1.weight.to!HostStorage.sliced
           == [[-0.5, 0.0, 0.0], [0.0, 0.0, 0.0]]);
}
/// Stochastic gradient descent (SGD) optimizer.