diff --git a/README.md b/README.md
index f9f39b9..bec00c4 100644
--- a/README.md
+++ b/README.md
@@ -19,12 +19,12 @@ The API is currently inspired by PyTorch, with one notable difference: when you
 int main() {
 
     // You can use enums to set the gradient requirement:
-    net::Tensor x({2,3}, net::requires_gradient::False); x.fill({1,2,3,4,5,6});
-    net::Tensor w({4,3}, net::requires_gradient::True); w.fill({1,2,-3,4,5,6,7,8,-9,10,11,-12});
+    net::Tensor x({2,3}, net::requires_gradient::False); x.fill({1,2,3,4,5,6});
+    net::Tensor w({4,3}, net::requires_gradient::True); w.fill({1,2,-3,4,5,6,7,8,-9,10,11,-12});
 
     // Or use just a boolean. Whatever you prefer.
-    net::Tensor b({1,4}, true); b.fill({1,2,3,4});
-    net::Tensor I({2,4}, false); I.fill(1);
+    net::Tensor b({1,4}, true); b.fill({1,2,3,4});
+    net::Tensor I({2,4}, false); I.fill(1);
 
     x = net::function::linear(x,w,b);
     x = net::function::relu(x);
@@ -61,7 +61,7 @@ struct Autoencoder : public net::Model {
         net::layer::LogSoftmax(1/*axis*/)
     };
 
-    net::Tensor forward(net::Tensor x) {
+    net::Tensor forward(net::Tensor x) {
        x = encoder(x);
       x = decoder(x);
       return x;
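
For reference, a minimal sketch of how the README excerpt above composes into a self-contained program. This is not part of the diff: the header name `net.h` is a placeholder assumption, only calls visible in the excerpt are used, and any backward/printing steps from the full README are omitted because they are not shown here.

```cpp
// Minimal sketch, assuming a hypothetical header "net.h" for the library.
// Only calls that appear in the README excerpt above are used.
#include "net.h"

int main() {
    // The second constructor argument toggles gradient tracking,
    // either via the enum or a plain bool.
    net::Tensor x({2,3}, net::requires_gradient::False); x.fill({1,2,3,4,5,6});
    net::Tensor w({4,3}, net::requires_gradient::True);  w.fill({1,2,-3,4,5,6,7,8,-9,10,11,-12});
    net::Tensor b({1,4}, true);                          b.fill({1,2,3,4});

    // Created in the README as well; its use falls outside this excerpt.
    net::Tensor I({2,4}, false); I.fill(1);

    // Affine transform followed by a ReLU, as in the snippet.
    x = net::function::linear(x, w, b);
    x = net::function::relu(x);

    return 0;
}
```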