You cannot select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
Orbits/Code/BasicNeuralNet.ipynb

223 lines
6.0 KiB
Plaintext

{
"cells": [
{
"cell_type": "markdown",
"id": "usual-deviation",
"metadata": {},
"source": [
"Note on pytorch. NN optimization acts imperatively/by side effect as follows.\n",
" - Define model\n",
" - loop\n",
" - Calculate loss\n",
" - Zero gradients\n",
" - backprop to model\n",
" - check conditions for exit\n",
" - report diagnostics\n",
" - dissect results"
]
},
{
"cell_type": "code",
"execution_count": 1,
"id": "generous-alexandria",
"metadata": {},
"outputs": [],
"source": [
"import torch"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "surrounded-jurisdiction",
"metadata": {},
"outputs": [],
"source": [
"class LinearNet(torch.nn.Module):\n",
" def __init__(self, input_size,output_size,layers_size):\n",
" super().__init__()\n",
" \n",
" #So, this next section constructs different layers within the NN\n",
" #sinlge linear section\n",
" self.linear_step1 = torch.nn.Linear(input_size,layers_size)\n",
" #single linear section\n",
" self.linear_step2 = torch.nn.Linear(layers_size,output_size)\n",
" \n",
" def forward(self, input_values):\n",
" intermediate_values = self.linear_step1(input_values)\n",
" out_values = self.linear_step2(intermediate_values)\n",
" \n",
" return out_values"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "legal-character",
"metadata": {},
"outputs": [],
"source": [
"model = LinearNet(input_size = 5, output_size=5, layers_size=5)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "statutory-bachelor",
"metadata": {},
"outputs": [],
"source": [
"data_in = torch.tensor([1.5,2,3,4,5])"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "informational-bennett",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([1.5000, 2.0000, 3.0000, 4.0000, 5.0000])"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"data_in"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "basic-printer",
"metadata": {},
"outputs": [],
"source": [
"target = torch.zeros(5)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "nonprofit-sponsorship",
"metadata": {},
"outputs": [],
"source": [
"def loss_fn2(output,target):\n",
" return torch.nn.MSELoss()(2*output,target)"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "logical-conversation",
"metadata": {},
"outputs": [],
"source": [
"#Prep Optimizer\n",
"optimizer = torch.optim.SGD(model.parameters(),lr=0.01)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "inner-stations",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([-0.2090, -0.2099, 0.9129, -1.1506, 0.9998])\n",
"tensor([-0.2780, 0.1246, -0.0349, 0.0779, -0.1034])\n",
"tensor([-0.0678, 0.0177, 0.0479, -0.0193, 0.0401])\n",
"tensor([-0.0338, 0.0143, 0.0187, 0.0088, 0.0175])\n",
"tensor([-0.0157, 0.0081, 0.0111, 0.0089, 0.0140])\n",
"tensor([-0.0083, 0.0051, 0.0063, 0.0076, 0.0101])\n",
"tensor([-0.0049, 0.0033, 0.0038, 0.0058, 0.0074])\n",
"tensor([-0.0032, 0.0022, 0.0023, 0.0043, 0.0054])\n",
"tensor([-0.0022, 0.0015, 0.0014, 0.0031, 0.0040])\n",
"tensor([-0.0016, 0.0010, 0.0009, 0.0023, 0.0030])\n",
"tensor([-0.0012, 0.0007, 0.0006, 0.0017, 0.0022])\n",
"tensor([-0.0009, 0.0005, 0.0004, 0.0013, 0.0017])\n",
"tensor([-0.0007, 0.0003, 0.0003, 0.0010, 0.0013])\n",
"tensor([-0.0005, 0.0002, 0.0002, 0.0007, 0.0010])\n",
"tensor([-0.0004, 0.0002, 0.0002, 0.0005, 0.0007])\n",
"tensor([-0.0003, 0.0001, 0.0001, 0.0004, 0.0006])\n",
"tensor([-2.4156e-04, 9.8367e-05, 8.3392e-05, 3.1965e-04, 4.3452e-04])\n",
"tensor([-1.8733e-04, 7.3545e-05, 6.3141e-05, 2.4562e-04, 3.3559e-04])\n",
"tensor([-1.4602e-04, 5.5818e-05, 4.7690e-05, 1.8978e-04, 2.5897e-04])\n",
"tensor([-1.1313e-04, 4.2276e-05, 3.6786e-05, 1.4635e-04, 2.0107e-04])\n",
"tensor([-8.8156e-05, 3.2514e-05, 2.8075e-05, 1.1362e-04, 1.5562e-04])\n",
"tensor([-6.8587e-05, 2.5118e-05, 2.1561e-05, 8.8054e-05, 1.2085e-04])\n",
"tensor([-5.3484e-05, 1.9454e-05, 1.6658e-05, 6.8377e-05, 9.3834e-05])\n",
"tensor([-4.1497e-05, 1.4939e-05, 1.3124e-05, 5.3061e-05, 7.3066e-05])\n",
"tensor([-3.2303e-05, 1.1659e-05, 1.0152e-05, 4.1244e-05, 5.6852e-05])\n",
"\n",
" tensor(1.3901e-06, grad_fn=<MseLossBackward>)\n"
]
}
],
"source": [
"# Training loop: zero grads, forward, loss, backprop, step — 25 iterations.\n",
"for i in range(25):\n",
"    optimizer.zero_grad()\n",
"\n",
"    # Call the module itself rather than model.forward() so that\n",
"    # nn.Module's __call__ machinery (hooks, etc.) runs.\n",
"    output = model(data_in)\n",
"\n",
"    loss = loss_fn2(output, target)\n",
"\n",
"    loss.backward()\n",
"\n",
"    optimizer.step()\n",
"\n",
"    # Diagnostic: the first layer's bias gradient shrinks as training converges.\n",
"    print(model.linear_step1.bias.grad)\n",
"print(\"\\n\",loss)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "native-bristol",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "moral-apollo",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.9.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}