{ "cells": [
 { "cell_type": "code", "execution_count": 1, "id": "tested-harassment", "metadata": {}, "outputs": [], "source": [ "import torch" ] },
 { "cell_type": "code", "execution_count": 2, "id": "absolute-psychiatry", "metadata": {}, "outputs": [], "source": [
  "class LinearNet(torch.nn.Module):\n",
  "    \"\"\"Two explicitly-named linear layers: input_size -> layers_size -> output_size.\n",
  "\n",
  "    NOTE(review): there is no activation between the two layers, so their\n",
  "    composition is itself a single affine map; insert a nonlinearity\n",
  "    (e.g. torch.nn.ReLU) between them if a genuinely deeper model is intended.\n",
  "    \"\"\"\n",
  "    def __init__(self, input_size,output_size,layers_size):\n",
  "        super().__init__()\n",
  "        \n",
  "        #So, this next section constructs different layers within the NN\n",
  "        #single linear section: input_size -> layers_size\n",
  "        self.linear_step1 = torch.nn.Linear(input_size,layers_size)\n",
  "        #single linear section: layers_size -> output_size\n",
  "        self.linear_step2 = torch.nn.Linear(layers_size,output_size)\n",
  "        \n",
  "    def forward(self, input_values):\n",
  "        #Run the two linear stages back to back and return the second one's output.\n",
  "        intermediate_values = self.linear_step1(input_values)\n",
  "        out_values = self.linear_step2(intermediate_values)\n",
  "        \n",
  "        return out_values" ] },
 { "cell_type": "code", "execution_count": 29, "id": "exciting-pipeline", "metadata": {}, "outputs": [], "source": [
  "class LinearNet2(torch.nn.Module):\n",
  "    \"\"\"Same two-linear-layer net as LinearNet, but built with torch.nn.Sequential.\n",
  "\n",
  "    NOTE(review): as in LinearNet, no activation separates the two Linear\n",
  "    modules, so the network is equivalent to one affine layer.\n",
  "    \"\"\"\n",
  "    def __init__(self, input_size,output_size,layers_size):\n",
  "        super().__init__()\n",
  "        \n",
  "        #combined together into a set of sequential workers\n",
  "        self.sequential_layers = torch.nn.Sequential(\n",
  "            torch.nn.Linear(input_size,layers_size)\n",
  "            ,torch.nn.Linear(layers_size,output_size)\n",
  "        )\n",
  "        \n",
  "    def forward(self, input_values):\n",
  "        #Delegate straight to the Sequential container.\n",
  "        return self.sequential_layers(input_values)\n",
  "        \n",
  "    def fwd_wrapper(self,input1,input2):\n",
  "        #Combines two layers together.\n",
  "        #This supports the thought that I can create a \"launch\" nn that takes two values as input.\n",
  "        #NOTE(review): torch.cat(..., 0) joins along dim 0, so this assumes 1-D\n",
  "        #inputs whose combined length equals input_size; for batched (2-D)\n",
  "        #inputs dim 0 would be the batch axis -- confirm intended usage.\n",
  "        return self.sequential_layers(torch.cat((input1,input2),0))" ] },
 { "cell_type": "code", "execution_count": 22, "id": "romantic-visit", "metadata": {}, "outputs": [], "source": [ "model = LinearNet2(input_size = 5, output_size=5, layers_size=15)" ] },
 { "cell_type": "code", "execution_count": 23, "id": "finished-taste", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream",
"text": [ "LinearNet2(\n", " (sequential_layers): Sequential(\n", " (0): Linear(in_features=5, out_features=15, bias=True)\n", " (1): Linear(in_features=15, out_features=5, bias=True)\n", " )\n", ")\n" ] } ], "source": [ "print(model)" ] }, { "cell_type": "code", "execution_count": 24, "id": "dress-baltimore", "metadata": {}, "outputs": [], "source": [ "data_in = torch.tensor([1.5,2,3,4,5])\n", "data_in1 = torch.tensor([1.5,2])\n", "data_in2 = torch.tensor([3,4,5])" ] }, { "cell_type": "code", "execution_count": 25, "id": "korean-width", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([8.7445e-04, 5.1066e-02, 5.5142e-01, 3.3832e-01, 1.8822e+00],\n", " grad_fn=)" ] }, "execution_count": 25, "metadata": {}, "output_type": "execute_result" } ], "source": [ "model.forward(data_in)" ] }, { "cell_type": "code", "execution_count": 30, "id": "novel-composer", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "tensor([8.7445e-04, 5.1066e-02, 5.5142e-01, 3.3832e-01, 1.8822e+00],\n", " grad_fn=)" ] }, "execution_count": 30, "metadata": {}, "output_type": "execute_result" } ], "source": [ "test = model.fwd_wrapper(data_in1,data_in2)\n", "test" ] }, { "cell_type": "code", "execution_count": 33, "id": "accepting-fireplace", "metadata": {}, "outputs": [ { "ename": "RuntimeError", "evalue": "grad can be implicitly created only for scalar outputs", "output_type": "error", "traceback": [ "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, 
gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 244\u001b[0m inputs=inputs)\n\u001b[0;32m--> 245\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 246\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 247\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0mgrad_tensors_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_tensor_or_tensors_to_tuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgrad_tensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 141\u001b[0;31m \u001b[0mgrad_tensors_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_make_grads\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mgrad_tensors_\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 142\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mretain_graph\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 143\u001b[0m \u001b[0mretain_graph\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36m_make_grads\u001b[0;34m(outputs, grads)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequires_grad\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"grad can be implicitly created only for scalar outputs\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0mnew_grads\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mones_like\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmemory_format\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpreserve_format\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m 
\u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "\u001b[0;31mRuntimeError\u001b[0m: grad can be implicitly created only for scalar outputs" ] } ], "source": [ "test.backward()" ] }, { "cell_type": "code", "execution_count": 10, "id": "rapid-spoke", "metadata": {}, "outputs": [ { "data": { "text/plain": [ "OrderedDict([('linear_step1.weight',\n", " tensor([[ 0.1832, -0.0033, -0.0508, -0.3904, 0.3259],\n", " [-0.1713, 0.0096, -0.0525, 0.4356, -0.2567],\n", " [ 0.3026, -0.4448, 0.2032, -0.2238, -0.1767],\n", " [ 0.4211, 0.3466, 0.4140, 0.1798, 0.0438],\n", " [ 0.2925, 0.2805, -0.1709, -0.0526, 0.1170],\n", " [ 0.3011, -0.1513, -0.1232, -0.3212, 0.1243],\n", " [-0.3001, 0.4073, -0.3111, 0.4319, -0.1216],\n", " [ 0.2732, 0.2345, 0.0355, 0.3887, 0.4408],\n", " [-0.4345, -0.0974, 0.0280, 0.2111, -0.3547],\n", " [ 0.0311, 0.3230, 0.0452, 0.0421, -0.3895],\n", " [-0.4149, -0.0149, 0.2047, 0.3821, -0.1537],\n", " [-0.3941, -0.1030, -0.3153, 0.1546, -0.2481],\n", " [-0.2590, -0.3550, -0.3910, -0.1634, 0.0569],\n", " [-0.2632, -0.3178, -0.1942, -0.2556, -0.0210],\n", " [-0.1319, -0.4315, 0.2441, -0.3021, -0.2024]])),\n", " ('linear_step1.bias',\n", " tensor([ 0.4211, 0.3796, -0.0749, 0.1055, 0.2941, -0.0833, 0.2162, 0.4153,\n", " -0.0988, 0.3072, -0.2772, 0.0678, 0.3566, 0.1708, -0.0892])),\n", " ('linear_step2.weight',\n", " tensor([[ 0.0047, 0.0587, 0.1857, -0.2414, 0.0850, 0.2135, 0.0963, 0.0224,\n", " 0.0328, 0.0438, -0.2483, 0.0613, 0.2572, 0.2077, 0.0558],\n", " [ 0.2141, 0.0382, 0.1007, -0.0127, 0.2511, 0.0085, 0.1415, 0.1731,\n", " -0.0145, 0.0885, -0.0495, 0.1828, 0.2506, -0.1226, -0.1583],\n", " [ 0.0772, 0.1059, 0.0391, -0.0915, -0.2160, 0.1320, 0.0727, 0.1585,\n", " 0.1454, -0.0973, -0.0960, -0.0753, -0.2022, 0.0092, 0.1243],\n", " [-0.2120, 0.1741, -0.0886, 0.0816, 0.0032, 0.1702, 0.1375, -0.1119,\n", " -0.0059, -0.1185, 0.2243, -0.1849, -0.1739, -0.0298, -0.1838],\n", " [ 0.0095, 0.0369, 0.0736, 0.1567, 
-0.1493, 0.2170, 0.1332, 0.1684,\n", " 0.1232, 0.1393, -0.2525, -0.1990, -0.1293, -0.1827, 0.1345]])),\n", " ('linear_step2.bias',\n", " tensor([ 0.1185, -0.1514, 0.1662, 0.0849, -0.1373]))])" ] }, "execution_count": 10, "metadata": {}, "output_type": "execute_result" } ], "source": [ "model.state_dict()" ] }, { "cell_type": "code", "execution_count": null, "id": "compatible-least", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.8" } }, "nbformat": 4, "nbformat_minor": 5 }