{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "royal-trace",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"import combined as c\n",
"import NeuralNetworkSpecifications as nns"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "atlantic-finish",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[[8., 5., 3.]],\n",
"\n",
" [[3., 6., 6.]],\n",
"\n",
" [[3., 7., 2.]],\n",
"\n",
" [[4., 8., 2.]],\n",
"\n",
" [[0., 6., 8.]]], grad_fn=<CatBackward>) torch.Size([5, 1, 3])\n"
]
}
],
"source": [
"BATCH_SIZE = 5\n",
"STATES = 3\n",
"CONSTELLATIONS = STATES -1 #determined by debris tracking\n",
"MAX = 10\n",
"FEATURES = 1\n",
"\n",
"stocks = torch.randint(MAX,(BATCH_SIZE,1,CONSTELLATIONS), dtype=torch.float32, requires_grad=True)\n",
"debris = torch.randint(MAX,(BATCH_SIZE,1,1), dtype=torch.float32, requires_grad=True)\n",
"\n",
"s = c.States(stocks, debris)\n",
"\n",
"print(s.values,s.values.shape)"
]
},
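{
"cell_type": "markdown",
"id": "states-cat-note",
"metadata": {},
"source": [
"The `grad_fn=<CatBackward>` in the output above shows that `c.States` builds its `values` by concatenating `stocks` and `debris` along the last dimension. A quick shape check (an added sketch; `torch.cat` mimics, not replaces, the `States` class):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "states-cat-check",
"metadata": {},
"outputs": [],
"source": [
"# (5, 1, 2) stocks + (5, 1, 1) debris -> (5, 1, 3) states\n",
"print(torch.cat([stocks, debris], dim=-1).shape)  # torch.Size([5, 1, 3])"
]
},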
{
"cell_type": "code",
"execution_count": 3,
"id": "simplified-permission",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[[[1.],\n",
" [0.]],\n",
"\n",
" [[0.],\n",
" [1.]]]]) torch.Size([1, 2, 2, 1])\n",
"tensor([[ 1.0000, 0.0000],\n",
" [ 0.0000, 1.0000],\n",
" [-0.2000, -0.2000]]) torch.Size([3, 2])\n"
]
}
],
"source": [
"#launch_costs = torch.randint(3,(1,CONSTELLATIONS,CONSTELLATIONS,FEATURES), dtype=torch.float32)\n",
"launch_costs = torch.tensor([[[[1.0],[0]],[[0.0],[1]]]])\n",
"print(launch_costs, launch_costs.shape)\n",
"#payoff = torch.randint(5,(STATES,CONSTELLATIONS), dtype=torch.float32)\n",
"payoff = torch.tensor([[1.0, 0],[0,1.0],[-0.2,-0.2]])\n",
"print(payoff, payoff.shape)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "level-angle",
"metadata": {},
"outputs": [],
"source": [
"def linear_profit(states, choices):\n",
" #Pay particular attention to the dimensions\n",
" #note that there is an extra dimension in there just ot match that of the profit vector we'll be giving out.\n",
" \n",
" #calculate launch expenses\n",
" \n",
" launch_expense = torch.tensordot(choices,launch_costs, [[-2,-1],[-2,-1]])\n",
"\n",
" #calculate revenue\n",
"\n",
" revenue = torch.tensordot(s.values, payoff, [[-1],[0]])\n",
"\n",
"\n",
" profit = revenue - launch_expense\n",
" return profit"
]
},
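{
"cell_type": "markdown",
"id": "profit-shape-note",
"metadata": {},
"source": [
"Shape check for `linear_profit` (an added sketch): `choices` of shape `(BATCH_SIZE, CONSTELLATIONS, FEATURES)` contracted with `launch_costs` of shape `(1, CONSTELLATIONS, CONSTELLATIONS, FEATURES)` over the last two dims leaves `(BATCH_SIZE, 1, CONSTELLATIONS)`, which matches the revenue shape from contracting `states` with `payoff`."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "profit-shape-check",
"metadata": {},
"outputs": [],
"source": [
"# Dummy choices with the policy's output shape; profit should be (5, 1, 2).\n",
"dummy_choices = torch.zeros(BATCH_SIZE, CONSTELLATIONS, FEATURES)\n",
"print(linear_profit(s.values, dummy_choices).shape)  # torch.Size([5, 1, 2])"
]
},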
{
"cell_type": "code",
"execution_count": 5,
"id": "copyrighted-acting",
"metadata": {},
"outputs": [],
"source": [
"policy = nns.ChoiceFunction(BATCH_SIZE\n",
" ,STATES\n",
" ,FEATURES\n",
" ,CONSTELLATIONS\n",
" ,12\n",
" )"
]
},
{
"cell_type": "markdown",
"id": "casual-career",
"metadata": {},
"source": [
"example to get profit = 1\n",
"```python\n",
"optimizer = torch.optim.Adam(policy.parameters(),lr=0.001)\n",
"\n",
"for i in range(10000):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = policy.forward(s.values)\n",
"\n",
" l = ((1-linear_profit(s.values,output))**2).sum()\n",
"\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" if i%200==0:\n",
" print(l)\n",
" \n",
"\n",
"results = policy.forward(s.values)\n",
"print(results.mean(dim=0), \"\\n\",results.std(dim=0))\n",
"```\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "straight-negative",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[[0.0000],\n",
" [0.0000]],\n",
"\n",
" [[0.0000],\n",
" [0.0000]],\n",
"\n",
" [[0.0000],\n",
" [0.0000]],\n",
"\n",
" [[0.0000],\n",
" [0.0000]],\n",
"\n",
" [[0.3742],\n",
" [0.0000]]], grad_fn=<ReluBackward0>)"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"output = policy.forward(s.values)\n",
"output"
]
},
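{
"cell_type": "markdown",
"id": "relu-saturation-note",
"metadata": {},
"source": [
"Note that the final ReLU (`grad_fn=<ReluBackward0>` above) leaves most entries exactly zero, and gradients through those zeroed units are also zero. A quick measure of the saturation (an added check):"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "relu-saturation-check",
"metadata": {},
"outputs": [],
"source": [
"# Fraction of exactly-zero entries in the policy output.\n",
"print((output == 0).float().mean())"
]
},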
{
"cell_type": "code",
"execution_count": 7,
"id": "independent-deficit",
"metadata": {},
"outputs": [],
"source": [
"t = torch.ones_like(output, requires_grad=True)"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "romance-force",
"metadata": {
"tags": []
},
"outputs": [
{
"ename": "RuntimeError",
"evalue": "element 0 of tensors does not require grad and does not have a grad_fn",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-57-efee93d7c257>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;31m#this is where I lose the gradient. This is where I need a gradient so that I can call .backward below\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 6\u001b[0;31m \u001b[0mtest_loss\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 244\u001b[0m inputs=inputs)\n\u001b[0;32m--> 245\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 246\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 247\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m 143\u001b[0m \u001b[0mretain_graph\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 144\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 145\u001b[0;31m Variable._execution_engine.run_backward(\n\u001b[0m\u001b[1;32m 146\u001b[0m \u001b[0mtensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgrad_tensors_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 147\u001b[0m allow_unreachable=True, accumulate_grad=True) # allow_unreachable flag\n",
"\u001b[0;31mRuntimeError\u001b[0m: element 0 of tensors does not require grad and does not have a grad_fn"
]
}
],
"source": [
"def test_loss(options):\n",
" return torch.autograd.functional.jacobian(linear_profit, (s.values, options))[0].sum()\n",
" #something is off here ^\n",
" #this is where I lose the gradient. This is where I need a gradient so that I can call .backward below\n",
"\n",
"test_loss(output).backward()"
]
},
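{
"cell_type": "markdown",
"id": "jacobian-gradient-note",
"metadata": {},
"source": [
"Two things go wrong above. First, `torch.autograd.functional.jacobian` is non-differentiable by default; its documented `create_graph=True` option keeps the Jacobian attached to the autograd graph. Second, `linear_profit` is linear in `states`, so its Jacobian with respect to `states` is a constant built only from `payoff`; a constant has no `grad_fn` even with `create_graph=True`, so this particular loss cannot send gradients back to the policy. The sketch below (an added illustration, not the original code) shows `create_graph=True` working on a function whose Jacobian does depend on its input:"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "jacobian-gradient-demo",
"metadata": {},
"outputs": [],
"source": [
"# Minimal demonstration: jacobian(..., create_graph=True) is differentiable\n",
"# whenever the Jacobian itself depends on the input.\n",
"def toy(x):\n",
"    return (x ** 2).sum(-1)  # Jacobian is 2*x, which depends on x\n",
"\n",
"x = torch.randn(CONSTELLATIONS, requires_grad=True)\n",
"jac = torch.autograd.functional.jacobian(toy, x, create_graph=True)\n",
"jac.sum().backward()  # succeeds: jac carries a grad_fn\n",
"print(x.grad)  # 2.0 everywhere\n",
"\n",
"# For linear_profit the analogous call would be\n",
"#   torch.autograd.functional.jacobian(linear_profit, (s.values, options), create_graph=True)\n",
"# but its states-Jacobian does not depend on options, so a loss built from it\n",
"# still cannot train the policy until profit couples states and choices nonlinearly."
]
},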
{
"cell_type": "code",
"execution_count": null,
"id": "prospective-nelson",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}