Updated a couple of files (one of which will be overwritten in a soon-to-exist pull/merge request), and added a README

temporaryWork
youainti 5 years ago
parent 3e8b8f5284
commit 92cb373b06

@ -2,8 +2,8 @@
"cells": [
{
"cell_type": "code",
"execution_count": 16,
"id": "heavy-turkish",
"execution_count": 1,
"id": "tested-harassment",
"metadata": {},
"outputs": [],
"source": [
@ -12,8 +12,8 @@
},
{
"cell_type": "code",
"execution_count": 4,
"id": "ordered-disease",
"execution_count": 2,
"id": "absolute-psychiatry",
"metadata": {},
"outputs": [],
"source": [
@ -36,8 +36,8 @@
},
{
"cell_type": "code",
"execution_count": null,
"id": "quiet-syria",
"execution_count": 29,
"id": "exciting-pipeline",
"metadata": {},
"outputs": [],
"source": [
@ -48,36 +48,43 @@
" #combined together into a set of sequential workers\n",
" self.sequential_layers = torch.nn.Sequential(\n",
" torch.nn.Linear(input_size,layers_size)\n",
" torch.nn.Linear(layers_size,output_size)\n",
" ,torch.nn.Linear(layers_size,output_size)\n",
" )\n",
" \n",
" def forward(self, input_values):\n",
" return self.sequential_layers(input_values)"
" return self.sequential_layers(input_values)\n",
" \n",
" def fwd_wrapper(self,input1,input2):\n",
" #Combines two layers together.\n",
" #This supports the thought that I can create a \"launch\" nn that takes two values as input.\n",
" return self.sequential_layers(torch.cat((input1,input2),0))"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "fleet-letter",
"execution_count": 22,
"id": "romantic-visit",
"metadata": {},
"outputs": [],
"source": [
"model = LinearNet(input_size = 5, output_size=5, layers_size=15)"
"model = LinearNet2(input_size = 5, output_size=5, layers_size=15)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "provincial-crime",
"execution_count": 23,
"id": "finished-taste",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"LinearNet(\n",
" (linear_step1): Linear(in_features=5, out_features=15, bias=True)\n",
" (linear_step2): Linear(in_features=15, out_features=5, bias=True)\n",
"LinearNet2(\n",
" (sequential_layers): Sequential(\n",
" (0): Linear(in_features=5, out_features=15, bias=True)\n",
" (1): Linear(in_features=15, out_features=5, bias=True)\n",
" )\n",
")\n"
]
}
@ -88,27 +95,30 @@
},
{
"cell_type": "code",
"execution_count": 12,
"id": "particular-response",
"execution_count": 24,
"id": "dress-baltimore",
"metadata": {},
"outputs": [],
"source": [
"data_in = torch.tensor([1.5,2,3,4,5])"
"data_in = torch.tensor([1.5,2,3,4,5])\n",
"data_in1 = torch.tensor([1.5,2])\n",
"data_in2 = torch.tensor([3,4,5])"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "passing-termination",
"execution_count": 25,
"id": "korean-width",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([0.3597, 0.7032, 0.0924, 0.7974, 3.1524], grad_fn=<AddBackward0>)"
"tensor([8.7445e-04, 5.1066e-02, 5.5142e-01, 3.3832e-01, 1.8822e+00],\n",
" grad_fn=<AddBackward0>)"
]
},
"execution_count": 11,
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
@ -119,48 +129,96 @@
},
{
"cell_type": "code",
"execution_count": 13,
"id": "public-speaking",
"execution_count": 30,
"id": "novel-composer",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([8.7445e-04, 5.1066e-02, 5.5142e-01, 3.3832e-01, 1.8822e+00],\n",
" grad_fn=<AddBackward0>)"
]
},
"execution_count": 30,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"test = model.fwd_wrapper(data_in1,data_in2)\n",
"test"
]
},
{
"cell_type": "code",
"execution_count": 33,
"id": "accepting-fireplace",
"metadata": {},
"outputs": [
{
"ename": "RuntimeError",
"evalue": "grad can be implicitly created only for scalar outputs",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)",
"\u001b[0;32m<ipython-input-33-b26b7a7d68aa>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m",
"\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 244\u001b[0m inputs=inputs)\n\u001b[0;32m--> 245\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 246\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 247\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0mgrad_tensors_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_tensor_or_tensors_to_tuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgrad_tensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 141\u001b[0;31m \u001b[0mgrad_tensors_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_make_grads\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgrad_tensors_\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 142\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mretain_graph\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 143\u001b[0m \u001b[0mretain_graph\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36m_make_grads\u001b[0;34m(outputs, grads)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequires_grad\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"grad can be implicitly created only for scalar outputs\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0mnew_grads\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mones_like\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmemory_format\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpreserve_format\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mRuntimeError\u001b[0m: grad can be implicitly created only for scalar outputs"
]
}
],
"source": [
"test.backward()"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "rapid-spoke",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"OrderedDict([('linear_step1.weight',\n",
" tensor([[ 0.2498, -0.2177, 0.2323, 0.3493, -0.2541],\n",
" [-0.2169, 0.0535, -0.1223, 0.0237, -0.0184],\n",
" [-0.1554, -0.0134, 0.2918, 0.3542, 0.3464],\n",
" [-0.2378, 0.3828, -0.3026, -0.1545, -0.1484],\n",
" [ 0.3282, -0.1492, 0.3551, -0.0447, -0.3294],\n",
" [ 0.2789, -0.1546, 0.2821, 0.0136, 0.4210],\n",
" [ 0.2911, -0.2191, 0.0493, 0.1006, 0.0470],\n",
" [-0.2269, 0.1705, 0.1198, 0.4040, 0.2512],\n",
" [-0.2696, -0.4259, 0.4229, 0.1412, 0.3553],\n",
" [ 0.0293, 0.4044, 0.3961, -0.3992, 0.2586],\n",
" [-0.3101, -0.0327, 0.1832, 0.0295, -0.3185],\n",
" [ 0.0637, -0.0770, 0.2297, -0.1567, 0.4379],\n",
" [-0.0540, -0.1769, 0.3407, 0.1942, 0.3494],\n",
" [-0.3609, -0.3536, 0.2491, -0.0490, -0.1199],\n",
" [ 0.2946, -0.0782, -0.0580, 0.2313, -0.0696]])),\n",
" tensor([[ 0.1832, -0.0033, -0.0508, -0.3904, 0.3259],\n",
" [-0.1713, 0.0096, -0.0525, 0.4356, -0.2567],\n",
" [ 0.3026, -0.4448, 0.2032, -0.2238, -0.1767],\n",
" [ 0.4211, 0.3466, 0.4140, 0.1798, 0.0438],\n",
" [ 0.2925, 0.2805, -0.1709, -0.0526, 0.1170],\n",
" [ 0.3011, -0.1513, -0.1232, -0.3212, 0.1243],\n",
" [-0.3001, 0.4073, -0.3111, 0.4319, -0.1216],\n",
" [ 0.2732, 0.2345, 0.0355, 0.3887, 0.4408],\n",
" [-0.4345, -0.0974, 0.0280, 0.2111, -0.3547],\n",
" [ 0.0311, 0.3230, 0.0452, 0.0421, -0.3895],\n",
" [-0.4149, -0.0149, 0.2047, 0.3821, -0.1537],\n",
" [-0.3941, -0.1030, -0.3153, 0.1546, -0.2481],\n",
" [-0.2590, -0.3550, -0.3910, -0.1634, 0.0569],\n",
" [-0.2632, -0.3178, -0.1942, -0.2556, -0.0210],\n",
" [-0.1319, -0.4315, 0.2441, -0.3021, -0.2024]])),\n",
" ('linear_step1.bias',\n",
" tensor([-0.4207, -0.1624, 0.0212, -0.0988, -0.2106, 0.2991, -0.3496, -0.1799,\n",
" -0.4257, -0.3384, -0.0020, 0.1267, 0.0252, 0.0037, 0.1784])),\n",
" tensor([ 0.4211, 0.3796, -0.0749, 0.1055, 0.2941, -0.0833, 0.2162, 0.4153,\n",
" -0.0988, 0.3072, -0.2772, 0.0678, 0.3566, 0.1708, -0.0892])),\n",
" ('linear_step2.weight',\n",
" tensor([[ 0.1404, -0.1424, 0.1518, -0.1080, 0.1269, -0.2030, -0.0533, -0.2240,\n",
" 0.0364, -0.0393, 0.1619, 0.1242, 0.0731, -0.1545, 0.2024],\n",
" [-0.2529, 0.0578, 0.1629, -0.0352, -0.2128, 0.0429, 0.0261, 0.2264,\n",
" -0.0470, 0.0277, 0.0272, -0.1074, -0.1334, 0.0792, -0.0173],\n",
" [ 0.0459, 0.2224, -0.2272, 0.0123, -0.0676, 0.2378, 0.2166, -0.0981,\n",
" 0.1010, -0.1593, -0.2422, -0.1253, 0.0899, -0.0760, -0.0816],\n",
" [ 0.1763, 0.2344, 0.0591, -0.2299, 0.1116, 0.0604, 0.2032, 0.1298,\n",
" 0.0509, 0.2581, 0.2425, -0.0920, 0.0098, 0.1353, -0.2110],\n",
" [ 0.0726, -0.1959, 0.2114, -0.0732, -0.1089, 0.0836, -0.1061, 0.1640,\n",
" 0.1221, 0.0281, -0.2401, 0.1108, 0.1354, 0.1903, -0.1006]])),\n",
" tensor([[ 0.0047, 0.0587, 0.1857, -0.2414, 0.0850, 0.2135, 0.0963, 0.0224,\n",
" 0.0328, 0.0438, -0.2483, 0.0613, 0.2572, 0.2077, 0.0558],\n",
" [ 0.2141, 0.0382, 0.1007, -0.0127, 0.2511, 0.0085, 0.1415, 0.1731,\n",
" -0.0145, 0.0885, -0.0495, 0.1828, 0.2506, -0.1226, -0.1583],\n",
" [ 0.0772, 0.1059, 0.0391, -0.0915, -0.2160, 0.1320, 0.0727, 0.1585,\n",
" 0.1454, -0.0973, -0.0960, -0.0753, -0.2022, 0.0092, 0.1243],\n",
" [-0.2120, 0.1741, -0.0886, 0.0816, 0.0032, 0.1702, 0.1375, -0.1119,\n",
" -0.0059, -0.1185, 0.2243, -0.1849, -0.1739, -0.0298, -0.1838],\n",
" [ 0.0095, 0.0369, 0.0736, 0.1567, -0.1493, 0.2170, 0.1332, 0.1684,\n",
" 0.1232, 0.1393, -0.2525, -0.1990, -0.1293, -0.1827, 0.1345]])),\n",
" ('linear_step2.bias',\n",
" tensor([0.2462, 0.0098, 0.1239, 0.0689, 0.2404]))])"
" tensor([ 0.1185, -0.1514, 0.1662, 0.0849, -0.1373]))])"
]
},
"execution_count": 13,
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
@ -172,7 +230,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "original-warehouse",
"id": "compatible-least",
"metadata": {},
"outputs": [],
"source": []

@ -0,0 +1,4 @@
# COMPUTATIONAL TODO
Currently the successful_recursion needs to be changed to include the launches as an NN.
- This may include creating a launch NN function (class maybe?)

@ -3,7 +3,7 @@
{
"cell_type": "code",
"execution_count": 1,
"id": "closed-glenn",
"id": "planned-choir",
"metadata": {
"tags": []
},
@ -16,7 +16,7 @@
},
{
"cell_type": "markdown",
"id": "naval-ivory",
"id": "japanese-split",
"metadata": {},
"source": [
"# Setup Functions\n",
@ -26,7 +26,7 @@
{
"cell_type": "code",
"execution_count": 2,
"id": "italian-enforcement",
"id": "final-contest",
"metadata": {},
"outputs": [],
"source": [
@ -62,7 +62,7 @@
},
{
"cell_type": "markdown",
"id": "fancy-tucson",
"id": "specific-centre",
"metadata": {},
"source": [
"## Setup functions related to the problem"
@ -71,7 +71,7 @@
{
"cell_type": "code",
"execution_count": 15,
"id": "outside-arrangement",
"id": "grand-jesus",
"metadata": {},
"outputs": [],
"source": [
@ -79,13 +79,8 @@
"\n",
"\n",
"def survival(stock, debris):\n",
" #Gompertz distribution for simplicity\n",
" #commonly used with saturation\n",
" #TODO: ACTUALLY DERIVE A SURVIVAL FUNCTION. THIS IS JUST A PLACEHOLDER. PROBABLY SHOULD BE AN EXPONENTIAL DISTRIBUTION\n",
"\n",
" #eta = 1.0/(SCALING@stock)\n",
" #b = 1/debris\n",
" #return 1 - ( b*eta*torch.exp(eta+b*stock-eta*torch.exp(b*stock)))\n",
" return 1 - torch.exp(-SCALING * stock-debris)\n",
"\n",
"def test_launch(stock, debris):\n",
@ -101,6 +96,7 @@
" return (new_stock, new_debris)\n",
"\n",
"#This is not a good specification of the profit function, but it will work for now.\n",
"#similar to Rao and Rondina's\n",
"def profit(x):\n",
" return UTIL_WEIGHTS @ x"
]
@ -108,7 +104,7 @@
{
"cell_type": "code",
"execution_count": 16,
"id": "romance-generation",
"id": "military-tunnel",
"metadata": {},
"outputs": [],
"source": [
@ -149,7 +145,7 @@
},
{
"cell_type": "markdown",
"id": "fluid-parks",
"id": "hindu-recruitment",
"metadata": {},
"source": [
"# Actual calculations"
@ -158,7 +154,7 @@
{
"cell_type": "code",
"execution_count": 17,
"id": "changing-january",
"id": "metric-bruce",
"metadata": {},
"outputs": [],
"source": [
@ -189,133 +185,52 @@
},
{
"cell_type": "code",
"execution_count": 19,
"id": "dominant-boost",
"execution_count": 21,
"id": "musical-neighbor",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([0.0000, 1.2632, 1.0526, 1.0526, 1.0892], grad_fn=<MvBackward>)\n",
"tensor([-0.9519, 1.3928, 1.0020, 1.0020, 1.0575], grad_fn=<MvBackward>)\n",
"tensor([-1.8565, 1.5150, 0.9530, 0.9530, 0.9882], grad_fn=<MvBackward>)\n",
"tensor([-2.8103, 1.6872, 0.9376, 0.9376, 0.9474], grad_fn=<MvBackward>)\n",
"tensor([-3.8626, 1.9131, 0.9505, 0.9505, 0.9408], grad_fn=<MvBackward>)\n",
"tensor([-5.0235, 2.1830, 0.9819, 0.9819, 0.9598], grad_fn=<MvBackward>)\n",
"tensor([-6.2860, 2.4869, 1.0247, 1.0247, 0.9951], grad_fn=<MvBackward>)\n",
"tensor([-7.6403, 2.8175, 1.0746, 1.0746, 1.0403], grad_fn=<MvBackward>)\n",
"tensor([-9.0802, 3.1712, 1.1293, 1.1293, 1.0918], grad_fn=<MvBackward>)\n",
"tensor([-10.6035, 3.5462, 1.1879, 1.1879, 1.1477],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-12.2108, 3.9422, 1.2501, 1.2501, 1.2075],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-13.9045, 4.3597, 1.3157, 1.3157, 1.2708],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-15.6882, 4.7995, 1.3849, 1.3849, 1.3375],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-17.5662, 5.2625, 1.4577, 1.4577, 1.4079],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-19.5432, 5.7500, 1.5345, 1.5345, 1.4820],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-21.6244, 6.2631, 1.6152, 1.6152, 1.5600],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-23.8151, 6.8033, 1.7002, 1.7002, 1.6421],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-26.1212, 7.3719, 1.7897, 1.7897, 1.7285],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-28.5486, 7.9704, 1.8839, 1.8839, 1.8195],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-31.1038, 8.6004, 1.9831, 1.9831, 1.9152],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-33.7934, 9.2636, 2.0874, 2.0874, 2.0160],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-36.6247, 9.9617, 2.1973, 2.1973, 2.1221],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-39.6049, 10.6965, 2.3129, 2.3129, 2.2338],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-42.7420, 11.4700, 2.4347, 2.4347, 2.3514],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-46.0442, 12.2842, 2.5628, 2.5628, 2.4751],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-49.5202, 13.1413, 2.6977, 2.6977, 2.6054],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-53.1792, 14.0435, 2.8397, 2.8397, 2.7425],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-57.0307, 14.9931, 2.9891, 2.9891, 2.8869],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-61.0850, 15.9928, 3.1465, 3.1465, 3.0388],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-65.3526, 17.0450, 3.3121, 3.3121, 3.1988],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-69.8449, 18.1526, 3.4864, 3.4864, 3.3671],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-74.5736, 19.3186, 3.6699, 3.6699, 3.5443],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-79.5511, 20.5459, 3.8630, 3.8630, 3.7309],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-84.7907, 21.8378, 4.0664, 4.0664, 3.9272],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-90.3060, 23.1976, 4.2804, 4.2804, 4.1339],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-96.1115, 24.6291, 4.5057, 4.5057, 4.3515],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-102.2227, 26.1359, 4.7428, 4.7428, 4.5805],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-108.6555, 27.7220, 4.9924, 4.9924, 4.8216],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-115.4268, 29.3916, 5.2552, 5.2552, 5.0754],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-122.5545, 31.1490, 5.5318, 5.5318, 5.3425],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-130.0574, 32.9990, 5.8229, 5.8229, 5.6237],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-137.9552, 34.9463, 6.1294, 6.1294, 5.9197],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-146.2686, 36.9961, 6.4520, 6.4520, 6.2313],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-155.0196, 39.1538, 6.7916, 6.7916, 6.5592],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-164.2312, 41.4251, 7.1490, 7.1490, 6.9044],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-173.9275, 43.8158, 7.5253, 7.5253, 7.2678],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-184.1343, 46.3325, 7.9213, 7.9213, 7.6503],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-194.8782, 48.9815, 8.3382, 8.3382, 8.0530],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-206.1876, 51.7701, 8.7771, 8.7771, 8.4768],\n",
" grad_fn=<MvBackward>)\n",
"tensor([-218.0922, 54.7053, 9.2391, 9.2391, 8.9230],\n",
" grad_fn=<MvBackward>)\n"
"(tensor([1.9592, 1.9592, 1.9592, 1.9592, 1.4664], grad_fn=<AddBackward0>), tensor([0.2451], grad_fn=<AddBackward0>), <function profit at 0x7f000d27fdc0>, tensor([1., 1., 1., 1., 1.], requires_grad=True), <function laws_of_motion at 0x7f003e976b80>, tensor([0.0000, 1.2632, 1.0526, 1.0526, 1.0892], grad_fn=<MvBackward>))\n",
"(tensor([2.7431, 2.7431, 2.7431, 2.7431, 2.2016], grad_fn=<AddBackward0>), tensor([0.0503], grad_fn=<AddBackward0>), <function profit at 0x7f000d27fdc0>, tensor([1., 1., 1., 1., 1.], requires_grad=True), <function laws_of_motion at 0x7f003e976b80>, tensor([-0.9519, 1.3928, 1.0020, 1.0020, 1.0575], grad_fn=<MvBackward>))\n",
"(tensor([3.5752, 3.5752, 3.5752, 3.5752, 2.9700], grad_fn=<AddBackward0>), tensor([0.0307], grad_fn=<AddBackward0>), <function profit at 0x7f000d27fdc0>, tensor([1., 1., 1., 1., 1.], requires_grad=True), <function laws_of_motion at 0x7f003e976b80>, tensor([-1.8565, 1.5150, 0.9530, 0.9530, 0.9882], grad_fn=<MvBackward>))\n",
"(tensor([4.4781, 4.4781, 4.4781, 4.4781, 3.8222], grad_fn=<AddBackward0>), tensor([0.0284], grad_fn=<AddBackward0>), <function profit at 0x7f000d27fdc0>, tensor([1., 1., 1., 1., 1.], requires_grad=True), <function laws_of_motion at 0x7f003e976b80>, tensor([-2.8103, 1.6872, 0.9376, 0.9376, 0.9474], grad_fn=<MvBackward>))\n",
"(tensor([5.4286, 5.4286, 5.4286, 5.4286, 4.7409], grad_fn=<AddBackward0>), tensor([0.0280], grad_fn=<AddBackward0>), <function profit at 0x7f000d27fdc0>, tensor([1., 1., 1., 1., 1.], requires_grad=True), <function laws_of_motion at 0x7f003e976b80>, tensor([-3.8626, 1.9131, 0.9505, 0.9505, 0.9408], grad_fn=<MvBackward>))\n"
]
}
],
"source": [
"#calculate results for first 5 iterations\n",
"for f in compose_recursive_functions(transition_wrapper,50):\n",
"for f in compose_recursive_functions(transition_wrapper,5):\n",
" result = f(base_data)\n",
" print(result[5])"
" print(result)\n",
" #need to write down what this is."
]
},
{
"cell_type": "markdown",
"id": "unnecessary-architect",
"id": "portable-placement",
"metadata": {},
"source": [
"# Notes on work so far\n",
"The issue below was resolved by choosing a different loss function. The point of needing to do a search over the determinant of A still holds.\n",
">>\n",
"Note how this fails on the last few iterations.\n",
"I need to get better model functions (profit, laws_of_motion, etc) together to test this out.\n",
"Alternatively, I can check for areas where the determinant of $A$ is zero, possibly by doing some sort of grid search?\n",
"\n",
"Maybe with a standard RBC model?\n",
"\n",
"Also, maybe I can create a `Model` class that upon construction will capture the necessary constants, functions, etc."
"Also, maybe I can create a `Model` class that upon construction will capture the necessary constants, functions, etc.\n",
"\n",
"I need to change the launch to be a neural network function that takes two inputs."
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "varying-organization",
"id": "frequent-subcommittee",
"metadata": {},
"outputs": [],
"source": []

Loading…
Cancel
Save