Fixed neural network stuff, and came up with an example training loop. First implementation of optimality conditions completed.

temporaryWork
youainti 5 years ago
parent d1840a9ed7
commit cd994a640e

@ -1,9 +1,17 @@
{ {
"cells": [ "cells": [
{
"cell_type": "code",
"execution_count": null,
"id": "sexual-collective",
"metadata": {},
"outputs": [],
"source": []
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": 1,
"id": "tested-harassment", "id": "complex-bookmark",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -13,7 +21,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 2, "execution_count": 2,
"id": "absolute-psychiatry", "id": "informal-prisoner",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -36,201 +44,147 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 29, "execution_count": 3,
"id": "exciting-pipeline", "id": "racial-natural",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"class LinearNet2(torch.nn.Module):\n", "model = LinearNet(input_size = 5, output_size=5, layers_size=5)"
" def __init__(self, input_size,output_size,layers_size):\n",
" super().__init__()\n",
" \n",
" #combined together into a set of sequential workers\n",
" self.sequential_layers = torch.nn.Sequential(\n",
" torch.nn.Linear(input_size,layers_size)\n",
" ,torch.nn.Linear(layers_size,output_size)\n",
" )\n",
" \n",
" def forward(self, input_values):\n",
" return self.sequential_layers(input_values)\n",
" \n",
" def fwd_wrapper(self,input1,input2):\n",
" #Combines two layers together.\n",
" #This supports the thought that I can create a \"launch\" nn that takes two values as input.\n",
" return self.sequential_layers(torch.cat((input1,input2),0))"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 22, "execution_count": 4,
"id": "romantic-visit", "id": "acting-athens",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"model = LinearNet2(input_size = 5, output_size=5, layers_size=15)" "data_in = torch.tensor([1.5,2,3,4,5])"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 23, "execution_count": 5,
"id": "finished-taste", "id": "later-bulgaria",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"name": "stdout", "data": {
"output_type": "stream", "text/plain": [
"text": [ "tensor([1.5000, 2.0000, 3.0000, 4.0000, 5.0000])"
"LinearNet2(\n",
" (sequential_layers): Sequential(\n",
" (0): Linear(in_features=5, out_features=15, bias=True)\n",
" (1): Linear(in_features=15, out_features=5, bias=True)\n",
" )\n",
")\n"
] ]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
} }
], ],
"source": [ "source": [
"print(model)" "data_in"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 24, "execution_count": 6,
"id": "dress-baltimore", "id": "large-recipient",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
"data_in = torch.tensor([1.5,2,3,4,5])\n", "target = torch.zeros(5)"
"data_in1 = torch.tensor([1.5,2])\n",
"data_in2 = torch.tensor([3,4,5])"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 25, "execution_count": 7,
"id": "korean-width", "id": "hybrid-interim",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [],
{
"data": {
"text/plain": [
"tensor([8.7445e-04, 5.1066e-02, 5.5142e-01, 3.3832e-01, 1.8822e+00],\n",
" grad_fn=<AddBackward0>)"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [ "source": [
"model.forward(data_in)" "def loss_fn2(output,target):\n",
" return torch.nn.MSELoss()(2*output,target)"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 30, "execution_count": 8,
"id": "novel-composer", "id": "environmental-mercury",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([8.7445e-04, 5.1066e-02, 5.5142e-01, 3.3832e-01, 1.8822e+00],\n",
" grad_fn=<AddBackward0>)"
]
},
"execution_count": 30,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "outputs": [],
}
],
"source": [ "source": [
"test = model.fwd_wrapper(data_in1,data_in2)\n", "#Prep Optimizer\n",
"test" "optimizer = torch.optim.SGD(model.parameters(),lr=0.01)"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 33, "execution_count": 9,
"id": "accepting-fireplace", "id": "internal-calibration",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"ename": "RuntimeError", "name": "stdout",
"evalue": "grad can be implicitly created only for scalar outputs", "output_type": "stream",
"output_type": "error", "text": [
"traceback": [ "tensor([-0.2090, -0.2099, 0.9129, -1.1506, 0.9998])\n",
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", "tensor([-0.2780, 0.1246, -0.0349, 0.0779, -0.1034])\n",
"\u001b[0;31mRuntimeError\u001b[0m Traceback (most recent call last)", "tensor([-0.0678, 0.0177, 0.0479, -0.0193, 0.0401])\n",
"\u001b[0;32m<ipython-input-33-b26b7a7d68aa>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mtest\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", "tensor([-0.0338, 0.0143, 0.0187, 0.0088, 0.0175])\n",
"\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/tensor.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(self, gradient, retain_graph, create_graph, inputs)\u001b[0m\n\u001b[1;32m 243\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 244\u001b[0m inputs=inputs)\n\u001b[0;32m--> 245\u001b[0;31m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgradient\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretain_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0minputs\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minputs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 246\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 247\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0mregister_hook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "tensor([-0.0157, 0.0081, 0.0111, 0.0089, 0.0140])\n",
"\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36mbackward\u001b[0;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001b[0m\n\u001b[1;32m 139\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 140\u001b[0m \u001b[0mgrad_tensors_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_tensor_or_tensors_to_tuple\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mgrad_tensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlen\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 141\u001b[0;31m \u001b[0mgrad_tensors_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_make_grads\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgrad_tensors_\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 142\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mretain_graph\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 143\u001b[0m \u001b[0mretain_graph\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcreate_graph\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "tensor([-0.0083, 0.0051, 0.0063, 0.0076, 0.0101])\n",
"\u001b[0;32m~/miniconda3/envs/pytorch-CPU/lib/python3.8/site-packages/torch/autograd/__init__.py\u001b[0m in \u001b[0;36m_make_grads\u001b[0;34m(outputs, grads)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrequires_grad\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnumel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;34m!=\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mRuntimeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"grad can be implicitly created only for scalar outputs\"\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0mnew_grads\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mones_like\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mout\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmemory_format\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpreserve_format\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", "tensor([-0.0049, 0.0033, 0.0038, 0.0058, 0.0074])\n",
"\u001b[0;31mRuntimeError\u001b[0m: grad can be implicitly created only for scalar outputs" "tensor([-0.0032, 0.0022, 0.0023, 0.0043, 0.0054])\n",
"tensor([-0.0022, 0.0015, 0.0014, 0.0031, 0.0040])\n",
"tensor([-0.0016, 0.0010, 0.0009, 0.0023, 0.0030])\n",
"tensor([-0.0012, 0.0007, 0.0006, 0.0017, 0.0022])\n",
"tensor([-0.0009, 0.0005, 0.0004, 0.0013, 0.0017])\n",
"tensor([-0.0007, 0.0003, 0.0003, 0.0010, 0.0013])\n",
"tensor([-0.0005, 0.0002, 0.0002, 0.0007, 0.0010])\n",
"tensor([-0.0004, 0.0002, 0.0002, 0.0005, 0.0007])\n",
"tensor([-0.0003, 0.0001, 0.0001, 0.0004, 0.0006])\n",
"tensor([-2.4156e-04, 9.8367e-05, 8.3392e-05, 3.1965e-04, 4.3452e-04])\n",
"tensor([-1.8733e-04, 7.3545e-05, 6.3141e-05, 2.4562e-04, 3.3559e-04])\n",
"tensor([-1.4602e-04, 5.5818e-05, 4.7690e-05, 1.8978e-04, 2.5897e-04])\n",
"tensor([-1.1313e-04, 4.2276e-05, 3.6786e-05, 1.4635e-04, 2.0107e-04])\n",
"tensor([-8.8156e-05, 3.2514e-05, 2.8075e-05, 1.1362e-04, 1.5562e-04])\n",
"tensor([-6.8587e-05, 2.5118e-05, 2.1561e-05, 8.8054e-05, 1.2085e-04])\n",
"tensor([-5.3484e-05, 1.9454e-05, 1.6658e-05, 6.8377e-05, 9.3834e-05])\n",
"tensor([-4.1497e-05, 1.4939e-05, 1.3124e-05, 5.3061e-05, 7.3066e-05])\n",
"tensor([-3.2303e-05, 1.1659e-05, 1.0152e-05, 4.1244e-05, 5.6852e-05])\n",
"\n",
" tensor(1.3901e-06, grad_fn=<MseLossBackward>)\n"
] ]
} }
], ],
"source": [ "source": [
"test.backward()" "for i in range(25):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = model.forward(data_in)\n",
" output,output.shape\n",
"\n",
" l = loss_fn2(output, target)\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" print(model.linear_step1.bias.grad)\n",
"print(\"\\n\",l)"
] ]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 10, "execution_count": null,
"id": "rapid-spoke", "id": "black-platinum",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"OrderedDict([('linear_step1.weight',\n",
" tensor([[ 0.1832, -0.0033, -0.0508, -0.3904, 0.3259],\n",
" [-0.1713, 0.0096, -0.0525, 0.4356, -0.2567],\n",
" [ 0.3026, -0.4448, 0.2032, -0.2238, -0.1767],\n",
" [ 0.4211, 0.3466, 0.4140, 0.1798, 0.0438],\n",
" [ 0.2925, 0.2805, -0.1709, -0.0526, 0.1170],\n",
" [ 0.3011, -0.1513, -0.1232, -0.3212, 0.1243],\n",
" [-0.3001, 0.4073, -0.3111, 0.4319, -0.1216],\n",
" [ 0.2732, 0.2345, 0.0355, 0.3887, 0.4408],\n",
" [-0.4345, -0.0974, 0.0280, 0.2111, -0.3547],\n",
" [ 0.0311, 0.3230, 0.0452, 0.0421, -0.3895],\n",
" [-0.4149, -0.0149, 0.2047, 0.3821, -0.1537],\n",
" [-0.3941, -0.1030, -0.3153, 0.1546, -0.2481],\n",
" [-0.2590, -0.3550, -0.3910, -0.1634, 0.0569],\n",
" [-0.2632, -0.3178, -0.1942, -0.2556, -0.0210],\n",
" [-0.1319, -0.4315, 0.2441, -0.3021, -0.2024]])),\n",
" ('linear_step1.bias',\n",
" tensor([ 0.4211, 0.3796, -0.0749, 0.1055, 0.2941, -0.0833, 0.2162, 0.4153,\n",
" -0.0988, 0.3072, -0.2772, 0.0678, 0.3566, 0.1708, -0.0892])),\n",
" ('linear_step2.weight',\n",
" tensor([[ 0.0047, 0.0587, 0.1857, -0.2414, 0.0850, 0.2135, 0.0963, 0.0224,\n",
" 0.0328, 0.0438, -0.2483, 0.0613, 0.2572, 0.2077, 0.0558],\n",
" [ 0.2141, 0.0382, 0.1007, -0.0127, 0.2511, 0.0085, 0.1415, 0.1731,\n",
" -0.0145, 0.0885, -0.0495, 0.1828, 0.2506, -0.1226, -0.1583],\n",
" [ 0.0772, 0.1059, 0.0391, -0.0915, -0.2160, 0.1320, 0.0727, 0.1585,\n",
" 0.1454, -0.0973, -0.0960, -0.0753, -0.2022, 0.0092, 0.1243],\n",
" [-0.2120, 0.1741, -0.0886, 0.0816, 0.0032, 0.1702, 0.1375, -0.1119,\n",
" -0.0059, -0.1185, 0.2243, -0.1849, -0.1739, -0.0298, -0.1838],\n",
" [ 0.0095, 0.0369, 0.0736, 0.1567, -0.1493, 0.2170, 0.1332, 0.1684,\n",
" 0.1232, 0.1393, -0.2525, -0.1990, -0.1293, -0.1827, 0.1345]])),\n",
" ('linear_step2.bias',\n",
" tensor([ 0.1185, -0.1514, 0.1662, 0.0849, -0.1373]))])"
]
},
"execution_count": 10,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "outputs": [],
} "source": []
],
"source": [
"model.state_dict()"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "compatible-least", "id": "transparent-medication",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [] "source": []

@ -6,8 +6,44 @@
## Next steps ## Next steps
- create the iterated optimality conditions - create the iterated optimality conditions
- attach iterated state variables to iterated transitions
- use these state variables to calculate the optimality condition values
- use these optimality conditions to create a loss function - use these optimality conditions to create a loss function
- Thoughts on converting my `connect_transitions_to_otimality_conditions` work to this.
I need to import torch into that section, and build a loss function.
- The basics of this model
- add boundary conditions to loss function - add boundary conditions to loss function
- get a basic gradient descent/optimization of launch function working. - get a basic gradient descent/optimization of launch function working.
- add satellite deorbit to model. - add satellite deorbit to model.
- turn this into a framework in a module, not just a single notebook (long term goal) - turn this into a framework in a module, not just a single notebook (long term goal)
## CONCERNS
So I need to think about how to handle the launch functions.
Currently, my launch function takes in the stocks and debris levels and returns a launch decision for each constellation.
This is nice because it keeps them together, but it may require some thoughtful NeuralNetwork design later.
The issue is that I need to set up a way to integrate multiple firms at the same time.
This may be possible through how I set up the profit functions.
# Scratch work
Writing out the functional forms that need to exist and the inheritance
- Euler equation
- Optimality Conditions
- Transition functions
- Loss function
- Bounds
- Euler equations
- Neural net launch function
Launch & Retire (a neural network)
NN(states) -> launch & deorbit decisions
Euler Equations
EE(NN, states) -> vector of numbers
Consists of
Iterated_Optimality(Iterated_Value_Derivatives(NN), Iterated_States(NN))
Loss Function
L(EE, Bounds, NN, States) -> positive number

@ -3,7 +3,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 1, "execution_count": 1,
"id": "expired-austria", "id": "loved-quarter",
"metadata": { "metadata": {
"tags": [] "tags": []
}, },
@ -11,12 +11,13 @@
"source": [ "source": [
"import torch\n", "import torch\n",
"from torch.autograd.functional import jacobian\n", "from torch.autograd.functional import jacobian\n",
"import itertools" "import itertools\n",
"import torch #pytorch"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "damaged-accountability", "id": "educated-cosmetic",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Setup Functions\n", "# Setup Functions\n",
@ -26,7 +27,7 @@
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 2, "execution_count": 2,
"id": "modular-memorabilia", "id": "single-currency",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -73,7 +74,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "direct-picture", "id": "ordered-arrow",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# functions related to transitions" "# functions related to transitions"
@ -81,8 +82,8 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 37, "execution_count": 3,
"id": "bridal-ordinary", "id": "cellular-consensus",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -156,7 +157,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "miniature-karaoke", "id": "entertaining-hurricane",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Setup functions related to the problem" "## Setup functions related to the problem"
@ -164,8 +165,8 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 38, "execution_count": 4,
"id": "bright-minimum", "id": "supposed-rating",
"metadata": { "metadata": {
"tags": [] "tags": []
}, },
@ -215,7 +216,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "conservative-ukraine", "id": "located-anatomy",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Actual calculations" "# Actual calculations"
@ -223,8 +224,8 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 39, "execution_count": 5,
"id": "initial-mathematics", "id": "aggregate-hughes",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -242,7 +243,7 @@
"launches = test_launch\n", "launches = test_launch\n",
"\n", "\n",
"#Starting point\n", "#Starting point\n",
"# Stocks, debris, profit fn, laws of motion, \n", "# Stocks, debris, profit fn, laws of motion, item to transition, Launch function\n",
"base_data = (stocks,debris, profit, laws_of_motion, torch.ones(6, requires_grad=True),launches)\n", "base_data = (stocks,debris, profit, laws_of_motion, torch.ones(6, requires_grad=True),launches)\n",
"\n", "\n",
"#Parameters\n", "#Parameters\n",
@ -257,36 +258,21 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 75, "execution_count": 32,
"id": "nuclear-definition", "id": "molecular-express",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"name": "stdout", "name": "stdout",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"tensor([-0.4523, 0.8774, 0.6558, 0.6558, 0.7608, 11.0954],\n", "(tensor([1.9592, 1.9592, 1.9592, 1.9592, 1.4664], grad_fn=<AddBackward0>), tensor([0.2451], grad_fn=<AddBackward0>), <function profit at 0x7fa6269e4550>, <function laws_of_motion at 0x7fa6269e44c0>, tensor([-0.4523, 0.8774, 0.6558, 0.6558, 0.7608, 11.0954],\n",
" grad_fn=<MvBackward>) \n", " grad_fn=<MvBackward>), <function test_launch at 0x7fa6269e4430>) \n",
"\n", "\n",
"\n", "\n",
"\n", "\n",
"tensor([-25.6591, -23.1244, -23.5468, -23.5468, -29.4675, 123.8316],\n", "(tensor([2.7431, 2.7431, 2.7431, 2.7431, 2.2016], grad_fn=<AddBackward0>), tensor([0.0503], grad_fn=<AddBackward0>), <function profit at 0x7fa6269e4550>, <function laws_of_motion at 0x7fa6269e44c0>, tensor([-25.6591, -23.1244, -23.5468, -23.5468, -29.4675, 123.8316],\n",
" grad_fn=<MvBackward>) \n", " grad_fn=<MvBackward>), <function test_launch at 0x7fa6269e4430>) \n",
"\n",
"\n",
"\n",
"tensor([-236.0462, -232.3070, -232.9302, -232.9302, -312.7054, 1379.8452],\n",
" grad_fn=<MvBackward>) \n",
"\n",
"\n",
"\n",
"tensor([-1638.0408, -1632.9258, -1633.7783, -1633.7783, -2380.2834, 15341.8955],\n",
" grad_fn=<MvBackward>) \n",
"\n",
"\n",
"\n",
"tensor([ -9855.1680, -9848.4297, -9849.5527, -9849.5527, -15547.5273,\n",
" 170308.7812], grad_fn=<MvBackward>) \n",
"\n", "\n",
"\n", "\n",
"\n" "\n"
@ -295,14 +281,15 @@
], ],
"source": [ "source": [
"#Get the values from 5 transitions\n", "#Get the values from 5 transitions\n",
"for f in compose_recursive_functions(transition_wrapper,5):\n", "wt1 = compose_recursive_functions(transition_wrapper,2)\n",
"for f in wt1:\n",
" result = f(base_data)\n", " result = f(base_data)\n",
" print(result[4], \"\\n\"*3)" " print(result, \"\\n\"*3)"
] ]
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "casual-annex", "id": "broken-iraqi",
"metadata": {}, "metadata": {},
"source": [ "source": [
"Also, maybe I can create a `Model` class that upon construction will capture the necessary constants, functions, etc.\n"
@ -310,7 +297,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "stopped-socket", "id": "illegal-philosophy",
"metadata": {}, "metadata": {},
"source": [ "source": [
"# Optimality conditions"
@ -318,8 +305,8 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 193, "execution_count": 7,
"id": "excessive-script", "id": "portuguese-soldier",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [ "source": [
@ -343,8 +330,18 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 195, "execution_count": 11,
"id": "unlikely-coverage", "id": "hundred-bacteria",
"metadata": {},
"outputs": [],
"source": [
"tmp_result = torch.tensor([1.0,2.0,3,4,5,6])"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "temporal-fancy",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
@ -353,7 +350,7 @@
"tensor(49.4968)" "tensor(49.4968)"
] ]
}, },
"execution_count": 195, "execution_count": 12,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
@ -364,7 +361,7 @@
}, },
{ {
"cell_type": "markdown", "cell_type": "markdown",
"id": "endless-occupation", "id": "commercial-liverpool",
"metadata": {}, "metadata": {},
"source": [ "source": [
"## Now to set up the recursive set of optimality conditions"
@ -372,27 +369,99 @@
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 179, "execution_count": 35,
"id": "valuable-bleeding", "id": "simple-steering",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([-1.3770, 0.8862, 0.6757, 0.6757, 0.7754], grad_fn=<AddBackward0>)\n",
"tensor(4.1957, grad_fn=<AddBackward0>)\n",
"\n",
"tensor([-24.7879, -21.3799, -21.7813, -21.7813, -27.4059],\n",
" grad_fn=<AddBackward0>)\n",
"tensor(2771.4756, grad_fn=<AddBackward0>)\n",
"\n",
"tensor([-218.6896, -214.1374, -214.7294, -214.7294, -290.5158],\n",
" grad_fn=<AddBackward0>)\n",
"tensor(270296.8438, grad_fn=<AddBackward0>)\n",
"\n"
]
}
],
"source": [
"base_data = (stocks,debris, profit, laws_of_motion, torch.ones(6, requires_grad=True),launches)\n",
"for f in compose_recursive_functions(transition_wrapper,3):\n",
" result = f(base_data)\n",
" \n",
" #unpack results\n",
" new_stocks, new_debris, profit_fn, laws_motion_fn, transitioned, launch_fn = result\n",
" \n",
" optimal = optimality(new_stocks,new_debris,profit_fn,laws_motion_fn,launch_fn,transitioned)\n",
" print(optimal)\n",
" print(sum(optimal**2))\n",
" print()"
]
},
{
"cell_type": "code",
"execution_count": 46,
"id": "amber-front",
"metadata": {},
"outputs": [],
"source": [
"def optimality_wrapper(stocks,debris, launches):\n",
" return optimality(stocks,debris, profit, laws_of_motion, launches, torch.ones(6, requires_grad=True))"
]
},
{
"cell_type": "code",
"execution_count": 49,
"id": "least-shock",
"metadata": {}, "metadata": {},
"outputs": [ "outputs": [
{ {
"data": { "data": {
"text/plain": [ "text/plain": [
"tensor([0.0300, 2.0300, 3.0300, 4.0300, 5.0300])" "tensor([-0.0452, 0.9548, 0.9548, 0.9548, 0.9548], grad_fn=<AddBackward0>)"
] ]
}, },
"execution_count": 179, "execution_count": 49,
"metadata": {}, "metadata": {},
"output_type": "execute_result" "output_type": "execute_result"
} }
], ],
"source": [] "source": [
"optimality_wrapper(stocks,debris,launches)"
]
},
{
"cell_type": "code",
"execution_count": 52,
"id": "offshore-announcement",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor(1.)"
]
},
"execution_count": 52,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"torch.nn.MSELoss()(torch.tensor([1.0]), torch.tensor([2]))"
]
}, },
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": null, "execution_count": null,
"id": "subjective-chassis", "id": "satellite-match",
"metadata": {}, "metadata": {},
"outputs": [], "outputs": [],
"source": [] "source": []

Loading…
Cancel
Save