got most of the new interfaces/abstractions written, and they appear to be working. Have the basics of a NN outlined. Have added a document with information about neural networks and PyTorch in particular. Wrote my own Upscaling module.

temporaryWork^2
youainti 5 years ago
parent d0bcdd87c0
commit 2657f4d821

2
.gitignore vendored

@ -298,4 +298,6 @@ TSWLatexianTemp*
# don't track PDFs
*.pdf
#Don't track python/jupyterlab stuff
*/.ipynb_checkpoints/*
*/__pycache__/*

@ -0,0 +1,19 @@
# Things I have learned about PyTorch and Neural networks.
## Building models
All model building in Pytorch is based on the following three steps
1. start by creating an object that extends the nn.Module base class
2. define layers as class attributes (sequential wrapper for ease of use)
3. implement the `.forward()` method
Each layer is just a predefined 'function'.
Really, they are objects that extend the nn.Module base class.
Thus each NN can act as a layer in another NN.
For example, I reimplemented an upscaling layer in BasicNeuralNet2.
[I picked up a lot of this info here.](https://deeplizard.com/learn/video/k4jY9L8H89U)
Also, neural networks can return more than just a single output as long as the
loss function that is used for optimization can consume both of them.
Thus I could write two separate neural networks (such as for launch and partials),
and then write a third NN that binds the two together.

@ -214,7 +214,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
"version": "3.9.2"
}
},
"nbformat": 4,

@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "markdown",
"id": "prepared-nitrogen",
"id": "comprehensive-toyota",
"metadata": {},
"source": [
"Note on pytorch. NN optimization acts imperatively/by side effect as follows.\n",
@ -23,7 +23,7 @@
{
"cell_type": "code",
"execution_count": 1,
"id": "grateful-conviction",
"id": "together-jewelry",
"metadata": {},
"outputs": [],
"source": [
@ -33,8 +33,10 @@
{
"cell_type": "code",
"execution_count": 2,
"id": "incorrect-animal",
"metadata": {},
"id": "hispanic-grain",
"metadata": {
"tags": []
},
"outputs": [],
"source": [
"class DoubleNetwork(torch.nn.Module):\n",
@ -62,7 +64,7 @@
{
"cell_type": "code",
"execution_count": 3,
"id": "ruled-letter",
"id": "practical-gilbert",
"metadata": {},
"outputs": [
{
@ -70,45 +72,45 @@
"output_type": "stream",
"text": [
"\n",
" tensor(3.5646, grad_fn=<AddBackward0>)\n",
" tensor(10.7553, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(11.7849, grad_fn=<AddBackward0>)\n",
" tensor(64.3239, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(24.8772, grad_fn=<AddBackward0>)\n",
" tensor(17.9537, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(5.4752, grad_fn=<AddBackward0>)\n",
" tensor(60.9679, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.4457, grad_fn=<AddBackward0>)\n",
" tensor(30.1436, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0925, grad_fn=<AddBackward0>)\n",
" tensor(89.3963, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0490, grad_fn=<AddBackward0>)\n",
" tensor(70.8575, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0290, grad_fn=<AddBackward0>)\n",
" tensor(24.7911, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0178, grad_fn=<AddBackward0>)\n",
" tensor(695.9885, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0111, grad_fn=<AddBackward0>)\n",
" tensor(339753.2500, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0070, grad_fn=<AddBackward0>)\n",
" tensor(8.0135e+13, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0045, grad_fn=<AddBackward0>)\n",
" tensor(inf, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0029, grad_fn=<AddBackward0>)\n",
" tensor(nan, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0019, grad_fn=<AddBackward0>)\n",
" tensor(nan, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0012, grad_fn=<AddBackward0>)\n",
" tensor(nan, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0008, grad_fn=<AddBackward0>)\n",
" tensor(nan, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0005, grad_fn=<AddBackward0>)\n",
" tensor(nan, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0003, grad_fn=<AddBackward0>)\n",
" tensor(nan, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0002, grad_fn=<AddBackward0>)\n",
" tensor(nan, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0001, grad_fn=<AddBackward0>)\n"
" tensor(nan, grad_fn=<AddBackward0>)\n"
]
}
],
@ -147,7 +149,7 @@
{
"cell_type": "code",
"execution_count": 4,
"id": "quantitative-keeping",
"id": "early-victoria",
"metadata": {},
"outputs": [],
"source": [
@ -182,7 +184,7 @@
{
"cell_type": "code",
"execution_count": 5,
"id": "vietnamese-prophet",
"id": "sustained-avatar",
"metadata": {},
"outputs": [],
"source": [
@ -202,7 +204,7 @@
{
"cell_type": "code",
"execution_count": 6,
"id": "limiting-slide",
"id": "inclusive-rouge",
"metadata": {},
"outputs": [
{
@ -210,55 +212,55 @@
"output_type": "stream",
"text": [
"\n",
" tensor(9.6420, grad_fn=<AddBackward0>)\n",
" tensor(8.4134, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(4.1914, grad_fn=<AddBackward0>)\n",
" tensor(5.9490, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(5.1337, grad_fn=<AddBackward0>)\n",
" tensor(4.8652, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(1.4943, grad_fn=<AddBackward0>)\n",
" tensor(3.7577, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.5210, grad_fn=<AddBackward0>)\n",
" tensor(2.5462, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.1217, grad_fn=<AddBackward0>)\n",
" tensor(1.3803, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0605, grad_fn=<AddBackward0>)\n",
" tensor(0.5700, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0256, grad_fn=<AddBackward0>)\n",
" tensor(0.2055, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0126, grad_fn=<AddBackward0>)\n",
" tensor(0.0747, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0057, grad_fn=<AddBackward0>)\n",
" tensor(0.0274, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0028, grad_fn=<AddBackward0>)\n",
" tensor(0.0101, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0013, grad_fn=<AddBackward0>)\n",
" tensor(0.0037, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0006, grad_fn=<AddBackward0>)\n",
" tensor(0.0014, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0003, grad_fn=<AddBackward0>)\n",
" tensor(0.0005, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0001, grad_fn=<AddBackward0>)\n",
" tensor(0.0002, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(7.2050e-05, grad_fn=<AddBackward0>)\n",
" tensor(7.1453e-05, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(3.5139e-05, grad_fn=<AddBackward0>)\n",
" tensor(2.6635e-05, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(1.7068e-05, grad_fn=<AddBackward0>)\n",
" tensor(9.9370e-06, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(8.3342e-06, grad_fn=<AddBackward0>)\n",
" tensor(3.7096e-06, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(4.0624e-06, grad_fn=<AddBackward0>)\n",
" tensor(1.3858e-06, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(1.9857e-06, grad_fn=<AddBackward0>)\n",
" tensor(5.1807e-07, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(9.7029e-07, grad_fn=<AddBackward0>)\n",
" tensor(1.9388e-07, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(4.7492e-07, grad_fn=<AddBackward0>)\n",
" tensor(7.2581e-08, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(2.3232e-07, grad_fn=<AddBackward0>)\n",
" tensor(2.7196e-08, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(1.1381e-07, grad_fn=<AddBackward0>)\n"
" tensor(1.0235e-08, grad_fn=<AddBackward0>)\n"
]
}
],
@ -284,30 +286,95 @@
},
{
"cell_type": "code",
"execution_count": 7,
"id": "elder-karen",
"execution_count": null,
"id": "sound-insulation",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": 144,
"id": "promotional-accent",
"metadata": {},
"outputs": [],
"source": [
"#This is a custom upscale module.\n",
"class CustomUpscale(torch.nn.Module):\n",
" def __init__(self, input_size,layers_size,scale_factor):\n",
" super().__init__()\n",
" self.scale_factor = scale_factor\n",
" \n",
" #So, this next section constructs different layers within the NN\n",
"        #single linear section\n",
" self.linear_step_1a = torch.nn.Linear(in_features=input_size, out_features=layers_size)\n",
" self.upscale_step = lambda x: torch.nn.functional.interpolate(x, scale_factor=self.scale_factor).view(x.numel(),self.scale_factor)\n",
" #single linear section\n",
" \n",
" def forward(self, input_values):\n",
" \n",
" intermediate_values_a = self.linear_step_1a(input_values)\n",
" intermediate_values_b = self.upscale_step(intermediate_values_a)\n",
" \n",
" return intermediate_values_b"
]
},
{
"cell_type": "code",
"execution_count": 145,
"id": "english-basement",
"metadata": {},
"outputs": [],
"source": [
"nn = MultiDimOut(3,12,3)"
]
},
{
"cell_type": "code",
"execution_count": 146,
"id": "passive-chapel",
"metadata": {},
"outputs": [],
"source": [
"test = torch.tensor([[[1.0,3,4]]])"
]
},
{
"cell_type": "code",
"execution_count": 147,
"id": "passing-heath",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(tensor([ 3.4232e-05, 3.7350e-05, 5.3748e-05, -2.7344e-05, -1.0052e-04],\n",
" grad_fn=<AddBackward0>),\n",
" tensor([1.0001, 1.0001, 1.0000, 1.0000, 1.0001, 1.0000, 1.0001],\n",
" grad_fn=<AddBackward0>))"
"tensor([[ 1.9191, 1.9191, 1.9191],\n",
" [-1.4519, -1.4519, -1.4519],\n",
" [ 0.4698, 0.4698, 0.4698],\n",
" [ 0.5203, 0.5203, 0.5203],\n",
" [-2.8474, -2.8474, -2.8474],\n",
" [ 2.1781, 2.1781, 2.1781],\n",
" [ 0.1220, 0.1220, 0.1220],\n",
" [ 3.4155, 3.4155, 3.4155],\n",
" [-0.5984, -0.5984, -0.5984],\n",
" [-0.8493, -0.8493, -0.8493],\n",
" [-0.6150, -0.6150, -0.6150],\n",
" [ 0.6329, 0.6329, 0.6329]], grad_fn=<ViewBackward>)"
]
},
"execution_count": 7,
"execution_count": 147,
"metadata": {},
"output_type": "execute_result"
}
],
"source": []
"source": [
"nn.forward(test)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "agreed-community",
"id": "herbal-mission",
"metadata": {},
"outputs": [],
"source": []

@ -3,7 +3,7 @@
{
"cell_type": "code",
"execution_count": 1,
"id": "consolidated-separation",
"id": "indie-evolution",
"metadata": {
"tags": []
},
@ -74,7 +74,7 @@
},
{
"cell_type": "markdown",
"id": "numeric-coral",
"id": "stuffed-firmware",
"metadata": {},
"source": [
"# Setup Functions\n",
@ -84,7 +84,7 @@
{
"cell_type": "code",
"execution_count": 2,
"id": "detected-still",
"id": "mexican-serial",
"metadata": {},
"outputs": [],
"source": [
@ -131,7 +131,7 @@
},
{
"cell_type": "markdown",
"id": "fundamental-fusion",
"id": "public-alloy",
"metadata": {},
"source": [
"# functions related to transitions"
@ -140,9 +140,18 @@
{
"cell_type": "code",
"execution_count": 3,
"id": "palestinian-uganda",
"id": "advised-enemy",
"metadata": {},
"outputs": [],
"outputs": [
{
"ename": "SyntaxError",
"evalue": "invalid syntax (<ipython-input-3-2a8ca63b5912>, line 52)",
"output_type": "error",
"traceback": [
"\u001b[0;36m File \u001b[0;32m\"<ipython-input-3-2a8ca63b5912>\"\u001b[0;36m, line \u001b[0;32m52\u001b[0m\n\u001b[0;31m launch = neural_network.forward().\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n"
]
}
],
"source": [
"def single_transition(laws_motion_fn, profit_fn, stocks, debris, neural_network):\n",
" \"\"\"\n",
@ -184,7 +193,7 @@
" #Includes rearranging the jacobian of profit.\n",
"\n",
" #Return the transitioned values\n",
" return ( A.inverse()/BETA ) @ T\n",
" return ( A.inverse() ) @ T\n",
"\n",
"\n",
"# This function wraps the single transition and handles updating dates etc.\n",
@ -214,7 +223,7 @@
},
{
"cell_type": "markdown",
"id": "mexican-illness",
"id": "suspected-clerk",
"metadata": {},
"source": [
"## Setup functions related to the problem"
@ -222,8 +231,8 @@
},
{
"cell_type": "code",
"execution_count": 4,
"id": "republican-designer",
"execution_count": null,
"id": "confused-conclusion",
"metadata": {
"tags": []
},
@ -269,8 +278,8 @@
},
{
"cell_type": "code",
"execution_count": 5,
"id": "introductory-forwarding",
"execution_count": null,
"id": "miniature-thread",
"metadata": {},
"outputs": [],
"source": [
@ -291,7 +300,7 @@
},
{
"cell_type": "markdown",
"id": "concrete-movement",
"id": "yellow-frank",
"metadata": {},
"source": [
"# Actual calculations"
@ -299,21 +308,10 @@
},
{
"cell_type": "code",
"execution_count": 6,
"id": "wrong-values",
"execution_count": null,
"id": "enormous-provider",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(3, 11)"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"#number of states\n",
"N = 5\n",
@ -356,8 +354,8 @@
},
{
"cell_type": "code",
"execution_count": 7,
"id": "charitable-cleanup",
"execution_count": null,
"id": "biblical-blake",
"metadata": {},
"outputs": [],
"source": [
@ -368,27 +366,10 @@
},
{
"cell_type": "code",
"execution_count": 8,
"id": "floppy-arkansas",
"execution_count": null,
"id": "given-clearance",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Launch Decisions and Partial Derivativs of value function with\n",
"\t states\n",
"\t\t tensor([1., 1., 1., 1., 1.], requires_grad=True)\n",
"\tPartials\n",
"\t\ttensor([[1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.]], requires_grad=True)\n"
]
}
],
"outputs": [],
"source": [
"m = ModelMockup()\n",
"print(m.forward(stocks,debris))"
@ -396,7 +377,7 @@
},
{
"cell_type": "markdown",
"id": "dressed-preparation",
"id": "higher-windsor",
"metadata": {},
"source": [
"# Optimality conditions"
@ -404,8 +385,8 @@
},
{
"cell_type": "code",
"execution_count": 9,
"id": "hydraulic-powder",
"execution_count": null,
"id": "breeding-sussex",
"metadata": {},
"outputs": [],
"source": [
@ -449,7 +430,7 @@
},
{
"cell_type": "markdown",
"id": "provincial-medline",
"id": "actual-polish",
"metadata": {},
"source": [
"## Now to set up the recursive set of optimality conditions"
@ -457,23 +438,10 @@
},
{
"cell_type": "code",
"execution_count": 10,
"id": "monetary-bermuda",
"execution_count": null,
"id": "thrown-subject",
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "'ModelMockup' object is not callable",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0mTraceback (most recent call last)",
"\u001b[0;32m<ipython-input-10-282ba729dd5a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0mbase_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mstocks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdebris\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprofit\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlaws_of_motion\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mones\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m6\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrequires_grad\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mlaunches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mf\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mcompose_recursive_functions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtransition_wrapper\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbase_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;31m#unpack results\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-3-fcc1e6d7dbd7>\u001b[0m in \u001b[0;36mtransition_wrapper\u001b[0;34m(data_in)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;31m#Calculate new states\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mnew_stocks\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_debris\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlaws_motion_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstocks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdebris\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlaunch_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstocks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdebris\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;31m#WARNING: RECURSION: You may break your head...\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mTypeError\u001b[0m: 'ModelMockup' object is not callable"
]
}
],
"outputs": [],
"source": [
"def recursive_optimality(base_data,transition_wrapper):\n",
" #create and return a set of transition wrappers\n",
@ -495,7 +463,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "imported-richards",
"id": "strange-appliance",
"metadata": {},
"outputs": [],
"source": [
@ -506,7 +474,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "excited-question",
"id": "friendly-acrobat",
"metadata": {},
"outputs": [],
"source": [
@ -516,14 +484,14 @@
{
"cell_type": "code",
"execution_count": null,
"id": "outer-wages",
"id": "patient-builder",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "chronic-drilling",
"id": "referenced-defense",
"metadata": {},
"source": [
"Notes so far\n",
@ -548,7 +516,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "necessary-incident",
"id": "incorrect-carol",
"metadata": {},
"outputs": [],
"source": []

@ -17,7 +17,11 @@
- get a basic gradient descent/optimization of launch function working.
- add satellite deorbit to model.
- turn this into a framework in a module, not just a single notebook (long term goal)
- turn testing_combined into an actual test setup
- change prints to assertions
- turn into functions
- add into a testing framework
- this isn't that important.
## CONCERNS
So I need to think about how to handle the launch functions.
Currently, my launch function takes in the stocks and debris levels and returns a launch decision for each constellation.

@ -0,0 +1,239 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"id": "standing-catch",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from torch.autograd.functional import jacobian\n",
"import itertools\n",
"import math"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "going-accident",
"metadata": {},
"outputs": [],
"source": [
"import combined as c"
]
},
{
"cell_type": "markdown",
"id": "severe-employment",
"metadata": {},
"source": [
"So, this contains a bunch of initial tests of my abstractions. I eventually need to change these to assertions and package them."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "ranking-family",
"metadata": {},
"outputs": [],
"source": [
"#Instantiate some objects\n",
"pm = c.PhysicalModel(1.0,1e-6,0.01,2.0,1e-8)\n",
"s = c.States(torch.tensor([1.0,2,3]), torch.tensor([0.0]))\n",
"lp = c.LinearProfit(0,0.95,torch.tensor([1.0,0,0]), 5)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "premium-brisbane",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(tensor([-7.5676e+05, 2.8893e+05, 4.6783e+05, 1.5236e+00]),\n",
" <combined.States at 0x7f31f0146c10>)"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.single_transition(pm,lp,s,est_int)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "raised-worthy",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-718920.5625, 274490.1562, 444444.6250])"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"minimand, iterated_partials, iterated_state = c.optimality(pm,lp,s,est_int)\n",
"minimand"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "horizontal-insight",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-2285563., -2285557., -2285557.])"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int2 = c.EstimandInterface(iterated_partials,torch.ones(3))\n",
"\n",
"minimand2, iterated_partials2, iterated_state2 = c.optimality(pm,lp,iterated_state,est_int2)\n",
"minimand2"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "automatic-builder",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-2405858.5000, -2405852.5000, -2405852.5000])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int3 = c.EstimandInterface(iterated_partials2,torch.ones(3))\n",
"\n",
"minimand3, iterated_partials3, iterated_state3 = c.optimality(pm,lp,iterated_state2,est_int3)\n",
"minimand3"
]
},
{
"cell_type": "markdown",
"id": "changing-mainland",
"metadata": {},
"source": [
"So, this successfully let me link the two. I'm going to move to another notebook, clean up, and start integrating the system"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "assumed-midwest",
"metadata": {},
"outputs": [],
"source": [
"model = DoubleNetwork(input_size = 5, output_size=5, layers_size=15)\n",
"\n",
"data_in = torch.tensor([1.5,2,3,4,5])\n",
"\n",
"data_in\n",
"\n",
"target = torch.zeros(5)\n",
"\n",
"def loss_fn2(output,target):\n",
" return sum((output[1] +output[0] - target)**2)\n",
" #could add a simplicity assumption i.e. l1 on parameters.\n",
"\n",
"#Prep Optimizer\n",
"optimizer = torch.optim.SGD(model.parameters(),lr=0.01)\n",
"\n",
"for i in range(20):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = model.forward(data_in)\n",
" output\n",
"\n",
" l = loss_fn2(output, target)\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" print(\"\\n\",l)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "surprising-fundamentals",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "parliamentary-delta",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "emotional-castle",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "substantial-exhibit",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -0,0 +1,383 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "pleasant-equation",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from torch.autograd.functional import jacobian\n",
"import itertools\n",
"import math"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "moved-christian",
"metadata": {},
"outputs": [],
"source": [
"import combined as c"
]
},
{
"cell_type": "markdown",
"id": "pressed-slope",
"metadata": {},
"source": [
"So, this contains a bunch of initial tests of my abstractions. I eventually need to change these to assertions and package them."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "capable-equality",
"metadata": {},
"outputs": [],
"source": [
"#Instantiate some objects\n",
"pm = c.PhysicalModel(1.0,1e-6,0.01,2.0,1e-8)\n",
"s = c.States(torch.tensor([1.0,2,3]), torch.tensor([0.0]))\n",
"lp = c.LinearProfit(0,0.95,torch.tensor([1.0,0,0]), 5)\n",
"est_int = c.EstimandInterface(torch.tensor([[1.0,2,3,2]\n",
" ,[4,5,6,2]\n",
" ,[7,8,9,2]\n",
" ,[1,3,5,7]]\n",
" ),torch.ones(3))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "written-experience",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"stocks\ttensor([1., 2., 3.]) \n",
"debris\t tensor([0.])\n",
"3\n",
"1\n"
]
}
],
"source": [
"#test State object \n",
"print(s)\n",
"print(s.number_constellations)\n",
"print(s.number_debris_trackers)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "twelve-arthur",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Launch Decisions and Partial Derivativs of value function with\n",
"\tlaunches\n",
"\t\t tensor([1., 1., 1.])\n",
"\tPartials\n",
"\t\ttensor([[1., 2., 3., 2.],\n",
" [4., 5., 6., 2.],\n",
" [7., 8., 9., 2.],\n",
" [1., 3., 5., 7.]])\n",
"tensor([1., 1., 1.]) tensor([[1., 2., 3., 2.],\n",
" [4., 5., 6., 2.],\n",
" [7., 8., 9., 2.],\n",
" [1., 3., 5., 7.]])\n",
"tensor(1.)\n",
"tensor([0., 1., 0.])\n",
"tensor([2., 5., 8., 3.])\n",
"tensor([[0., 2., 0., 0.],\n",
" [0., 5., 0., 0.],\n",
" [0., 8., 0., 0.],\n",
" [0., 3., 0., 0.]])\n"
]
}
],
"source": [
"#Test estimand interface\n",
"print(est_int)\n",
"print(est_int.launches,est_int.partials)\n",
"\n",
"print(est_int.launch_single(1))\n",
"print(est_int.launch_vector(1))\n",
"print(est_int.partial_vector(1)) \n",
"print(est_int.partial_matrix(1)) #TODO: double check orientation"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "impressive-tribe",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"1.0\n",
"1e-06\n",
"0.01\n",
"2.0\n",
"1e-08\n",
"tensor([1.0133e-06, 2.0266e-06, 2.9802e-06])\n",
"tensor([1., 2., 3.]) tensor([0.])\n",
"tensor([1.0000, 1.0000, 1.0000]) tensor([12.0000])\n"
]
}
],
"source": [
"#Test physical model methods\n",
"print(pm)\n",
"print(pm.survival(s))\n",
"s2 = pm.transition(s,est_int)\n",
"print(s.stocks,s.debris)\n",
"print(s2.stocks,s2.debris)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "stretch-reward",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"LinearProfit\n",
" Benefit weights:\ttensor([1., 0., 0.])\n",
" launch cost:\t5\n",
" Deorbit cost:\t0\n",
"tensor(-4.)\n"
]
}
],
"source": [
"#test linear profit object\n",
"print(lp)\n",
"print(lp.period_benefit(s,est_int))"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "advance-folder",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([1., 0., 0., 0.])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lp._period_benefit_jacobian_wrt_states( s.stocks, s.debris, est_int.launches)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "posted-subscriber",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-5., 0., 0.])"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lp._period_benefit_jacobian_wrt_launches( s.stocks, s.debris, est_int.launches)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "divine-agenda",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([1., 0., 0., 0.])"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lp.period_benefit_jacobian_wrt_states( s, est_int)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "surgical-direction",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[1., 2., 3., 2.],\n",
" [4., 5., 6., 2.],\n",
" [7., 8., 9., 2.],\n",
" [1., 3., 5., 7.]])"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int.partials"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "mounted-roots",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(tensor([-7.5676e+05, 2.8893e+05, 4.6783e+05, 1.5236e+00]),\n",
" <combined.States at 0x7f8c3c9c54f0>)"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.single_transition(pm,lp,s,est_int)"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "pediatric-iceland",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-718920.5625, 274490.1562, 444444.6250])"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"minimand, iterated_partials, iterated_state = c.optimality(pm,lp,s,est_int)\n",
"minimand"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "isolated-cleveland",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-2285563., -2285557., -2285557.])"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int2 = c.EstimandInterface(iterated_partials,torch.ones(3))\n",
"\n",
"minimand2, iterated_partials2, iterated_state2 = c.optimality(pm,lp,iterated_state,est_int2)\n",
"minimand2"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "relevant-romance",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-2405858.5000, -2405852.5000, -2405852.5000])"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int3 = c.EstimandInterface(iterated_partials2,torch.ones(3))\n",
"\n",
"minimand3, iterated_partials3, iterated_state3 = c.optimality(pm,lp,iterated_state2,est_int3)\n",
"minimand3"
]
},
{
"cell_type": "markdown",
"id": "israeli-oracle",
"metadata": {},
"source": [
"So, this successfully let me link the two. I'm going to move to another notebook, clean up, and start integrating the system"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -1,65 +0,0 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "victorian-produce",
"metadata": {},
"outputs": [],
"source": [
"a = [1,2,3]\n",
"b = [\"a\",\"b\",\"c\"]"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "sought-beginning",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[(0, (1, 'a')), (1, (2, 'b')), (2, (3, 'c'))]"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"[x for x in enumerate(zip(a,b))]"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "recent-lingerie",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -2,11 +2,10 @@ import torch
from torch.autograd.functional import jacobian
import itertools
import math
import abc
############### CONSTANTS ###################
#Parameters
BETA = 0.95
#Constants determining iterations etc.
NUMBER_CONSTELLATIONS = 5
@ -59,50 +58,111 @@ class PhysicalModel():
function to calculate changes to the physical environment.
"""
def __init__(self
,collision_debris
,debris_from_collision
,constellations_collision_risk
,debris_decay_rate
,launch_debris
,debris_autocatalysis_rate
)
self.collision_debris = collision_debris
self.constellations_collision_risk = constellations_collision_risk
self.debris_decay_rate = debris_decay_rate
self.launch_debris = launch_debris
self.debris_autocatalysis_rate = debris_autocatalysis_rate
,debris_autocatalysis_rate):
self.debris_from_collision= debris_from_collision
self.constellations_collision_risk = constellations_collision_risk
self.debris_decay_rate = debris_decay_rate
self.launch_debris = launch_debris
self.debris_autocatalysis_rate = debris_autocatalysis_rate
def __str__(self):
return "\n{}\n{}\n{}\n{}\n{}".format(
self.debris_from_collision
,self.constellations_collision_risk
,self.debris_decay_rate
,self.launch_debris
,self.debris_autocatalysis_rate
)
def survival(self):
def _survival(self, stocks, debris):
#returns the survival rate (not destruction rate) for the given constellation.
return 1- torch.exp(-self.constellations_collision_risk * self.stock - self.debris)
return 1- torch.exp(-self.constellations_collision_risk * stocks - debris.sum())
def survival(self, states):
"""
This is an interface wrapper
"""
return self._survival(states.stocks, states.debris)
def transition_debris(self, state, launch_decisions):
def _transition_debris(self, stocks,debris,launches):
"""
This function transitions debris levels based off of a state and launch decision.
"""
new_debris = (1-self.debris_decay_rate + self.debris_autocatalysis_rate) * state.debris \ #debris decay and autocatalysis
+ self.launch_debris*launch_decisions.sum() \ #debris from launches
+ self.collision_debris * (1-self.survival()) @ state.stocks
new_debris = (1-self.debris_decay_rate + self.debris_autocatalysis_rate) * debris \
+ self.launch_debris * launches.sum() \
+ self.debris_from_collision * (1-self._survival(stocks,debris)) @ stocks
return new_debris
def transition_stocks(self, state, launch_decisions):
def transition_debris(self, state, estimand_interface):
"""
This is an interface wrapper.
"""
return self._transition_debris(state.stocks,state.debris,estimand_interface.launches)
new_stock = self.survival() * state.stocks + launch_decisions
def _transition_stocks(self, stocks, debris, launches):
"""
This function calculates new stock levels.
"""
new_stock = self._survival(stocks,debris) * stocks + launches
return new_stock
def transition(self, state, launch_decisions):
def transition_stocks(self, state, estimand_interface):
"""
This is an interface wrapper
"""
return self._transition_stocks(state.stocks,state.debris,estimand_interface.launches)
def transition(self, state, estimand_interface):
"""
This function takes a state and launch decision, and updates the state according to the physical laws of motion.
It returns a State object.
"""
d = self.transition_debris(state, launch_decisions)
s = self.transition_stocks(state, launch_decisions)
d = self.transition_debris(state, estimand_interface)
s = self.transition_stocks(state, estimand_interface)
return States(s,d)
def transition_jacobian_wrt_states(self,state,estimand_interface):
"""
This function takes values of the state and estimand, and returns a properly formatted
jacobian of the transition function with respect to the states.
The reason this is done here is because there is some reshaping that must happen, so
it is easier to wrap it here.
"""
jac_debris = jacobian(self._transition_debris, (state.stocks,state.debris,estimand_interface.launches))
jac_stocks = jacobian(self._transition_stocks, (state.stocks,state.debris,estimand_interface.launches))
h1 = torch.cat((jac_stocks[0],jac_stocks[1]),dim=1)
h2 = torch.cat((jac_debris[0],jac_debris[1]),dim=1)
a = torch.cat((h1,h2),dim=0)
return a
def transition_jacobian_wrt_launches(self,state,estimand_interface):
"""
This function takes values of the state and estimand, and returns a properly formatted
jacobian of the transition function with respect to the launch decisions.
The reason this is done here is because there is some reshaping that must happen, so
it is easier to wrap it here.
"""
jac_debris = jacobian(self._transition_debris, (state.stocks,state.debris,estimand_interface.launches))
jac_stocks = jacobian(self._transition_stocks, (state.stocks,state.debris,estimand_interface.launches))
b = torch.cat((jac_stocks[2],jac_debris[2].T),dim=1)
return b
class States():
    """
    Container for the model's state variables (satellite stocks and debris
    levels), providing a common interface for passing state between the
    physical and economic components.
    """
    def __init__(self, stocks, debris):
        # stocks: per-constellation satellite counts; debris: debris levels.
        self.stocks = stocks
        self.debris = debris
    def __str__(self):
        return "stocks\t{} \ndebris\t {}".format(self.stocks, self.debris)
    @property
    def number_constellations(self):
        """Number of constellations tracked (length of the stocks vector)."""
        return len(self.stocks)
    @property
    def number_debris_trackers(self):
        """Number of debris bins tracked (length of the debris vector)."""
        return len(self.debris)
################ NEURAL NETWORK TOOLS ###################
@ -137,108 +208,127 @@ class EstimandInterface():
self.partials = partials
self.launches = launches
self.deorbits = deorbits
def launch_single(constellation):
@property
def number_constellations(self):
return len(self.launches)
@property
def number_states(self):
return self.number_constellations+1
def launch_single(self, constellation):
#returns the launch decision for the constellation of interest
filter_tensor = torch.zeros(NUMBER_CONSTELLATIONS)
filter_tensor = torch.zeros(self.number_constellations)
filter_tensor[constellation] = 1.0
return self.launches @ filter_tensor
def launch_vector(constellation):
def launch_vector(self, constellation):
#returns the launch decision for the constellation of interest as a vector
filter_tensor = torch.zeros(NUMBER_CONSTELLATIONS)
filter_tensor = torch.zeros(self.number_constellations)
filter_tensor[constellation] = 1.0
return self.launches * filter_tensor
def partial_vector(constellation):
def partial_vector(self, constellation):
#returns the partials of the value function corresponding to the constellation of interest
filter_tensor = torch.zeros(NUMBER_CONSTELLATIONS)
filter_tensor = torch.zeros(self.number_states)
filter_tensor[constellation] = 1.0
return self.partials @ filter_tensor
def partial_matrix(constellation):
def partial_matrix(self, constellation):
#returns the partials of the value function corresponding to
#the constellation of interest as a matrix
filter_tensor = torch.zeros(NUMBER_CONSTELLATIONS)
filter_tensor = torch.zeros(self.number_states)
filter_tensor[constellation] = 1.0
return self.partials * filter_tensor
def __str__(self):
#just a human readable descriptor
return "Launch Decisions and Partial Derivativs of value function with\n\t states\n\t\t {}\n\tPartials\n\t\t{}".format(self.states,self.partials)
return "Launch Decisions and Partial Derivativs of value function with\n\tlaunches\n\t\t {}\n\tPartials\n\t\t{}".format(self.launches,self.partials)
############## ECONOMIC MODEL ############
class EconomicModel():
    """
    Collects the per-constellation profit objects used in the value-function
    iteration, together with the common discount factor.
    """
    def __init__(self, discount_factor, profit_objects):
        self.discount_factor = discount_factor
        # A list of Profit objects, one per constellation.
        self.profit_objects = profit_objects
    def constellation_period_profits(self, state, estimand_interface, constellation):
        """Current-period profit for a single constellation."""
        return self.profit_objects[constellation].profit(state, estimand_interface, constellation)
    def period_profits(self, state, estimand_interface):
        """Current-period profits for every constellation, as a list."""
        # BUG FIX: the original iterated `self.profit_objectives` (an attribute
        # that does not exist) without enumerate(); both raised at runtime.
        return [self.constellation_period_profits(state, estimand_interface, i)
                for i in range(self.number_constellations)]
    @property
    def number_constellations(self):
        # BUG FIX: the original referenced bare `profit_objects` (NameError).
        return len(self.profit_objects)
#Abstract class describing profit. Each subclass will connect a profit function "style" to a specific instance of parameters.
class EconomicAgent(metaclass=abc.ABCMeta):
    """
    Abstract interface for an economic agent's profit machinery. Each concrete
    subclass binds a profit-function "style" to a specific parameterization.

    Convention in this module: public methods take (state, estimand_interface)
    wrapper objects; the underscore variants take raw tensors so they can be
    fed to torch.autograd.functional.jacobian.
    """
    @abc.abstractmethod
    def period_benefit(self, state, estimand_interface):
        """Current-period benefit, from interface objects."""
        pass
    @abc.abstractmethod
    def _period_benefit(self):
        """Current-period benefit, from raw tensors."""
        pass
    @abc.abstractmethod
    def period_benefit_jacobian_wrt_states(self):
        """Jacobian of the period benefit w.r.t. the states, from interface objects."""
        pass
    @abc.abstractmethod
    def _period_benefit_jacobian_wrt_states(self):
        """Jacobian of the period benefit w.r.t. the states, from raw tensors."""
        pass
    @abc.abstractmethod
    def period_benefit_jacobian_wrt_launches(self):
        """Jacobian of the period benefit w.r.t. launches, from interface objects."""
        pass
    @abc.abstractmethod
    def _period_benefit_jacobian_wrt_launches(self):
        """Jacobian of the period benefit w.r.t. launches, from raw tensors."""
        pass
    #TODO: Should I attach the jacobian here? It may simplify things.
    #def jacobian()
class LinearProfit(EconomicAgent):
    """
    The simplest profit specification: benefit linear in satellite stocks,
    minus a linear launch cost. Deorbit costs are parameterized but not yet
    used in the benefit computation.
    """
    def __init__(self, constellation_number, discount_factor, benefit_weight, launch_cost, deorbit_cost=0):
        # Track which constellation this agent owns.
        self.constellation_number = constellation_number
        # Parameters describing the agent's economic situation.
        self.discount_factor = discount_factor
        self.benefit_weights = benefit_weight
        self.launch_cost = launch_cost
        self.deorbit_cost = deorbit_cost
    def __str__(self):
        return "LinearProfit\n Benefit weights:\t{}\n launch cost:\t{}\n Deorbit cost:\t{}".format(self.benefit_weights, self.launch_cost, self.deorbit_cost)
    def period_benefit(self, state, estimand_interface):
        """Interface wrapper around _period_benefit."""
        return self._period_benefit(state.stocks, state.debris, estimand_interface.launches)
    def _period_benefit(self, stocks, debris, launches):
        """Linear benefit from stocks minus this agent's launch cost (debris unused)."""
        # TODO: subtract deorbit costs once deorbit decisions are wired in,
        # e.g. self.deorbit_cost @ deorbits[self.constellation_number].
        return self.benefit_weights @ stocks \
            - self.launch_cost * launches[self.constellation_number]
    def period_benefit_jacobian_wrt_states(self, states, estimand_interface):
        """Interface wrapper around _period_benefit_jacobian_wrt_states."""
        return self._period_benefit_jacobian_wrt_states(states.stocks, states.debris, estimand_interface.launches)
    def _period_benefit_jacobian_wrt_states(self, stocks, debris, launches):
        """Gradient of the period benefit w.r.t. stocks and debris, concatenated."""
        jac = jacobian(self._period_benefit, (stocks, debris, launches))
        return torch.cat((jac[0], jac[1]))
    def period_benefit_jacobian_wrt_launches(self, states, estimand_interface):
        """Interface wrapper around _period_benefit_jacobian_wrt_launches."""
        return self._period_benefit_jacobian_wrt_launches(states.stocks, states.debris, estimand_interface.launches)
    def _period_benefit_jacobian_wrt_launches(self, stocks, debris, launches):
        """Gradient of the period benefit w.r.t. the launch decisions."""
        jac = jacobian(self._period_benefit, (stocks, debris, launches))
        return jac[2]
#other profit functions to implement
# price competition (substitution)
# military (complementarity)
############### TRANSITION AND OPTIMALITY FUNCTIONS #################
#Rewrite these to use the abstractions present earlier
#at some point I should wrap the two functions below into a class that holds various things
class OrbitalModel:
    """
    Placeholder for a class intended to bundle the transition and optimality
    functions below into one object (see the TODO comments in this section).
    """
    def __init__(self, number_debris_trackers, num_choice_variables):
        # Stub constructor: arguments are accepted but not yet stored or used.
        pass
def single_transition(physical_model, economic_agent, states, estimand_interface):
    """
    Invert the envelope conditions for one step of value-function iteration.

    Expresses the derivatives of the value function evaluated at time t+1 in
    terms of period-t quantities, and advances the physical state.

    Parameters:
        physical_model: provides transition_jacobian_wrt_states and transition.
        economic_agent: provides discount_factor, constellation_number, and
            period_benefit_jacobian_wrt_states.
        states: current States object.
        estimand_interface: provides partial_vector (value-function partials).

    Returns:
        (iterated_value_partials, iterated_states)
    """
    # Discounted jacobian of the transition with respect to the states.
    A = economic_agent.discount_factor * physical_model.transition_jacobian_wrt_states(states, estimand_interface)
    f_theta = economic_agent.period_benefit_jacobian_wrt_states(states, estimand_interface)
    T = estimand_interface.partial_vector(economic_agent.constellation_number) - f_theta
    # TODO: handle a singular A gracefully.
    # linalg.solve is preferred over explicit inversion for numerical
    # stability and speed.
    iterated_value_partials = torch.linalg.solve(A, T)
    # Transition the states.
    iterated_states = physical_model.transition(states, estimand_interface)
    return iterated_value_partials, iterated_states
def transition_wrapper(data_in):  # TODO: identify a way to eliminate this, maybe?
    """
    Wrap single_transition and handle updating states etc.

    Not yet implemented — TODO: rewrite using the current abstractions.
    """
    return None
#Optimality math
def optimality(physical_model, economic_agent, states, estimand_interface):
    """
    Evaluate the agent's optimality (first-order) condition for the given
    models, state, and estimand.

    Returns:
        (optimality_condition, iterated_partials, iterated_state) where
        optimality_condition = f_x + beta * B @ iterated_partials, with
        f_x the benefit jacobian w.r.t. launches and B the transition
        jacobian w.r.t. launches.
    """
    fx = economic_agent.period_benefit_jacobian_wrt_launches(states, estimand_interface)
    B = physical_model.transition_jacobian_wrt_launches(states, estimand_interface)
    iterated_partials, iterated_state = single_transition(physical_model, economic_agent, states, estimand_interface)
    return fx + economic_agent.discount_factor * B @ iterated_partials, iterated_partials, iterated_state

Loading…
Cancel
Save