{ "cells": [ { "cell_type": "code", "execution_count": 1, "id": "variable-telephone", "metadata": { "tags": [] }, "outputs": [], "source": [ "import torch\n", "from torch.autograd.functional import jacobian\n", "import itertools" ] }, { "cell_type": "markdown", "id": "minimal-quarterly", "metadata": {}, "source": [ "# Setup Functions\n", "## General CompositionFunctions" ] }, { "cell_type": "code", "execution_count": 2, "id": "instructional-guide", "metadata": {}, "outputs": [], "source": [ "### Set up functions to compose functions \n", "# These functions will \n", "# - compose two functions together\n", "# - compose a function to itself n times.\n", "\n", "def compose(f,g):\n", " \"\"\"\n", " this function composes two functions f and g in\n", " the order f(g(*args))\n", " \n", " it returns a function\n", " \"\"\"\n", " return lambda *args: f(g(*args))\n", "\n", "def compose_recursive_functions(fn,n):\n", " \"\"\"\n", " This function takes a function fn and composes it with itself n times\n", " \n", " Returns an array/list that contains each of the n composition levels, ordered\n", " from fn() to fn^n()\n", " \"\"\"\n", "\n", " \n", " #Set base conditions\n", " out_func = None\n", " out_func_list =[]\n", "\n", " #build the compositions of functions\n", " for f in itertools.repeat(fn, n):\n", " #if first iteration\n", " if out_func == None:\n", " out_func = f\n", " else:\n", " out_func = compose(f,out_func)\n", "\n", " #append the ou\n", " out_func_list.append(out_func)\n", " \n", " return out_func_list" ] }, { "cell_type": "markdown", "id": "spread-humor", "metadata": {}, "source": [ "## Setup functions related to the problem" ] }, { "cell_type": "code", "execution_count": 3, "id": "supposed-chorus", "metadata": {}, "outputs": [], "source": [ "### Classes\n", "class States():\n", " \"\"\"\n", " This class represents the state variables associated with the problems.\n", " \n", " In this problem, the two types of states are constellation stocks and debris.\n", " \n", " I'm not sure how useful it will be. We'll see.\n", " \"\"\"\n", " def __init__(self, satellite_stock, debris):\n", " self.stock = satellite_stock\n", " self.debris = debris\n", " \n", " \n", "\n", "### functions\n", "\n", "def survival(stock, debris):\n", " \"\"\"\n", " This is a basic, deterministic survival function. \n", " It is based on the CDF of an exponential distribution.\n", " \"\"\"\n", " #TODO: ACTUALLY DERIVE A SURVIVAL FUNCTION. THIS IS JUST A PLACEHOLDER. PROBABLY SHOULD BE AN EXPONENTIAL DISTRIBUTION\n", " return 1 - torch.exp(-SCALING * stock - debris)\n", "\n", "def test_launch(stock, debris):\n", " \"\"\"\n", " Temporary launch function \n", " \"\"\"\n", " return torch.ones(5, requires_grad=True)\n", "\n", "def laws_of_motion(stock, debris, launches):\n", " \"\"\"\n", " This function updates state variables (stock and debris), according \n", " to the laws of motion.\n", " \n", " It returns the state variables as \n", " \"\"\"\n", " \n", " new_stock = stock*survival(stock,debris) + launches#(stock,debris) #TODO: Launches will become a function (neural network)\n", " \n", " #TODO: Currently Ignoring autocatalysis\n", " new_debris = (1-DELTA)*debris + LAUNCH_DEBRIS_RATE * launches.sum() + COLLISION_DEBRIS_RATE*(1-survival(stock,debris)) @ stock\n", " \n", " return (new_stock, new_debris)\n", "\n", "#This is not a good specification of the profit function, but it will work for now.\n", "def profit(stock, debris):\n", " return UTIL_WEIGHTS @ stock" ] }, { "cell_type": "code", "execution_count": 4, "id": "analyzed-drink", "metadata": {}, "outputs": [], "source": [ "def single_transition(item_to_iterate, laws_motion, profit, launch, stocks, debris ):\n", " \"\"\"\n", " This function represents the inverted envelope conditions.\n", " It allows us to describe the derivatives of the value function evaluated at time $t+1$ in terms based in time period $t$.\n", " \n", " It takes a few derivatives (jacobians), and due to the nature of the return as tuples\n", " they must be concatenated.\n", " \n", " It returns the transitioned values\n", " \"\"\"\n", " \n", " #Find the matrix to invert. \n", " #it consists of the derivative of the laws of motion with respect to stocks and debris\n", " \n", " #Get the jacobian\n", " a = jacobian(laws_motion, (stocks,debris,launch))\n", " \n", " #Reassemble the Jacobian nested tuples into the appropriate tensor\n", " A = BETA * torch.cat((torch.cat((a[0][0],a[0][1]),dim=1),torch.cat((a[1][0],a[1][1]),dim=1)), dim=0)\n", " \n", " #TODO: figure out some diagnostics for this section\n", " #Possibilities include:\n", " # - Det(A) ~= 0\n", " # - EigVal(A) ~= 0\n", " # - A.inverse() with a try catch system to record types of returns\n", " #Alternatively, \n", " \n", " #Calculate the item to transition\n", " T = item_to_iterate - torch.cat(jacobian(profit,(stocks, debris))) \n", " #Includes rearranging the jacobian of profit.\n", "\n", " #Return the transitioned values\n", " return ( A.inverse()/BETA ) @ T\n", "\n", "# This function wraps the single transition and handles updating dates etc.\n", "def transition_wrapper(data_in):\n", " \"\"\"\n", " \"\"\"\n", " #unpack states and functions\n", " stocks, debris,profit, launch, laws_motion, item_to_transition = data_in\n", " \n", " #Calculate new states\n", " new_stocks, new_debris = laws_motion(stocks,debris,launch)\n", " \n", " #WARNING: RECURSION: You may break your head...\n", " #This gets the transition of the value function derivatives over time.\n", " transitioned = single_transition(\n", " item_to_transition, #item to iterate, i.e. the derivatives of the value function\n", " #functions\n", " laws_motion, \n", " profit, \n", " launch, #TODO: reimplement with launch as a function\n", " stocks, debris #states\n", " )\n", " \n", " #collects the data back together for return, including the updated state variables\n", " data_out = new_stocks, new_debris, profit, launch, laws_motion, transitioned\n", " \n", " return data_out" ] }, { "cell_type": "markdown", "id": "martial-station", "metadata": {}, "source": [ "# Actual calculations" ] }, { "cell_type": "code", "execution_count": 5, "id": "ambient-breakfast", "metadata": {}, "outputs": [], "source": [ "#set states\n", "stocks = torch.ones(5)\n", "#Last one is different\n", "stocks[-1] = 0.5\n", "#now add the tracking requirement in place\n", "stocks.requires_grad_()\n", "\n", "#Setup Debris\n", "debris = torch.tensor([2.2],requires_grad=True)\n", "\n", "#CHANGE LATER: Launch is currently a value, should be a function (i.e. neural network)\n", "launch = torch.ones(5, requires_grad=True)\n", "\n", "#compose the functions together.\n", "base_data = (stocks,debris, profit, launch, laws_of_motion, torch.ones(6, requires_grad=True))\n", "\n", "#Parameters\n", "SCALING = torch.ones(5)\n", "DELTA = 0.9 \n", "LAUNCH_DEBRIS_RATE = 0.005\n", "COLLISION_DEBRIS_RATE = 0.0007\n", "UTIL_WEIGHTS = torch.tensor([1,-0.2,0,0,0])\n", "BETA = 0.95" ] }, { "cell_type": "code", "execution_count": 6, "id": "deluxe-remains", "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ "tensor([-0.4523, 0.8774, 0.6558, 0.6558, 0.7608, 11.0954],\n", " grad_fn=) \n", "\n", "\n", "\n", "tensor([-25.6591, -23.1244, -23.5468, -23.5468, -29.4675, 123.8316],\n", " grad_fn=) \n", "\n", "\n", "\n", "tensor([-236.0462, -232.3070, -232.9302, -232.9302, -312.7054, 1379.8452],\n", " grad_fn=) \n", "\n", "\n", "\n", "tensor([-1638.0408, -1632.9258, -1633.7783, -1633.7783, -2380.2834, 15341.8955],\n", " grad_fn=) \n", "\n", "\n", "\n", "tensor([ -9855.1680, -9848.4297, -9849.5527, -9849.5527, -15547.5273,\n", " 170308.7812], grad_fn=) \n", "\n", "\n", "\n" ] } ], "source": [ "#Get the values from 5 transitions\n", "for f in compose_recursive_functions(transition_wrapper,5):\n", " result = f(base_data)\n", " print(result[5], \"\\n\"*3)" ] }, { "cell_type": "markdown", "id": "parallel-classroom", "metadata": {}, "source": [ "Also, maybe I can create a `Model` class that upon construction will capture the necesary constants, functions, etc.\n" ] }, { "cell_type": "code", "execution_count": null, "id": "copyrighted-freeze", "metadata": {}, "outputs": [], "source": [] } ], "metadata": { "kernelspec": { "display_name": "Python 3", "language": "python", "name": "python3" }, "language_info": { "codemirror_mode": { "name": "ipython", "version": 3 }, "file_extension": ".py", "mimetype": "text/x-python", "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", "version": "3.8.8" } }, "nbformat": 4, "nbformat_minor": 5 }