Merged work sitting in bitbucket into local repo

temporaryWork
will king 3 years ago
commit 7ec2ede00e

7
.gitignore vendored

@ -4,11 +4,17 @@
CurrentWriting/sections/[0-9][0-9]*.pdf
CurrentWriting/Main.pdf
#Ignore .bson files
*.bson
### SPECIFICALLY INCLUDE
#Preambles
!CurrentWriting/assets/preambles/*
########### GENERIC STUFF BELOW ###########
## Core latex/pdflatex auxiliary files:
@ -298,5 +304,6 @@ TSWLatexianTemp*
# don't track PDFs
*.pdf
#Don't track python/jupyterlab stuff
*/.ipynb_checkpoints/*
*/__pycache__/*

@ -0,0 +1,10 @@
%----------------------------------------------------------------------------------------
% BIBLIOGRAPHY SETUP
%----------------------------------------------------------------------------------------
%%% Setup Bibliography
\usepackage[backend=biber,style=apa,autocite=inline]{biblatex}
\addbibresource{../Assets/preambles/References.bib}

@ -0,0 +1,38 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% Based on a template downloaded from:
% http://www.LaTeXTemplates.com
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
% Changed theme to WSU by William King
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%----------------------------------------------------------------------------------------
% COLORS AND THEMES
%----------------------------------------------------------------------------------------
%%% Setup color
\usetheme{Berkeley}
\definecolor{WSUred}{RGB}{152,30,50}
\definecolor{WSUgrey}{RGB}{94,106,113}
\setbeamercolor{palette primary}{bg=WSUred,fg=white}
\setbeamercolor{palette secondary}{bg=WSUred,fg=white}
\setbeamercolor{palette tertiary}{bg=WSUred,fg=white}
\setbeamercolor{palette quaternary}{bg=WSUred,fg=white}
\setbeamercolor{structure}{fg=WSUgrey} % itemize, enumerate, etc
\setbeamercolor{section in toc}{fg=WSUred} % TOC sections
\setbeamercolor{block body}{fg=WSUred} % block body sections
%\setbeamertemplate{footline} % To remove the footer line in all slides uncomment this line
\setbeamertemplate{footline}[page number] % To replace the footer line in all slides with a simple slide count uncomment this line
\setbeamertemplate{navigation symbols}{} % To remove the navigation symbols from the bottom of all slides uncomment this line
%%% setup packages
\usepackage{graphicx} % Allows including images
\graphicspath{{./img/}} %Set a graphics path
\usepackage{booktabs} % Allows the use of \toprule, \midrule and \bottomrule in tables

@ -0,0 +1,16 @@
%Include other generally important packages
\usepackage{hyperref} % Allows for weblinks
\hypersetup{
colorlinks=true,
citebordercolor=WSUgrey,
citecolor=WSUred,
linkcolor=WSUred,
urlcolor=Blue
}
\usepackage{cleveref}
%Add algorithms
\usepackage{algorithm,algpseudocode}

@ -0,0 +1,20 @@
%%%%%%%%%Packages%%%%%%%%%%%%%%%
\usepackage{amsmath}
\usepackage{mathtools}
\usepackage{amsthm}
\usepackage{amssymb}
\usepackage{thmtools, thm-restate}
%%%%%%%%%%%% MATH FORMATTING %%%%%%%%%%%%%%%%%%%%%
%Helpful bits
\newcommand{\bb}[1]{\mathbb{#1}}
%Derivatives etc.
\newcommand{\parder}[3]{\ensuremath{ \frac{\partial^{#3} #1}{\partial #2~^{#3}}}}
\newcommand{\der}[3]{\ensuremath{ \frac{d^{#3} #1}{d #2~^{#3}}}}
%Math Operators
\DeclareMathOperator{\argmax}{argmax}
\DeclareMathOperator{\argmin}{argmin}

@ -0,0 +1,234 @@
% Encoding: UTF-8
@Misc{EsaTweet,
author = {European~Space~Agency},
title = {For the first time ever, ESA has performed a `collision avoidance manoeuvre' to protect one of its satellites from colliding with a 'mega constellation' \#SpaceTraffic},
addendum = {https://twitter.com/esaoperations},
date = {2019-09-02},
nameaddon = {@ESAOperations},
}
@Electronic{ArsTechnicaStatement,
author = {Brodkin, Jon},
howpublished = {Online Article},
language = {English},
note = {Statement from SpaceX to ARS Technica},
organization = {Ars Technica},
title = {SpaceX satellite was on “collision course” until ESA satellite was re-routed},
url = {https://arstechnica.com/information-technology/2019/09/spacex-satellite-was-on-collision-course-until-esa-satellite-was-re-routed/},
date = {2019-09-03},
}
@Electronic{EsaBlog,
author = {European~Space~Agency},
howpublished = {Online},
language = {English},
organization = {European Space Agency},
title = {ESA spacecraft dodges large constellation},
url = {http://www.esa.int/Safety_Security/ESA_spacecraft_dodges_large_constellation},
date = {2019-09-03},
}
@PhdThesis{Rao:dissertation,
author = {Rao, Akhil},
school = {University of Colorado},
title = {The Economics of Orbit Use: Theory, Policy, and Practice},
year = {2019},
}
@Article{adilov_alexander_cunningham_2015,
author = {Adilov, Nodir and Alexander, Peter J. and Cunningham, Brendan M.},
journal = {Environmental and Resource Economics},
title = {An Economic Analysis of Earth Orbit Pollution},
year = {2015},
issn = {0924-6460},
number = {1},
pages = {81--98},
volume = {60},
doi = {10.1007/s10640-013-9758-4},
publisher = {Environmental and Resource Economics},
}
@Article{Macauley_1998,
author = {Macauley, Molly K},
journal = {The Journal of Law and Economics},
title = {Allocation of Orbit and Spectrum Resources for Regional Communications: What's At Stake?},
year = {1998},
issn = {0022-2186},
number = {S2},
pages = {737--764},
volume = {41},
abstract = {Contentious debate surrounds allocation of the geostationary orbit and electromagneticspectrum, two resources used by communications satellites. An extensive economicsliterature alleges that the nonmarket administrative allocative procedures now in place arehighly inefficient, but no research has empirically estimated the welfare loss. This paperdevelops a conceptual framework and a computerized model to estimate the economic valueof the resources, the size and distribution of welfare costs associated with the presentregulatory regime, and the potential gains from more market-like allocation.
Key Words: outer space, communications satellites, pricing natural resources
JEL Classification Nos.: H4, Q2},
doi = {10.1086/467411},
publisher = {The Journal of Law and Economics},
}
@InBook{brillinger_2001,
author = {Brillinger, David R.},
pages = {105--116},
title = {Space Debris: Flux in a Two Dimensional Orbit},
year = {2001},
doi = {10.1007/978-3-0348-8326-9_8},
}
@Article{Grzelka2019,
author = {Zachary Grzelka and Jeffrey Wagner},
journal = {Environmental and Resource Economics},
title = {Managing Satellite Debris in Low-Earth Orbit: Incentivizing Ex Ante Satellite Quality and Ex Post Take-Back Programs},
year = {2019},
month = {feb},
number = {1},
pages = {319--336},
volume = {74},
doi = {10.1007/s10640-019-00320-3},
publisher = {Springer Science and Business Media {LLC}},
}
@Article{Adilov2018,
author = {Nodir Adilov and Peter J. Alexander and Brendan M. Cunningham},
title = {An economic “Kessler Syndrome”: A dynamic model of earth orbit debris},
year = {2018},
issn = {0165-1765},
pages = {79-82},
volume = {166},
comment = {Might be a working paper?},
doi = {10.1016/j.econlet.2018.02.025},
}
@Article{Adilov2018a,
author = {Nodir Adilov and Peter J. Alexander and Brendan M. Cunningham},
title = {Corrigendum to “An economic “Kessler Syndrome”: A dynamic model of earth orbit debris” [Econom. Lett. 166 (2018) 79--82]},
year = {2018},
issn = {0165-1765},
pages = {185},
volume = {170},
doi = {10.1016/j.econlet.2018.04.012},
}
@Misc{Kessler1990,
author = {Donald Kessler},
title = {Orbital debris environment for spacecraft in low earth orbit},
year = {1990},
doi = {10.2514/6.1990-1353},
}
@Article{Adilov2015,
author = {Nodir Adilov and Peter J. Alexander and Brendan Michael Cunningham},
title = {Earth Orbit Debris: An Economic Model},
year = {2015},
issn = {1556-5068},
doi = {10.2139/ssrn.2264915},
}
@Article{Kessler1978,
author = {Kessler, Donald J. and Cour-Palais, Burton G.},
journal = {Journal of Geophysical Research: Space Physics},
title = {Collision frequency of artificial satellites: The creation of a debris belt},
year = {1978},
number = {A6},
pages = {2637--2646},
volume = {83},
abstract = {As the number of artificial satellites in earth orbit increases, the probability of collisions between satellites also increases. Satellite collisions would produce orbiting fragments, each of which would increase the probability of further collisions, leading to the growth of a belt of debris around the earth. This process parallels certain theories concerning the growth of the asteroid belt. The debris flux in such an earth-orbiting belt could exceed the natural meteoroid flux, affecting future spacecraft designs. A mathematical model was used to predict the rate at which such a belt might form. Under certain conditions the belt could begin to form within this century and could be a significant problem during the next century. The possibility that numerous unobserved fragments already exist from spacecraft explosions would decrease this time interval. However, early implementation of specialized launch constraints and operational procedures could significantly delay the formation of the belt.},
doi = {10.1029/JA083iA06p02637},
eprint = {https://agupubs.onlinelibrary.wiley.com/doi/pdf/10.1029/JA083iA06p02637},
url = {https://agupubs.onlinelibrary.wiley.com/doi/abs/10.1029/JA083iA06p02637},
}
@Electronic{FAA2020,
author = {NA},
howpublished = {Online},
month = oct,
note = {Describes altitude of LEO and GEO},
organization = {Federal Aviation Administration},
url = {https://www.faa.gov/space/additional_information/faq/#s1},
year = {2020},
}
@Article{adilov_alexander_cunningham_2018,
author = {Adilov, Nodir and Alexander, Peter J. and Cunningham, Brendan M.},
journal = {Economics Letters},
title = {An economic “Kessler Syndrome”: A dynamic model of earth orbit debris},
year = {2018},
issn = {0165-1765},
pages = {79--82},
volume = {166},
doi = {10.1016/j.econlet.2018.02.025},
publisher = {Economics Letters},
}
@TechReport{RaoRondina2020,
author = {Rao, Akhil and Rondina, Giacomo},
institution = {NA},
title = {Cost in Space: Debris and Collision Risk in the Orbital Commons},
year = {2020},
month = feb,
note = {Middlebury College | UC San Diego},
type = {Working Paper},
}
@Article{Adilov2019,
author = {Adilov, Nodir and Cunningham, Brendan and Alexander, Peter and Duvall, Jerry and Shiman, Daniel},
journal = {Econ Inq},
title = {LEFT FOR DEAD: ANTICOMPETITIVE BEHAVIOR IN ORBITAL SPACE},
year = {2019},
month = {04},
volume = {57},
doi = {10.1111/ecin.12790},
}
@Article{Rao2020,
author = {Rao and Burgess and Kaffine},
journal = {Proceedings of the National Academy of Sciences},
title = {Orbital-use fees could more than quadruple the value of the space industry},
year = {2020},
issn = {0027-8424},
number = {23},
pages = {12756--12762},
volume = {117},
abstract = {The commercial satellite industry is rapidly expanding. A side effect of this expansion is a growing buildup of space debris that imposes costly collision risk on satellite operators. Proposed solutions to this debris have been primarily technological, but the core of the problem is incentives{\textemdash}satellites are being launched without consideration of the collision risks they impose on other operators. We show that this incentive problem can be solved with an internationally harmonized {\textquotedblleft}orbital-use fee{\textquotedblright} (OUF){\textemdash}a tax on orbiting satellites. Using a coupled physical{\textendash}economic model, we project that an optimally designed OUF could more than quadruple the long-run value of the satellite industry by 2040.The space industry{\textquoteright}s rapid recent growth represents the latest tragedy of the commons. Satellites launched into orbit contribute to{\textemdash}and risk damage from{\textemdash}a growing buildup of space debris and other satellites. Collision risk from this orbital congestion is costly to satellite operators. Technological and managerial solutions{\textemdash}such as active debris removal or end-of-life satellite deorbit guidelines{\textemdash}are currently being explored by regulatory authorities. However, none of these approaches address the underlying incentive problem: satellite operators do not account for costs they impose on each other via collision risk. Here, we show that an internationally harmonized orbital-use fee can correct these incentives and substantially increase the value of the space industry. We construct and analyze a coupled physical{\textendash}economic model of commercial launches and debris accumulation in low-Earth orbit. Similar to carbon taxes, our model projects an optimal fee that rises at a rate of 14\% per year, equal to roughly \$235,000 per satellite-year in 2040. 
The long-run value of the satellite industry would more than quadruple by 2040{\textemdash}increasing from around \$600 billion under business as usual to around \$3 trillion. In contrast, we project that purely technological solutions are unlikely to fully address the problem of orbital congestion. Indeed, we find debris removal sometimes worsens economic damages from congestion by increasing launch incentives. In other sectors, addressing the tragedy of the commons has often been a game of catch-up with substantial social costs. The infant space industry can avert these costs before they escalate.},
doi = {10.1073/pnas.1921260117},
eprint = {https://www.pnas.org/content/117/23/12756.full.pdf},
publisher = {National Academy of Sciences},
url = {https://www.pnas.org/content/117/23/12756},
}
@Article{GrzelkaWagner2019,
author = {Grzelka, Zachary and Wagner, Jeffrey},
journal = {Environmental and Resource Economics},
title = {Managing Satellite Debris in Low-Earth Orbit: Incentivizing Ex Ante Satellite Quality and Ex Post Take-Back Programs},
year = {2019},
issn = {0924-6460},
number = {1},
pages = {319--336},
volume = {74},
doi = {10.1007/s10640-019-00320-3},
publisher = {Environmental and Resource Economics},
}
@Misc{Kennedy1962,
author = {John F. Kennedy},
month = sep,
title = {Address at Rice University on the Nation's Space Effort},
year = {1962},
url = {https://er.jsc.nasa.gov/seh/ricetalk.htm},
}
@Article{Innes2018,
author = {Michael Innes and Elliot Saba and Keno Fischer and Dhairya Gandhi and Marco Concetto Rudilosso and Neethu Mariya Joy and Tejan Karmali and Avik Pal and Viral Shah},
title = {Fashionable Modelling with Flux},
journal = {CoRR},
year = {2018},
volume = {abs/1811.01457},
archiveprefix = {arXiv},
bibsource = {dblp computer science bibliography, https://dblp.org},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1811-01457},
eprint = {1811.01457},
timestamp = {Thu, 22 Nov 2018 17:58:30 +0100},
url = {https://arxiv.org/abs/1811.01457},
}
@Comment{jabref-meta: databaseType:bibtex;}

@ -0,0 +1,12 @@
%-----------------------------------------------------------
% Tikzit Preamble
%-----------------------------------------------------------
%Setup graphing
\usepackage{tikz}
\usepackage{tikzit}
\input{../Assets/preambles/WSU_Econ.tikzstyles}
%\input{../Assets/preambles/tikzit.sty}

@ -0,0 +1,32 @@
% TiKZ style file generated by TikZiT. You may edit this file manually,
% but some things (e.g. comments) may be overwritten. To be readable in
% TikZiT, the only non-comment lines must be of the form:
% \tikzstyle{NAME}=[PROPERTY LIST]
% Node styles
\tikzstyle{CrimsonNode}=[fill={rgb,255: red,152; green,30; blue,50}, draw={rgb,255: red,152; green,30; blue,50}, shape=circle, tikzit category=WSU, tikzit draw={rgb,255: red,152; green,30; blue,50}, tikzit fill={rgb,255: red,152; green,30; blue,50}]
\tikzstyle{GreyNode}=[fill={rgb,255: red,94; green,106; blue,113}, draw={rgb,255: red,94; green,106; blue,113}, shape=circle, tikzit category=WSU, tikzit draw={rgb,255: red,94; green,106; blue,113}, tikzit fill={rgb,255: red,94; green,106; blue,113}]
\tikzstyle{Box}=[fill={rgb,255: red,94; green,106; blue,113}, draw={rgb,255: red,94; green,106; blue,113}, shape=rectangle, tikzit draw={rgb,255: red,94; green,106; blue,113}, tikzit fill={rgb,255: red,94; green,106; blue,113}]
\tikzstyle{Red Box}=[fill={rgb,255: red,152; green,30; blue,50}, draw={rgb,255: red,152; green,30; blue,50}, shape=rectangle]
\tikzstyle{new style 0}=[fill=white, draw=black, shape=circle, tikzit draw=black]
\tikzstyle{new style 1}=[fill={rgb,255: red,128; green,0; blue,128}, draw=black, shape=circle]
\tikzstyle{Box}=[fill=white, draw=black, shape=rectangle]
\tikzstyle{rotated text}=[fill=none, draw=none, shape=circle, rotate=270, tikzit draw={rgb,255: red,191; green,191; blue,191}]
% Edge styles
\tikzstyle{RightArrow}=[->]
\tikzstyle{LeftRightArrow}=[<->]
\tikzstyle{CrimsonBar}=[-, draw={rgb,255: red,152; green,30; blue,50}]
\tikzstyle{GreyBar}=[-, draw={rgb,255: red,94; green,106; blue,113}, tikzit draw={rgb,255: red,94; green,106; blue,113}]
\tikzstyle{divider}=[draw={rgb,255: red,64; green,64; blue,64}, dashed, dash pattern=on 2mm off 1mm, -]
\tikzstyle{bars}=[{|-|}]
\tikzstyle{Dashed}=[-, dashed, dash pattern=on 1mm off 2mm, tikzit draw={rgb,255: red,128; green,128; blue,128}]
\tikzstyle{Light Arrow}=[->, draw={rgb,255: red,191; green,191; blue,191}]
\tikzstyle{lightgreybar}=[-, draw={rgb,255: red,191; green,191; blue,191}]
\tikzstyle{lightred}=[-, draw={rgb,255: red,222; green,148; blue,178}]
\tikzstyle{Purple}=[-, draw={rgb,255: red,128; green,0; blue,128}, tikzit draw={rgb,255: red,128; green,0; blue,128}, line width=1mm]
\tikzstyle{new edge style 1}=[draw={rgb,255: red,121; green,23; blue,40}, ->]
\tikzstyle{filled2}=[-, fill={rgb,255: red,255; green,191; blue,191}, draw=black, tikzit draw=black, tikzit fill={rgb,255: red,255; green,191; blue,191}, opacity=0.5]
\tikzstyle{filled1}=[-, fill={rgb,255: red,191; green,191; blue,191}, draw=black, tikzit draw=black, opacity=0.5, tikzit fill={rgb,255: red,191; green,191; blue,191}]
\tikzstyle{emptyFill1}=[-, fill={rgb,255: red,255; green,191; blue,191}, draw=none, tikzit draw=blue, opacity=0.3]
\tikzstyle{new edge style 0}=[-, draw=none, fill={rgb,255: red,191; green,191; blue,191}, tikzit draw=green, opacity=0.3, tikzit fill={rgb,255: red,191; green,191; blue,191}]

@ -0,0 +1,42 @@
\usepackage{tikz}
\usetikzlibrary{backgrounds}
\usetikzlibrary{arrows}
\usetikzlibrary{shapes,shapes.geometric,shapes.misc}
% this style is applied by default to any tikzpicture included via \tikzfig
\tikzstyle{tikzfig}=[baseline=-0.25em,scale=0.5]
% these are dummy properties used by TikZiT, but ignored by LaTex
\pgfkeys{/tikz/tikzit fill/.initial=0}
\pgfkeys{/tikz/tikzit draw/.initial=0}
\pgfkeys{/tikz/tikzit shape/.initial=0}
\pgfkeys{/tikz/tikzit category/.initial=0}
% standard layers used in .tikz files
\pgfdeclarelayer{edgelayer}
\pgfdeclarelayer{nodelayer}
\pgfsetlayers{background,edgelayer,nodelayer,main}
% style for blank nodes
\tikzstyle{none}=[inner sep=0mm]
% include a .tikz file
\newcommand{\tikzfig}[1]{%
{\tikzstyle{every picture}=[tikzfig]
\IfFileExists{#1.tikz}
{\input{#1.tikz}}
{%
\IfFileExists{./figures/#1.tikz}
{\input{./figures/#1.tikz}}
{\tikz[baseline=-0.5em]{\node[draw=red,font=\color{red},fill=red!10!white] {\textit{#1}};}}%
}}%
}
% the same as \tikzfig, but in a {center} environment
\newcommand{\ctikzfig}[1]{%
\begin{center}\rm
\tikzfig{#1}
\end{center}}
% fix strange self-loops, which are PGF/TikZ default
\tikzstyle{every loop}=[]

@ -0,0 +1,562 @@
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Beamer Presentation
% LaTeX Template
% Version 1.0 (10/11/12)
%
% This template has been downloaded from:
% http://www.LaTeXTemplates.com
%
% License:
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
%
% Changed theme to WSU by William King
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%----------------------------------------------------------------------------------------
% PACKAGES AND THEMES
%----------------------------------------------------------------------------------------
\documentclass[xcolor=dvipsnames,aspectratio=169]{beamer}
%Import Preamble bits
\input{../Assets/preambles/FormattingPreamble.tex}
\input{../Assets/preambles/TikzitPreamble.tex}
\input{../Assets/preambles/MathPreamble.tex}
\input{../Assets/preambles/BibPreamble.tex}
\input{../Assets/preambles/GeneralPreamble.tex}
%----------------------------------------------------------------------------------------
% TITLE PAGE
%----------------------------------------------------------------------------------------
\title[MDP Constellations]{Modeling decisions in operating satellite constellations}
\author{Will King} % Your name
\institute[WSU] % Your institution as it will appear on the bottom of every slide, may be shorthand to save space
{
Washington State University \\ % Your institution for the title page
\medskip
\textit{william.f.king@wsu.edu} % Your email address
}
\date{\today} % Date, can be changed to a custom date
\begin{document}
\begin{frame}
\titlepage % Print the title page as the first slide
\end{frame}
%-------------------------------------------------------------------------------------
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
\section{Background}
% Why should we care?
% Uses of space
% Pollution in space and it's impacts
% Kessler Syndrome
% What is different now
%
%
%
%-------------------------------------------------------------------------------------
\begin{frame}
\frametitle{Orbital Debris}
%Story from monday ISS
\href{https://edition.cnn.com/2021/11/15/politics/russia-anti-satellite-weapon-test-scn/index.html}{ISS threatened by debris cloud - Monday Nov 15th, 2021}
\begin{itemize}
\item Russia conducts an Anti-Satellite Missile Test generating at least 1,500 items of trackable debris
\item The Astronauts and Cosmonauts on the ISS entered lockdown, including donning pressure suits.
\item The situation is still being monitored although the immediate danger appears to have passed.
\end{itemize}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Orbital Debris}
Other events involving the ISS highlight the dangers from orbital debris:
\begin{itemize}
\item ISS conducts 3 evasive maneuvers to dodge debris in 2020-
\href{https://www.jpost.com/science/international-space-station-nearly-struck-by-chinese-satellite-debris-684809}{Jerusalem Post}
\item ISS hit by debris, May 2021- \href{https://www.asc-csa.gc.ca/eng/iss/news.asp}{Canadian Space Agency}
\item ISS dodged debris from 2007 Anti-Sat Missile, Nov 2021-
\href{https://www.jpost.com/science/international-space-station-nearly-struck-by-chinese-satellite-debris-684809}{Jerusalem Post (Same as above)}
\end{itemize}
%This isn't a unique experience
%list of other issues the ISS has faced
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Orbital Debris}
%Talk about Fregat breakup
Not just an issue for manned space flight.
\href{https://www.orbitaldebris.jsc.nasa.gov/quarterly-news/pdfs/odqnv25i1.pdf}{Orbital Debris Quarterly News - NASA}
\begin{itemize}
\item In May of 2020, the Satellite SL-23 Zenit Fregat's
tank suffered a second breakup event.
\item While only 65 large pieces of debris were initially identified, by Feb. 2021
over 325 had been attributed to the breakup.
\item Debris was spread in orbits between 500km and 6,000km.
\end{itemize}
\href{https://www.yahoo.com/news/space-debris-russian-missile-test-175253044.html}{Starlink and recent Anti-Sat test}
\begin{itemize}
\item Estimated that there will likely be some impact to Starlink operations.
\item 1,500 large pieces of debris initially identified.
\end{itemize}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Why now?}
%launch costs
%nano-satellites
In recent years two major changes have occurred:
\begin{enumerate}
\item New launch providers: SpaceX, RocketLab, etc. have led to plummeting launch costs
\item CubeSats and other Nano-Satellites.
\end{enumerate}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Goals}
%Model decision making of satellite operators to be able to investigate policies to reduce kessler syndrome.
Goals:
\begin{itemize}
\item Model the choices
facing Satellite Constellation Operators
and the optimal policy response.
\item Investigate the effect of various policies on debris pollution
\end{itemize}
\end{frame}
%-------------------------------
%-------------------------------------------------------------------------------------
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
\section{TOC}
%-------------------------------------------------------------------------------------
\begin{frame}[allowframebreaks] %Allow frame breaks
\frametitle{Overview} % Table of contents slide, comment this out to remove it
\tableofcontents
%Planned TOC
% See ../outline2.txt
\end{frame}
%-------------------------------------------------------------------------------------
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
\section{Literature}
%-------------------------------------------------------------------------------------
\begin{frame}
\frametitle{Past Literature}
Key elements of recent literature.
\begin{enumerate}
\item \cite{Kessler1978}: Raised issue of runaway orbital pollution.
\item \cite{Adilov2015}: Described 2 period salop model of interactions.
\item \cite{Adilov2018,Adilov2018a}: Described an infinite period model with symmetric competitive interactions.
\item \cite{RaoRondina2020}: Describe a symmetric infinite period model (first to do so).
\item \cite{Rao2020}: Examine the effect of Orbital-Use fees, finding they would quadruple the long-run value produced by the space industry.
\end{enumerate}
\end{frame}
%-------------------------------
%-------------------------------------------------------------------------------------
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
\section{Model}
%-------------------------------------------------------------------------------------
\begin{frame}
\frametitle{Overview}
\begin{itemize}
\item Mathematical Notation
\item Law of motion for debris
\item Law of motion for satellite stocks
\item Kessler Syndrome
\item Markov Decision Problems
\end{itemize}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Mathematical Notation}
\begin{itemize}
\item $S_t$: The set of constellation satellites stocks.
\item $s^i_t$: The number of satellites (stock) for constellation $i$.
\item $D_t$: The level of debris.
\item $X_t$: The set of launches.
\item $x^i_t$: The launches from constellation $i$.
\end{itemize}
\end{frame}
%-------------------------------
%------------------------------------
\subsection{Laws of Motion}
%------------------------------------
\begin{frame}
\frametitle{Debris}
Law of motion for debris
\begin{align}
D_{t+1} =& (1-\delta)D_t \tag{Debris decay.} \\
&+ g\cdot D_t \tag{Debris produced by collision with debris.} \\
&+ \gamma \sum^N_{i=1} \left(1-R^i(S_t,D_t) \right) s^i_t \tag{Debris produced by satellite destruction.} \\
&+ \Gamma \sum^n_{j=1} x^j_t \tag{Debris produced by launches.}
\end{align}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Satellite Stocks}
Law of motion for satellite stocks
\begin{align}
s^i_{t+1} =& \left(
R^i(S_t,D_t,X_t)
- \eta
\right) \cdot s^i_t
+ x^i_t
\end{align}
\begin{itemize}
\item $\eta$ is the orbit decay rate of satellites.
\end{itemize}
\end{frame}
%-------------------------------
%------------------------------------
\subsection{Kessler Syndrome}
%------------------------------------
\begin{frame}
\frametitle{Explanation of Kessler Syndrome}
\begin{block}{Kessler Syndrome}
The situation in which collisions between
objects in orbit produce debris, and this debris begins colliding
with other objects, leading to runaway growth in debris.
As debris can persist for millennia, this may make some orbits unusable.
\autocite{Kessler1978}
\end{block}
Often described as a condition with an exponential growth of debris.
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Past approaches to Kessler Syndrome}
\begin{itemize}
\item \cite{Adilov2018}:\\
Develops an analog of Kessler Syndrome where the condition is met when satellites
are destroyed immediately after launch by debris.
\begin{align}
\left\{ (S_t,D_t) : R^i(S_t,D_t) = 0 ~ \forall i\right\}
\end{align}
\item \cite{RaoRondina2020}:\\
A working paper in which the authors develop a dynamic model and a definition of
Kessler Syndrome that captures all increasing debris levels.
\begin{align}
\left\{ (S_t,D_t) :
\lim_{t \rightarrow \infty} D_{t+1}(S_t,D_t) = \infty
\right\}
\end{align}
\end{itemize}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{My contributions}
I propose two refinements of these definitions to
simplify analyzing Kessler Syndrome in computational models.
\begin{itemize}
\item $\epsilon$-Kessler Region
\item Proto Kessler Region
\end{itemize}
\end{frame}
%-------------------------------------------------------
\begin{frame}
\frametitle{$\epsilon$-Kessler Region}
\begin{block}{$\epsilon$-Kessler Region}
\begin{align}
\kappa_\epsilon = \left\{ \left(S_t, D_t \right) : \forall k \geq 0,
~~ D_{t+k+1} - D_{t+k} \geq \epsilon > 0 \right\}
\end{align}
\end{block}
Notable Features
\begin{itemize}
\item $\epsilon$ can be calibrated to capture only economically significant growth.
\item Requires an explicit description of what is considered economically significant.
\item Guarantees divergent behavior.
\item Simulated transition paths can identify the region.
\end{itemize}
\end{frame}
%-------------------------------------------------------
\begin{frame}
\frametitle{Proto Kessler Region}
\begin{block}{Proto Kessler Region}
\begin{align}
\kappa_\text{proto} =
\left\{
\left(S_t,D_t \right) : ~~ D_{t+1} - D_{t} \geq \epsilon_\text{proto}
\right\}
\end{align}
\end{block}
Notable Features
\begin{itemize}
\item $\epsilon_\text{proto}$ can be calibrated to capture only economically significant growth.
\item Requires an explicit description of what is considered economically significant.
\item Does not guarantee divergent behavior.
\item Easily computable Kessler regions.
\end{itemize}
\end{frame}
%-------------------------------------------------------
\begin{frame}
\frametitle{Proto Kessler Region}
With the given law of motion for debris, the proto-Kessler region is:
\begin{align}
\left\{
\left(S_t,D_t \right) :
(g-\delta) D_t
+ \gamma \sum^n_{i=1} 1-R^i(S_t,D_t)
+ \Gamma \sum^n_{i=1} x^i_t(S_t,D_t)
\geq \epsilon_\text{proto}
\right\}
\end{align}
\end{frame}
%------------------------------------
\subsection{Markov Decision Problem Formulation}
%------------------------------------
\begin{frame}
\frametitle{Operator's Problem}
\begin{align}
V^i(S_t, x^{\sim i}_t, D_t) = \max_{x^i_t} u^i(S_t, D_t) -F(x^i_t)
+ \beta \left[ V^i(S_{t+1}, x^{\sim i}_{t+1}, D_{t+1}) \right]
\end{align}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Benefit Functions}
Possible benefit functions
\begin{itemize}
\item Linear (Currently working on this one)
\item Cournot Profits
\item Profits under Partial substitutability
\item Military capabilities (Keeping up with the Jones')
\end{itemize}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Planner's Problem}
\begin{align}
W(S_t, D_t) =& \max_{X_t} \left[
\sum^N_{i=1} \left(u^i(S_t, D_t) - F(x^i_t) \right)
+ \beta \left[ W(S_{t+1}, D_{t+1}) \right]\right] \notag \\
&\text{subject to:} \notag \\
& s^i_{t+1} = (R^i(S_t, D_t)) s^i_t +x^i_t ~~~ \forall i \notag \\
& D_{t+1} = (1-\delta + g)D_t
+ \gamma \sum^N_{i=1} \left(1-R^i(\vec s_t, D_t)\right) s^i_t
+ \Gamma \sum^N_{i=1} x^i_t
\end{align}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Planned model expansions}
\begin{itemize}
\item Multiple interacting orbital shells and debris terms.
\item Stochastic laws of motion
\item Multiple types of operators
\item Operators benefit functions include taxation
\end{itemize}
\end{frame}
%-------------------------------
%-------------------------------------------------------------------------------------
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
\section{Analysis}
%-------------------------------------------------------------------------------------
\begin{frame}
\frametitle{Issues}
The following issues characterize the Operators' and Planner's problem
\begin{itemize}
\item Curse of Dimensionality
\item Strategic Interaction (operators only)
\end{itemize}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Possible approaches}
Possible approaches
\begin{itemize}
            \item Standard VFI/Howard's algorithm.
\item VFI with sparse state space (dimensionality reduction).
\item Reinforcement Learning.
\item \cite{MALIAR2018} approaches using machine learning.
\end{itemize}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Chosen approach}
% maliar et al - Bellman Residuals
% present basic approach (using my notation)
% Discuss basic training loop (use sutton and bartos term Generalized Policy Iteration)
Bellman Residual minimization due to \autocite{MALIAR2018}
Use NN to approximate $V(S_t,D_t|\theta_1)$ and $X(S_t,D_t|\theta_2)$.
The loss function is:
\begin{align}
0 =& \left[
V(S_t, D_t) - F(S_t, D_t, X_t)- \beta V(S_{t+1}, D_{t+1})
\right]^2 \notag\\
&- v \left[
F(S_t, D_t, X_t) + \beta V(S_{t+1}, D_{t+1})
\right] \\
0 =& \left[
V(S_t, D_t) - F(S_t, D_t, X_t)- \beta V(S_{t+1}, D_{t+1})
- \frac{v}{2}
\right]^2
- v \left[
V(S_{t}, D_{t}) + \frac{v}{4}
\right]
\end{align}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Training Loop: Planner}
For each training epoch
\begin{enumerate}
\item Draw random data
\item train policy function
\item train value function
\end{enumerate}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Training Loop: Operators}
For each training epoch
\begin{enumerate}
\item Draw random data
\item For each operator
\begin{enumerate}
\item train policy function
\item train value function
\end{enumerate}
\end{enumerate}
\end{frame}
%-------------------------------
%------------------------------------
\subsection{Analysis so far}
%------------------------------------
\begin{frame}
\frametitle{State of the Code}
Currently functioning
\begin{itemize}
\item Planner Value and Policy training
\end{itemize}
Almost functioning
\begin{itemize}
\item Operator Value and Policy training
\item Proto-Kessler Region analysis
\end{itemize}
%Planner training
%Operator training
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Results}
        %simulated debris paths (increase the amount of debris a couple orders of magnitude)
%Protokessler region plots?
Results are currently waiting on finishing the code.
\vspace{12pt}
Some analyses I plan on completing include
\begin{itemize}
\item Kessler Region analysis
\item Free Entry conditions analysis
\end{itemize}
\end{frame}
%-------------------------------
%-------------------------------------------------------------------------------------
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
\section{Conclusion}
%-------------------------------------------------------------------------------------
%-------------------------------
\begin{frame}
\frametitle{Summary}
Summary
\begin{enumerate}
\item Created Dynamic model of the MDP facing satellite operators.
\item Defined new Kessler Regions for computational analysis.
\item Currently developing solution and simulation tools.
\item Much work left to do.
\end{enumerate}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Other Areas Needing Work}
Related Orbits Work
\begin{enumerate}
\item Adding stochastic elements to the model.
\item Parameter Estimation.
\item Rights of Way.
\item Satellite Lifetimes and constellation management.
\end{enumerate}
Related computational work
\begin{enumerate}
\item Automating the Euler Equation Residuals method.
\end{enumerate}
\end{frame}
%-------------------------------
\begin{frame}
\frametitle{Questions?}
        \centering
Any remaining questions?
\end{frame}
%-------------------------------
\begin{frame}[allowframebreaks]
\frametitle{References}
\printbibliography
\end{frame}
%\begin{frame}
% \frametitle{MarginalRevenue}
% \begin{figure}
% \tikzfig{../Assets/owned/ch8_MarginalRevenue}
% \includegraphics[height=\textheight]{../Assets/copyrighted/KrugmanObsterfeldMeliz_fig8-7.jpg}
% \label{FIG:costs}
% \caption{Average Cost Curve as firms enter.}
% \end{figure}
%\end{frame}
\end{document}
% \begin{frame}
% \frametitle{Columns}
% \begin{columns}
% \begin{column}{0.5\textwidth}
% \end{column}
% \begin{column}{0.5\textwidth}
% \begin{figure}
% \tikzfig{../Assets/owned/ch7_EstablishedAdvantageExample2}
% \label{FIG:costs}
% \caption{Setting the Stage}
% \end{figure}
% \end{column}
% \end{columns}
% \end{frame}
% %---------------------------------------------------------------

@ -0,0 +1,42 @@
\usepackage{tikz}
\usetikzlibrary{backgrounds}
\usetikzlibrary{arrows}
\usetikzlibrary{shapes,shapes.geometric,shapes.misc}
% this style is applied by default to any tikzpicture included via \tikzfig
\tikzstyle{tikzfig}=[baseline=-0.25em,scale=0.5]
% these are dummy properties used by TikZiT, but ignored by LaTex
\pgfkeys{/tikz/tikzit fill/.initial=0}
\pgfkeys{/tikz/tikzit draw/.initial=0}
\pgfkeys{/tikz/tikzit shape/.initial=0}
\pgfkeys{/tikz/tikzit category/.initial=0}
% standard layers used in .tikz files
\pgfdeclarelayer{edgelayer}
\pgfdeclarelayer{nodelayer}
\pgfsetlayers{background,edgelayer,nodelayer,main}
% style for blank nodes
\tikzstyle{none}=[inner sep=0mm]
% include a .tikz file
% include a .tikz file: \tikzfig{name} looks for name.tikz next to the
% document first, then under ./figures/; if neither file exists it draws a
% red placeholder node showing the missing figure's name instead of failing.
\newcommand{\tikzfig}[1]{%
% apply the default [tikzfig] style (baseline/scale) to the included picture
{\tikzstyle{every picture}=[tikzfig]
\IfFileExists{#1.tikz}
{\input{#1.tikz}}
{%
% fallback 1: look for the figure in the ./figures/ subdirectory
\IfFileExists{./figures/#1.tikz}
{\input{./figures/#1.tikz}}
% fallback 2: render a red "missing figure" placeholder labelled with the name
{\tikz[baseline=-0.5em]{\node[draw=red,font=\color{red},fill=red!10!white] {\textit{#1}};}}%
}}%
}
% the same as \tikzfig, but in a {center} environment
\newcommand{\ctikzfig}[1]{%
\begin{center}\rm
\tikzfig{#1}
\end{center}}
% fix strange self-loops, which are PGF/TikZ default
\tikzstyle{every loop}=[]

@ -0,0 +1,27 @@
Outline
Tell story
- Why do we care? Kessler syndrome.
- Why is space access needed? (satellite internet, GPS, military, hobbies, science, R&D, etc.)
- Why now? (cubesats, lower launch costs, massive expansion in who launches, etc.)
Present past models
-
-
-
Present Current model
-
-
-
Present work on solving models.
-
-
-
Future work

@ -0,0 +1,86 @@
# Intro
Why do we care
Possible stories:
sep 2019
2020 - 3 near misses with ISS (https://www.jpost.com/science/international-space-station-nearly-struck-by-chinese-satellite-debris-684809)
(https://orbitaldebris.jsc.nasa.gov/quarterly-news/pdfs/odqnv24i3.pdf)
May 2020 - Fregat tank breakup (left debris from 1000 to 6000 miles in altitude)
May 2021 - Canadarm2 got hit (https://www.asc-csa.gc.ca/eng/iss/news.asp, part way down)
Nov 2021 - Dodge debris from anti-satellite test in 2007 (China) (Jerusalem post above)
Nasa releases a
Monday Nov 15, 2021 - ISS astronauts have to shelter in their exit craft due to a debris cloud nearing the ISS. (https://www.space.com/space-debris-astronauts-shelter-november-2021) (https://www.youtube.com/watch?v=m-LIh0fdfq8)
- from youtube: limits the set of experiments they can work with.
Maybe explain the whole section.
Why should we care?
- All orbits are subject to some degree of pollution.
- Common uses: GPS, Military Communications, Commercial internet and TV.
- Exploratory uses: R&D of pharmaceuticals, exploration.
- Collisions and debris damage are to some degree inevitable.
- Kessler Syndrome
What is different now:
- Launch costs (https://aerospace.csis.org/data/space-launch-to-low-earth-orbit-how-much-does-it-cost/) (https://fortune.com/2017/06/17/spacex-launch-cost-competition/)
- Cubesats/nanosats (numbers at https://www.nanosats.eu/)
- in short, accessibility. With lower cost per mass to orbit, more reasons to go. With lower development costs, easier to build many small satellites. This gives us a sense of urgency.
- Anti-Satellite missiles (US, RU, CH all have capability)
- Starlink vs Kupiter vs OneWeb (UK gov)
# Present previous literature
Rao Rondina
- Major results: exploitation of common pool resource
Adilov et al
- Major results: Exploitation of common pool resource
Adilov et al
- Major results: Divergence between economic and non-economic kessler syndromes
# my model
- Kessler Syndrome work
- Model description
# Solution methods
- Issues
- High dimensionality
- Many Firms, Governments, and other organizations (How many different operators currently?)
- Interacting debris fields between orbits (see the Fregat breakup)
- Approximation is required
- state space discretization
- Not sure which states to examine
    - functional approximation
- Maliar
    - Reinforcement learning
- Choice of using NN approach
- Well supported on hardware
- Transfer learning
-
# Analysis
# major points
- Summaries of results so far.
- Request for suggestions on utility functions that might be worth investigating
- Discussion of goals
- Investigate pigouvian taxation, cleanup bonds, etc.
- Standardize interface so it is easy to estimate results.
-
- Discussion of other work that should happen
- Estimation of parameters (simulation, bayesian, calibration, best guesstimates, etc)
- Rights of way work (way to get operators to declare a no-move value?)
  - Satellite Lifetime Management and its impact on decision making. (nested overlapping generations?)
Other sources
Historical breakup events: https://www.orbitaldebris.jsc.nasa.gov/quarterly-news/pdfs/odqnv23i1.pdf
Breakups: https://www.orbitaldebris.jsc.nasa.gov/quarterly-news/pdfs/odqnv25i1.pdf
Newsletter on debris breakups: https://www.orbitaldebris.jsc.nasa.gov/quarterly-news/
remaining todo
- Review planned model expansion
- Reformulate bellman residual minimization to use Q and M
- Get code working, and run a basic analysis (proto kessler regions)

@ -0,0 +1,34 @@
# Things I have learned about PyTorch and Neural networks.
## Building models
All model building in Pytorch is based on the following three steps
1. start by creating an object that extends the nn.Module base class
1. define layers as class attributes (sequential wrapper for ease of use)
2. implement the `.forward()` method
Each layer is just a predefined 'function'.
Really, they are objects that extend the nn.Module base class.
Thus each NN can act as a layer in another NN.
For example, I reimplemented an upscaling layer in BasicNeuralNet2.
[I picked up a lot of this info here.](https://deeplizard.com/learn/video/k4jY9L8H89U)
Also, neural networks can return more than just a single output as long as the
loss function that is used for optimization can consume both of them.
Thus I could write two separate neural networks (such as for launch and partials),
and then write a third NN that binds the two together.
## Notes on functions
ReLU is a linear rectifier, it does not have any training involved.
This makes it good for working as a final cleanup of the launch function.
This also makes it not so good for the partial derivatives.
Linear is a good but basic network type.
Upscaling allows you to create more features.
Downscaling reduces the number of features (by throwing data away?).
Instead of downscaling, use a linear function to change the dimensions.
# Remaining Questions
- How do you set it up to run over a set of variables, i.e. batches?

@ -214,7 +214,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
"version": "3.9.2"
}
},
"nbformat": 4,

@ -2,7 +2,7 @@
"cells": [
{
"cell_type": "markdown",
"id": "prepared-nitrogen",
"id": "graduate-saying",
"metadata": {},
"source": [
    "Note on pytorch. NN optimization acts imperatively/by side effect as follows.\n",
@ -23,291 +23,205 @@
{
"cell_type": "code",
"execution_count": 1,
"id": "grateful-conviction",
"id": "lasting-portable",
"metadata": {},
"outputs": [],
"source": [
"import torch"
"import torch\n",
"import combined as c"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "incorrect-animal",
"execution_count": 56,
"id": "marked-paris",
"metadata": {},
"outputs": [],
"source": [
"class DoubleNetwork(torch.nn.Module):\n",
" def __init__(self, input_size,output_size,layers_size):\n",
"\n",
"class LaunchFnEstimand(torch.nn.Module):\n",
" def __init__(self, state_tensor_size,layers_size,number_constellations):\n",
" \"\"\"\n",
" Description\n",
" \"\"\"\n",
" super().__init__()\n",
" self.number_constellations = number_constellations\n",
" self.layers_size = layers_size\n",
" self.state_tensor_size = state_tensor_size\n",
" \n",
" #So, this next section constructs different layers within the NN\n",
" #sinlge linear section\n",
" self.linear_step_1a = torch.nn.Linear(input_size,layers_size)\n",
" #Scale up the input from just the tensor of states to the layer_size X number_constellations\n",
" \n",
" #single linear section\n",
" self.linear_step_2a = torch.nn.Linear(layers_size,output_size)\n",
" self.linear_step_2b = torch.nn.Linear(layers_size,output_size)\n",
" #Increase to the layer size\n",
" self.linear_1 = torch.nn.Linear(in_features=state_tensor_size, out_features=layers_size)\n",
" self.relu = torch.nn.ReLU()\n",
" self.linear_3 = torch.nn.Linear(in_features=layers_size, out_features=layers_size)\n",
" self.linear_5 = torch.nn.Linear(in_features=layers_size, out_features=number_constellations)\n",
"\n",
" \n",
" def forward(self, input_values):\n",
" \n",
" intermediate_values_a = self.linear_step_1a(input_values)\n",
" \n",
" out_values_a = self.linear_step_2a(intermediate_values_a)\n",
" out_values_b = self.linear_step_2b(intermediate_values_a)\n",
" intermediate_values = self.relu(input_values) #states should be positive anyway.\n",
" intermediate_values = self.linear_1(intermediate_values)\n",
" intermediate_values = self.linear_3(intermediate_values)\n",
" intermediate_values = self.linear_5(intermediate_values)\n",
" intermediate_values = self.relu(intermediate_values) #launches are always positive\n",
" \n",
" return out_values_a,out_values_b"
" return intermediate_values"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "ruled-letter",
"execution_count": 61,
"id": "artificial-gilbert",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
" tensor(3.5646, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(11.7849, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(24.8772, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(5.4752, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.4457, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0925, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0490, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0290, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0178, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0111, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0070, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0045, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0029, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0019, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0012, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0008, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0005, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0003, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0002, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0001, grad_fn=<AddBackward0>)\n"
"tensor(0.7175, grad_fn=<AddBackward0>)\n",
"tensor(0.2107, grad_fn=<AddBackward0>)\n",
"tensor(0.0724, grad_fn=<AddBackward0>)\n",
"tensor(0.0259, grad_fn=<AddBackward0>)\n",
"tensor(0.0094, grad_fn=<AddBackward0>)\n",
"tensor(0.0034, grad_fn=<AddBackward0>)\n",
"tensor(0.0012, grad_fn=<AddBackward0>)\n",
"tensor(0.0004, grad_fn=<AddBackward0>)\n",
"tensor(0.0002, grad_fn=<AddBackward0>)\n",
"tensor(5.8468e-05, grad_fn=<AddBackward0>)\n"
]
},
{
"data": {
"text/plain": [
"tensor([[[0.0046, 0.0000]]], grad_fn=<ReluBackward0>)"
]
},
"execution_count": 61,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"model = DoubleNetwork(input_size = 5, output_size=5, layers_size=15)\n",
"\n",
"data_in = torch.tensor([1.5,2,3,4,5])\n",
"\n",
"data_in\n",
"\n",
"target = torch.zeros(5)\n",
"\n",
"def loss_fn2(output,target):\n",
" return sum((output[1] +output[0] - target)**2)\n",
" #could add a simplicity assumption i.e. l1 on parameters.\n",
"launch = LaunchFnEstimand(3,12,2)\n",
"\n",
"#Prep Optimizer\n",
"optimizer = torch.optim.SGD(model.parameters(),lr=0.01)\n",
"optimizer = torch.optim.SGD(launch.parameters(),lr=0.01)\n",
"\n",
"#get loss function\n",
"def loss_fn5(output):\n",
" return sum(sum(sum((output)**2)))\n",
"\n",
"for i in range(20):\n",
"for i in range(10):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = model.forward(data_in)\n",
" output\n",
" output = launch.forward(test)\n",
"\n",
" l = loss_fn2(output, target)\n",
" l = loss_fn5(output)\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" print(\"\\n\",l)"
" print(l)\n",
" \n",
"\n",
"launch.forward(test)"
]
},
{
"cell_type": "code",
"execution_count": 60,
"id": "political-manchester",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[[0.0000, 0.9998]]], grad_fn=<ReluBackward0>)"
]
},
"execution_count": 60,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"launch(test)"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "quantitative-keeping",
"execution_count": 12,
"id": "delayed-bikini",
"metadata": {},
"outputs": [],
"source": [
"class SplitNetwork(torch.nn.Module):\n",
" def __init__(self, input_size,output_size_a,output_size_b,layers_size):\n",
"class EstimandNN(torch.nn.Module):\n",
" def __init__(self, state_tensor_size,layers_size,number_constellations):\n",
" super().__init__()\n",
" \n",
" #So, this next section constructs different layers within the NN\n",
" #sinlge linear section\n",
" self.linear_step_1 = torch.nn.Linear(input_size,layers_size)\n",
" self.linear_step_2 = torch.nn.Linear(layers_size,layers_size)\n",
" self.linear_step_3 = torch.nn.Linear(layers_size,layers_size)\n",
" self.linear_step_4 = torch.nn.Linear(layers_size,layers_size)\n",
" \n",
" #single linear section\n",
" self.linear_step_split_a = torch.nn.Linear(layers_size,output_size_a)\n",
" self.linear_step_split_b = torch.nn.Linear(layers_size,output_size_b)\n",
" self.partials_estimator = PartialDerivativesEstimand(state_tensor_size,layers_size,number_constellations) #TODO\n",
" self.launch_estimator = LaunchFnEstimand(state_tensor_size,layers_size,number_constellations)\n",
" \n",
" def forward(self, input_values):\n",
" partials = self.partials_estimator(input_values)\n",
" launch = self.launch_estimator(input_values)\n",
" \n",
" intermediate_values = self.linear_step_1(input_values)\n",
" intermediate_values = self.linear_step_2(intermediate_values)\n",
" intermediate_values = self.linear_step_3(intermediate_values)\n",
" intermediate_values = self.linear_step_4(intermediate_values)\n",
" \n",
" out_values_a = self.linear_step_split_a(intermediate_values)\n",
" out_values_b = self.linear_step_split_b(intermediate_values)\n",
" \n",
" return out_values_a,out_values_b"
" return c.EstimandInterface(partials,launch)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "vietnamese-prophet",
"execution_count": 13,
"id": "stable-edmonton",
"metadata": {},
"outputs": [],
"source": [
"model = SplitNetwork(input_size = 6, output_size_a=5, output_size_b=7, layers_size=15)\n",
"\n",
"data_in = torch.tensor([1.5,2,3,4,5,6])\n",
"\n",
"\n",
"target_a = torch.zeros(5)\n",
"target_b = torch.ones(7)\n",
"\n",
"def loss_fn3(output,target_a, target_b):\n",
" return sum((output[0] - target_a)**2) + sum((output[1] - target_b)**2)\n",
" #could add a simplicity assumption i.e. l1 on parameters."
"enn = EstimandNN(3,12,2)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "limiting-slide",
"execution_count": 15,
"id": "breeding-afghanistan",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
" tensor(9.6420, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(4.1914, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(5.1337, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(1.4943, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.5210, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.1217, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0605, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0256, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0126, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0057, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0028, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0013, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0006, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0003, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(0.0001, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(7.2050e-05, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(3.5139e-05, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(1.7068e-05, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(8.3342e-06, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(4.0624e-06, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(1.9857e-06, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(9.7029e-07, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(4.7492e-07, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(2.3232e-07, grad_fn=<AddBackward0>)\n",
"\n",
" tensor(1.1381e-07, grad_fn=<AddBackward0>)\n"
"Launch Decisions and Partial Derivativs of value function with\n",
"\tlaunches\n",
"\t\t tensor([[[0.0000, 0.0020]]], grad_fn=<ReluBackward0>)\n",
"\tPartials\n",
"\t\ttensor([[0.0000, 0.0000],\n",
" [1.7938, 1.7938],\n",
" [0.0000, 0.0000],\n",
" [2.8751, 2.8751],\n",
" [1.4894, 1.4894],\n",
" [1.4614, 1.4614],\n",
" [0.0000, 0.0000],\n",
" [2.9800, 2.9800],\n",
" [0.0000, 0.0000],\n",
" [0.0000, 0.0000],\n",
" [0.0000, 0.0000],\n",
" [0.0000, 0.0000]], grad_fn=<ReluBackward0>)\n"
]
}
],
"source": [
"#Prep Optimizer\n",
"optimizer = torch.optim.SGD(model.parameters(),lr=0.01)\n",
"\n",
"for i in range(25):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = model.forward(data_in)\n",
" output\n",
"\n",
" l = loss_fn3(output, target_a, target_b)\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" print(\"\\n\",l)"
"print(enn(test))"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "elder-karen",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(tensor([ 3.4232e-05, 3.7350e-05, 5.3748e-05, -2.7344e-05, -1.0052e-04],\n",
" grad_fn=<AddBackward0>),\n",
" tensor([1.0001, 1.0001, 1.0000, 1.0000, 1.0001, 1.0000, 1.0001],\n",
" grad_fn=<AddBackward0>))"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "agreed-community",
"id": "applicable-relay",
"metadata": {},
"outputs": [],
"source": []

@ -3,7 +3,7 @@
{
"cell_type": "code",
"execution_count": 1,
"id": "consolidated-separation",
"id": "indie-evolution",
"metadata": {
"tags": []
},
@ -74,7 +74,7 @@
},
{
"cell_type": "markdown",
"id": "numeric-coral",
"id": "stuffed-firmware",
"metadata": {},
"source": [
"# Setup Functions\n",
@ -84,7 +84,7 @@
{
"cell_type": "code",
"execution_count": 2,
"id": "detected-still",
"id": "mexican-serial",
"metadata": {},
"outputs": [],
"source": [
@ -131,7 +131,7 @@
},
{
"cell_type": "markdown",
"id": "fundamental-fusion",
"id": "public-alloy",
"metadata": {},
"source": [
"# functions related to transitions"
@ -140,9 +140,18 @@
{
"cell_type": "code",
"execution_count": 3,
"id": "palestinian-uganda",
"id": "advised-enemy",
"metadata": {},
"outputs": [],
"outputs": [
{
"ename": "SyntaxError",
"evalue": "invalid syntax (<ipython-input-3-2a8ca63b5912>, line 52)",
"output_type": "error",
"traceback": [
"\u001b[0;36m File \u001b[0;32m\"<ipython-input-3-2a8ca63b5912>\"\u001b[0;36m, line \u001b[0;32m52\u001b[0m\n\u001b[0;31m launch = neural_network.forward().\u001b[0m\n\u001b[0m ^\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m invalid syntax\n"
]
}
],
"source": [
"def single_transition(laws_motion_fn, profit_fn, stocks, debris, neural_network):\n",
" \"\"\"\n",
@ -184,7 +193,7 @@
" #Includes rearranging the jacobian of profit.\n",
"\n",
" #Return the transitioned values\n",
" return ( A.inverse()/BETA ) @ T\n",
" return ( A.inverse() ) @ T\n",
"\n",
"\n",
"# This function wraps the single transition and handles updating dates etc.\n",
@ -214,7 +223,7 @@
},
{
"cell_type": "markdown",
"id": "mexican-illness",
"id": "suspected-clerk",
"metadata": {},
"source": [
"## Setup functions related to the problem"
@ -222,8 +231,8 @@
},
{
"cell_type": "code",
"execution_count": 4,
"id": "republican-designer",
"execution_count": null,
"id": "confused-conclusion",
"metadata": {
"tags": []
},
@ -269,8 +278,8 @@
},
{
"cell_type": "code",
"execution_count": 5,
"id": "introductory-forwarding",
"execution_count": null,
"id": "miniature-thread",
"metadata": {},
"outputs": [],
"source": [
@ -291,7 +300,7 @@
},
{
"cell_type": "markdown",
"id": "concrete-movement",
"id": "yellow-frank",
"metadata": {},
"source": [
"# Actual calculations"
@ -299,21 +308,10 @@
},
{
"cell_type": "code",
"execution_count": 6,
"id": "wrong-values",
"execution_count": null,
"id": "enormous-provider",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(3, 11)"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"#number of states\n",
"N = 5\n",
@ -356,8 +354,8 @@
},
{
"cell_type": "code",
"execution_count": 7,
"id": "charitable-cleanup",
"execution_count": null,
"id": "biblical-blake",
"metadata": {},
"outputs": [],
"source": [
@ -368,27 +366,10 @@
},
{
"cell_type": "code",
"execution_count": 8,
"id": "floppy-arkansas",
"execution_count": null,
"id": "given-clearance",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Launch Decisions and Partial Derivativs of value function with\n",
"\t states\n",
"\t\t tensor([1., 1., 1., 1., 1.], requires_grad=True)\n",
"\tPartials\n",
"\t\ttensor([[1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.],\n",
" [1., 1., 1., 1., 1., 1.]], requires_grad=True)\n"
]
}
],
"outputs": [],
"source": [
"m = ModelMockup()\n",
"print(m.forward(stocks,debris))"
@ -396,7 +377,7 @@
},
{
"cell_type": "markdown",
"id": "dressed-preparation",
"id": "higher-windsor",
"metadata": {},
"source": [
"# Optimatility conditions"
@ -404,8 +385,8 @@
},
{
"cell_type": "code",
"execution_count": 9,
"id": "hydraulic-powder",
"execution_count": null,
"id": "breeding-sussex",
"metadata": {},
"outputs": [],
"source": [
@ -449,7 +430,7 @@
},
{
"cell_type": "markdown",
"id": "provincial-medline",
"id": "actual-polish",
"metadata": {},
"source": [
"## Now to set up the recursive set of optimatliy conditions"
@ -457,23 +438,10 @@
},
{
"cell_type": "code",
"execution_count": 10,
"id": "monetary-bermuda",
"execution_count": null,
"id": "thrown-subject",
"metadata": {},
"outputs": [
{
"ename": "TypeError",
"evalue": "'ModelMockup' object is not callable",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------\u001b[0m",
"\u001b[0;31mTypeError\u001b[0mTraceback (most recent call last)",
"\u001b[0;32m<ipython-input-10-282ba729dd5a>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0mbase_data\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mstocks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdebris\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mprofit\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlaws_of_motion\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mones\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m6\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mrequires_grad\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mTrue\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mlaunches\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 2\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mf\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mcompose_recursive_functions\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtransition_wrapper\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;36m3\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 3\u001b[0;31m \u001b[0mresult\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mf\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mbase_data\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 4\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0;31m#unpack results\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;32m<ipython-input-3-fcc1e6d7dbd7>\u001b[0m in \u001b[0;36mtransition_wrapper\u001b[0;34m(data_in)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 49\u001b[0m \u001b[0;31m#Calculate new states\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 50\u001b[0;31m \u001b[0mnew_stocks\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnew_debris\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mlaws_motion_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstocks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdebris\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlaunch_fn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mstocks\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0mdebris\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 51\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;31m#WARNING: RECURSION: You may break your head...\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
"\u001b[0;31mTypeError\u001b[0m: 'ModelMockup' object is not callable"
]
}
],
"outputs": [],
"source": [
"def recursive_optimality(base_data,transition_wrapper):\n",
" #create and return a set of transition wrappers\n",
@ -495,7 +463,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "imported-richards",
"id": "strange-appliance",
"metadata": {},
"outputs": [],
"source": [
@ -506,7 +474,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "excited-question",
"id": "friendly-acrobat",
"metadata": {},
"outputs": [],
"source": [
@ -516,14 +484,14 @@
{
"cell_type": "code",
"execution_count": null,
"id": "outer-wages",
"id": "patient-builder",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "markdown",
"id": "chronic-drilling",
"id": "referenced-defense",
"metadata": {},
"source": [
"Notes so far\n",
@ -548,7 +516,7 @@
{
"cell_type": "code",
"execution_count": null,
"id": "necessary-incident",
"id": "incorrect-carol",
"metadata": {},
"outputs": [],
"source": []

@ -0,0 +1,208 @@
import torch
from torch.autograd.functional import jacobian
import itertools
import math
import abc
class EstimandInterface():
    """
    A clean interface for working with the estimand (i.e. the thing we are
    trying to estimate).

    In general, we are trying to estimate the choice variables and the
    partial derivatives of the value functions.

    This class wraps output from the neural network (or other estimand),
    allowing me to
    - easily substitute various types of launch functions by having a common
      interface (this eases testing)
    - check dimensionality etc without dealing with randomness (again, easing
      testing)
    - reason more cleanly about the component pieces (easing programming)
    - provide a clean interface to find constellation level launch decisions etc.

    It takes inputs of two general categories:
    - the choice function results
    - the partial derivatives of the value function
    """

    def __init__(self, partials, choices, deorbits=None):
        """
        partials -- tensor of value-function partial derivatives; the last
                    axis indexes states.
        choices  -- tensor of choice (launch) decisions; the last axis
                    indexes constellations.
        deorbits -- optional deorbit decisions.  Fix: the original accepted
                    this argument but silently discarded it; it is now stored
                    so callers that pass it can read it back.
        """
        self.partials = partials
        self.choices = choices
        self.deorbits = deorbits

    @property
    def number_constellations(self):
        # Number of constellations, read off the last axis of `choices`.
        # NOTE(review): the original flagged this with "#fix this" (dead
        # `pass` removed) -- confirm the last axis is always the
        # constellation axis once batching conventions settle.
        return self.choices.shape[-1]

    @property
    def number_states(self):
        # Number of states, read off the last axis of `partials`.
        # This depends on the debris trackers, technically.
        # NOTE(review): original also flagged this with "#fix this".
        return self.partials.shape[-1]

    def choice_single(self, constellation):
        """Launch decision for one constellation, via a one-hot contraction."""
        filter_tensor = torch.zeros(self.number_constellations)
        filter_tensor[constellation] = 1.0
        return self.choices @ filter_tensor

    def choice_vector(self, constellation):
        """Launch decisions masked so only `constellation`'s column is nonzero."""
        filter_tensor = torch.zeros(self.number_constellations)
        filter_tensor[constellation] = 1.0
        return self.choices * filter_tensor

    def partial_vector(self, constellation):
        """Value-function partials for one constellation (one-hot contraction)."""
        filter_tensor = torch.zeros(self.number_states)
        filter_tensor[constellation] = 1.0
        return self.partials @ filter_tensor

    def partial_matrix(self, constellation):
        """Partials masked so only `constellation`'s column is nonzero."""
        filter_tensor = torch.zeros(self.number_states)
        filter_tensor[constellation] = 1.0
        return self.partials * filter_tensor

    def __str__(self):
        # Human-readable descriptor.  Fix: corrected the user-visible
        # "Derivativs" typo to "Derivatives".
        return "Launch Decisions and Partial Derivatives of value function with\n\tlaunches\n\t\t {}\n\tPartials\n\t\t{}".format(self.choices, self.partials)
class ChoiceFunction(torch.nn.Module):
"""
This is used to estimate the launch function
"""
def __init__(self
,batch_size
,number_states
,number_choices
,number_constellations
,layer_size=12
):
super().__init__()
#preprocess
self.preprocess = torch.nn.Linear(in_features=number_states, out_features=layer_size)
#upsample
self.upsample = lambda x: torch.nn.Upsample(scale_factor=number_constellations)(x).view(batch_size
,number_constellations
,layer_size)
self.relu = torch.nn.ReLU() #used for coersion to the state space we care about.
#sequential steps
self.sequential = torch.nn.Sequential(
torch.nn.Linear(in_features=layer_size, out_features=layer_size)
#who knows if a convolution might help here.
,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
)
#reduce the feature axis to match expected results
self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_choices)
def forward(self, input_values):
intermediate_values = self.relu(input_values) #states should be positive anyway.
intermediate_values = self.preprocess(intermediate_values)
intermediate_values = self.upsample(intermediate_values)
intermediate_values = self.sequential(intermediate_values)
intermediate_values = self.feature_reduction(intermediate_values)
intermediate_values = self.relu(intermediate_values) #launches are always positive, this may need removed for other types of choices.
return intermediate_values
class PartialDerivativesOfValueEstimand(torch.nn.Module):
"""
This is used to estimate the partial derivatives of the value functions
"""
def __init__(self
,batch_size
,number_constellations
,number_states
,layer_size=12):
super().__init__()
self.batch_size = batch_size #used for upscaling
self.number_constellations = number_constellations
self.number_states = number_states
self.layer_size = layer_size
#preprocess (single linear layer in case there is anything that needs to happen to all states)
self.preprocess = torch.nn.Sequential(
torch.nn.ReLU() #cleanup as states must be positive
,torch.nn.Linear(in_features = self.number_states, out_features=self.number_states)
)
#upsample to get the basic dimensionality correct. From (batch,State) to (batch, constellation, state). Includes a reshape
self.upsample = lambda x: torch.nn.Upsample(scale_factor=self.number_constellations)(x).view(self.batch_size
,self.number_constellations
,self.number_states)
#sequential steps
self.sequential = torch.nn.Sequential(
torch.nn.Linear(in_features=number_states, out_features=layer_size)
#who knows if a convolution or other layer type might help here.
,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
)
#reduce the feature axis to match expected results
self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_states)
def forward(self, states):
#Note that the input values are just going to be the state variables
#TODO:check that input values match the prepared dimension?
#preprocess
intermediate = self.preprocess(states)
#upscale the input values
intermediate = self.upsample(intermediate)
#intermediate processing
intermediate = self.sequential(intermediate)
#reduce feature axis to match the expected number of partials
intermediate = self.feature_reduction(intermediate)
return intermediate
class EstimandNN(torch.nn.Module):
    """
    This neural network takes the current states as input values and returns both
    the partial derivatives of the value function and the launch function,
    wrapped together in an EstimandInterface.
    """
    def __init__(self
                ,batch_size
                ,number_states
                ,number_choices
                ,number_constellations
                ,layer_size=12
                ):
        super().__init__()
        #The two sub-estimators share the hyperparameters but have their own weights.
        self.partials_estimator = PartialDerivativesOfValueEstimand(batch_size, number_constellations, number_states, layer_size)
        self.launch_estimator = ChoiceFunction(batch_size, number_states, number_choices, number_constellations, layer_size)
    def forward(self, input_values):
        #(Removed the stray dead `pass` that previously sat at the top of this method.)
        partials = self.partials_estimator(input_values)
        launch = self.launch_estimator(input_values)
        return EstimandInterface(partials,launch)

@ -0,0 +1,203 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "working-peeing",
"metadata": {},
"outputs": [],
"source": [
"import torch"
]
},
{
"cell_type": "markdown",
"id": "decimal-boundary",
"metadata": {},
"source": [
"The purpose of this notebook is to allow me to investigate proper shaping of inputs.\n",
"\n",
"Typically pytorch chooses a tensor specification\n",
"$$\n",
"(N, .*)\n",
"$$\n",
"where $N$ is the batch size.\n",
"For example a Convolutional NN layer expects\n",
"$$\n",
" NCHW\n",
"$$\n",
"for BatchSize,ChannelSize,Height,Width.\n",
"On the other hand, Linear expects\n",
"$$\n",
" N.*H\n",
"$$\n",
"for BatchSize,any number of other dimensions, in_features\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 15,
"id": "eligible-isolation",
"metadata": {},
"outputs": [],
"source": [
"class PartialDerivativesEstimand(torch.nn.Module):\n",
" def __init__(self,batch_size, number_constellations, number_states,scale_factor=4, layer_size=12):\n",
" \"\"\"\n",
" \n",
" \"\"\"\n",
" super().__init__()\n",
" self.batch_size = batch_size\n",
" self.number_constellations = number_constellations\n",
" self.number_states = number_states\n",
" self.scale_factor = scale_factor\n",
" self.layer_size = layer_size\n",
" \n",
" \n",
" #preprocess (single linear layer in case there is anything that needs to happen to all states)\n",
" self.preprocess = torch.nn.Sequential(\n",
" torch.nn.ReLU() #cleanup as states must be positive\n",
" ,torch.nn.Linear(in_features = self.number_states, out_features=self.number_states)\n",
" )\n",
" #upscale to get the basic dimensionality correct. From (batch,State) to (batch, constellation, state). Includes a reshape\n",
" self.upsample = lambda x: torch.nn.Upsample(scale_factor=self.number_constellations)(x).view(self.batch_size\n",
" ,self.number_constellations\n",
" ,self.number_states)\n",
" \n",
" #sequential steps\n",
" self.sequential = torch.nn.Sequential(\n",
" torch.nn.Linear(in_features=number_states, out_features=layer_size)\n",
" #who knows if a convolution might help here.\n",
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
" )\n",
"\n",
" #reduce axis to match expectation\n",
" self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_states)\n",
" \n",
" def forward(self, input_values):\n",
" #Note that the input values are just going to be the state variables\n",
" #TODO:check that input values match the prepared dimension?\n",
" \n",
" #preprocess\n",
" intermediate = self.preprocess(input_values)\n",
" \n",
" #upscale the input values\n",
" intermediate = self.upsample(intermediate)\n",
" \n",
" #intermediate processing\n",
" intermediate = self.sequential(intermediate)\n",
" \n",
" #reduce feature axis to match the expected number of partials\n",
" intermediate = self.feature_reduction(intermediate)\n",
" \n",
" \n",
" return intermediate"
]
},
{
"cell_type": "code",
"execution_count": 19,
"id": "literary-desktop",
"metadata": {},
"outputs": [],
"source": [
"batch_size = 2\n",
"constellations = 2\n",
"number_states = constellations+1\n",
"\n",
"#initialize the NN\n",
"a = PartialDerivativesEstimand(batch_size,constellations,number_states,scale_factor=2)\n",
"\n",
"#example state\n",
"s = torch.rand(size=(batch_size,1,number_states))"
]
},
{
"cell_type": "code",
"execution_count": 22,
"id": "second-graduation",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[[0.9283, 0.9414, 0.3426]],\n",
"\n",
" [[0.1902, 0.0369, 0.4699]]])"
]
},
"execution_count": 22,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"s"
]
},
{
"cell_type": "code",
"execution_count": 23,
"id": "reliable-alberta",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[[-0.1991, 0.1335, 0.2821],\n",
" [-0.3549, 0.0213, 0.2322]],\n",
"\n",
" [[-0.1701, 0.1557, 0.2954],\n",
" [-0.3017, 0.0690, 0.2419]]], grad_fn=<AddBackward0>)"
]
},
"execution_count": 23,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a(s)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "horizontal-judges",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "instant-lindsay",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -1,4 +1,5 @@
# COMPUTATIONAL TODO
***MOVE EVERYTHING HERE OVER TO ISSUES IN THE GITHUB TRACKER***
## Completed steps
- implement 'launch function as a function' portion
@ -17,7 +18,11 @@
- get a basic gradient descent/optimization of launch function working.
- add satellite deorbit to model.
- turn this into a framework in a module, not just a single notebook (long term goal)
- turn testing_combined into an actual test setup
- change prints to assertions
- turn into functions
- add into a testing framework
- this isn't that important.
## CONCERNS
So I need to think about how to handle the launch functions.
Currently, my launch function takes in the stocks and debris levels and returns a launch decision for each constellation.

@ -0,0 +1,793 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "french-experiment",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from torch.autograd.functional import jacobian\n",
"import itertools\n",
"import math\n",
"import abc\n",
"\n",
"class EconomicAgent(metaclass=abc.ABCMeta):\n",
" @abc.abstractmethod\n",
" def period_benefit(self,state,estimand_interface):\n",
" pass\n",
" @abc.abstractmethod\n",
" def _period_benefit(self):\n",
" pass\n",
" @abc.abstractmethod\n",
" def period_benefit_jacobian_wrt_states(self):\n",
" pass\n",
" @abc.abstractmethod\n",
" def _period_benefit_jacobian_wrt_states(self):\n",
" pass\n",
" @abc.abstractmethod\n",
" def period_benefit_jacobian_wrt_launches(self):\n",
" pass\n",
" @abc.abstractmethod\n",
" def _period_benefit_jacobian_wrt_launches(self):\n",
" pass\n",
"\n",
"class LinearProfit(EconomicAgent):\n",
" \"\"\"\n",
" The simplest type of profit function available.\n",
" \"\"\"\n",
" def __init__(self, constellation_number, discount_factor, benefit_weight, launch_cost, deorbit_cost=0):\n",
" #track which constellation this is.\n",
" self.constellation_number = constellation_number\n",
"\n",
" #parameters describing the agent's situation\n",
" self.discount_factor = discount_factor\n",
" self.benefit_weights = benefit_weight\n",
" self.launch_cost = launch_cost\n",
" self.deorbit_cost = deorbit_cost\n",
"\n",
" def __str__(self):\n",
" return \"LinearProfit\\n Benefit weights:\\t{}\\n launch cost:\\t{}\\n Deorbit cost:\\t{}\".format(self.benefit_weights, self.launch_cost, self.deorbit_cost)\n",
"\n",
" def period_benefit(self,state,estimand_interface):\n",
" return self._period_benefit(state.stocks, state.debris, estimand_interface.choices)\n",
" \n",
" def _period_benefit(self,stocks,debris,choice):\n",
" profits = self.benefit_weights @ stocks \\\n",
" - self.launch_cost * choice[self.constellation_number] #\\ \n",
" #- deorbit_cost @ deorbits[self.constellation_number]\n",
" return profits\n",
"\n",
" def period_benefit_jacobian_wrt_states(self, states, estimand_interface):\n",
" return self._period_benefit_jacobian_wrt_states(states.stocks, states.debris, estimand_interface.choices)\n",
"\n",
" def _period_benefit_jacobian_wrt_states(self, stocks, debris, launches):\n",
" jac = jacobian(self._period_benefit, (stocks,debris,launches))\n",
" return torch.cat((jac[0], jac[1]))\n",
" \n",
" def period_benefit_jacobian_wrt_launches(self, states, estimand_interface):\n",
" return self._period_benefit_jacobian_wrt_launches(states.stocks, states.debris, estimand_interface.choices)\n",
"\n",
" def _period_benefit_jacobian_wrt_launches(self,stocks,debris,launches):\n",
" jac = jacobian(self._period_benefit, (stocks,debris,launches))\n",
" return jac[2]\n",
"\n",
"class States():\n",
" \"\"\"\n",
" This is supposed to capture the state variables of the model, to create a common interface \n",
" when passing between functions.\n",
" \"\"\"\n",
" def __init__(self, stocks,debris):\n",
" self.stocks = stocks\n",
" self.debris = debris\n",
" \n",
"\n",
" def __str__(self):\n",
" return \"stocks\\t{} \\ndebris\\t {}\".format(self.stocks,self.debris)\n",
"\n",
" @property\n",
" def number_constellations(self):\n",
" return len(self.stocks)\n",
" @property\n",
" def number_debris_trackers(self):\n",
" return len(self.debris)\n",
"\n",
" \n",
"class EstimandInterface():\n",
" \"\"\"\n",
" This defines a clean interface for working with the estimand (i.e. thing we are trying to estimate).\n",
" In general, we are trying to estimate the choice variables and the partial derivatives of the value functions.\n",
" This \n",
"\n",
" This class wraps output for the neural network (or other estimand), allowing me to \n",
" - easily substitute various types of launch functions by having a common interface\n",
" - this eases testing\n",
" - check dimensionality etc without dealing with randomness\n",
" - again, easing testing\n",
" - reason more cleanly about the component pieces\n",
" - easing programming\n",
" - provide a clean interface to find constellation level launch decisions etc.\n",
"\n",
" It takes inputs of two general categories:\n",
" - the choice function results\n",
" - the partial derivatives of the value function\n",
" \"\"\"\n",
" def __init__(self, partials, choices, deorbits=None):\n",
" self.partials = partials\n",
" self.choices = choices\n",
" \n",
" @property\n",
" def number_constellations(self):\n",
" pass #fix this\n",
" return self.choices.shape[-1]\n",
" @property\n",
" def number_states(self):\n",
" pass #fix this\n",
" return self.partials.shape[-1] #This depends on the debris trackers technically.\n",
"\n",
" def choice_single(self, constellation):\n",
" #returns the launch decision for the constellation of interest\n",
" \n",
" filter_tensor = torch.zeros(self.number_constellations)\n",
" filter_tensor[constellation] = 1.0\n",
" \n",
" return self.choices @ filter_tensor\n",
" \n",
" def choice_vector(self, constellation):\n",
" #returns the launch decision for the constellation of interest as a vector\n",
" \n",
" filter_tensor = torch.zeros(self.number_constellations)\n",
" filter_tensor[constellation] = 1.0\n",
" \n",
" return self.choices * filter_tensor\n",
" \n",
" def partial_vector(self, constellation):\n",
" #returns the partials of the value function corresponding to the constellation of interest\n",
" \n",
" filter_tensor = torch.zeros(self.number_states)\n",
" filter_tensor[constellation] = 1.0\n",
" \n",
" return self.partials @ filter_tensor\n",
" \n",
" def partial_matrix(self, constellation):\n",
" #returns the partials of the value function corresponding to \n",
" #the constellation of interest as a matrix\n",
" \n",
" filter_tensor = torch.zeros(self.number_states)\n",
" filter_tensor[constellation] = 1.0\n",
" \n",
" return self.partials * filter_tensor\n",
" \n",
" def __str__(self):\n",
" #just a human readable descriptor\n",
" return \"Launch Decisions and Partial Derivativs of value function with\\n\\tlaunches\\n\\t\\t {}\\n\\tPartials\\n\\t\\t{}\".format(self.choices,self.partials)\n",
"\n",
"\n",
"class ChoiceFunction(torch.nn.Module):\n",
" \"\"\"\n",
" This is used to estimate the launch function\n",
" \"\"\"\n",
" def __init__(self\n",
" ,batch_size\n",
" ,number_states\n",
" ,number_choices\n",
" ,number_constellations\n",
" ,layer_size=12\n",
" ):\n",
" super().__init__()\n",
" \n",
" #preprocess\n",
" self.preprocess = torch.nn.Linear(in_features=number_states, out_features=layer_size)\n",
" \n",
" #upsample\n",
" self.upsample = lambda x: torch.nn.Upsample(scale_factor=number_constellations)(x).view(batch_size\n",
" ,number_constellations\n",
" ,layer_size)\n",
" \n",
" self.relu = torch.nn.ReLU() #used for coersion to the state space we care about.\n",
" \n",
" \n",
" #sequential steps\n",
" self.sequential = torch.nn.Sequential(\n",
" torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
" #who knows if a convolution might help here.\n",
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
" )\n",
"\n",
" #reduce the feature axis to match expected results\n",
" self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_choices)\n",
"\n",
" \n",
" def forward(self, input_values):\n",
" \n",
" intermediate_values = self.relu(input_values) #states should be positive anyway.\n",
" \n",
" intermediate_values = self.preprocess(intermediate_values)\n",
" intermediate_values = self.upsample(intermediate_values)\n",
" intermediate_values = self.sequential(intermediate_values)\n",
" intermediate_values = self.feature_reduction(intermediate_values)\n",
" \n",
" intermediate_values = self.relu(intermediate_values) #launches are always positive, this may need removed for other types of choices.\n",
" \n",
" return intermediate_values\n",
"\n",
"class PartialDerivativesOfValueEstimand(torch.nn.Module):\n",
" \"\"\"\n",
" This is used to estimate the partial derivatives of the value functions\n",
" \"\"\"\n",
" def __init__(self\n",
" ,batch_size\n",
" , number_constellations\n",
" , number_states\n",
" , layer_size=12):\n",
" super().__init__()\n",
" self.batch_size = batch_size #used for upscaling\n",
" self.number_constellations = number_constellations\n",
" self.number_states = number_states\n",
" self.layer_size = layer_size\n",
" \n",
" \n",
" #preprocess (single linear layer in case there is anything that needs to happen to all states)\n",
" self.preprocess = torch.nn.Sequential(\n",
" torch.nn.ReLU() #cleanup as states must be positive\n",
" ,torch.nn.Linear(in_features = self.number_states, out_features=self.number_states)\n",
" )\n",
" \n",
" #upsample to get the basic dimensionality correct. From (batch,State) to (batch, constellation, state). Includes a reshape\n",
" self.upsample = lambda x: torch.nn.Upsample(scale_factor=self.number_constellations)(x).view(self.batch_size\n",
" ,self.number_constellations\n",
" ,self.number_states)\n",
" \n",
" #sequential steps\n",
" self.sequential = torch.nn.Sequential(\n",
" torch.nn.Linear(in_features=number_states, out_features=layer_size)\n",
" #who knows if a convolution or other layer type might help here.\n",
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
" )\n",
"\n",
" #reduce the feature axis to match expected results\n",
" self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_states)\n",
" \n",
" def forward(self, states):\n",
" #Note that the input values are just going to be the state variables\n",
" #TODO:check that input values match the prepared dimension?\n",
" \n",
" #preprocess\n",
" intermediate = self.preprocess(states)\n",
" \n",
" #upscale the input values\n",
" intermediate = self.upsample(intermediate)\n",
" \n",
" #intermediate processing\n",
" intermediate = self.sequential(intermediate)\n",
" \n",
" #reduce feature axis to match the expected number of partials\n",
" intermediate = self.feature_reduction(intermediate)\n",
" \n",
" \n",
" return intermediate\n",
" "
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "suited-nothing",
"metadata": {},
"outputs": [],
"source": [
"class EstimandNN(torch.nn.Module):\n",
" \"\"\"\n",
" This neural network takes the current states as input values and returns both\n",
" the partial derivatives of the value function and the launch function.\n",
" \"\"\"\n",
" def __init__(self\n",
" ,batch_size\n",
" ,number_states\n",
" ,number_choices\n",
" ,number_constellations\n",
" ,layer_size=12\n",
" ):\n",
" super().__init__()\n",
" \n",
"\n",
" self.partials_estimator = PartialDerivativesOfValueEstimand(batch_size, number_constellations, number_states, layer_size)\n",
" self.launch_estimator = ChoiceFunction(batch_size, number_states, number_choices, number_constellations, layer_size)\n",
" \n",
" def forward(self, input_values):\n",
" pass\n",
" partials = self.partials_estimator(input_values)\n",
" launch = self.launch_estimator(input_values)\n",
" \n",
" return EstimandInterface(partials,launch)"
]
},
{
"cell_type": "markdown",
"id": "recognized-story",
"metadata": {},
"source": [
"# Testing\n",
"\n",
"Test if states can handle the dimensionality needed."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "smart-association",
"metadata": {},
"outputs": [],
"source": [
"batch_size,states,choices = 5,3,1\n",
"constellations = states -1 #determined by debris tracking\n",
"max_start_state = 100\n",
"\n",
"stocks_and_debris = torch.randint(max_start_state,(batch_size,1,states),dtype=torch.float32)"
]
},
{
"cell_type": "code",
"execution_count": 84,
"id": "unsigned-hungary",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"torch.Size([5, 1, 3])"
]
},
"execution_count": 84,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"stocks_and_debris.size()"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "regulated-conversation",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Launch Decisions and Partial Derivativs of value function with\n",
"\tlaunches\n",
"\t\t tensor([[[0.0000],\n",
" [0.0000]],\n",
"\n",
" [[2.0907],\n",
" [0.1053]],\n",
"\n",
" [[2.9730],\n",
" [2.2000]],\n",
"\n",
" [[2.3975],\n",
" [1.2877]],\n",
"\n",
" [[4.2107],\n",
" [2.0752]]], grad_fn=<ReluBackward0>)\n",
"\tPartials\n",
"\t\ttensor([[[ 0.1939, 0.3954, 0.0730],\n",
" [-0.9428, 0.6145, -0.9247]],\n",
"\n",
" [[ 1.1686, 3.0170, 0.3393],\n",
" [-7.1474, 2.3495, -7.0566]],\n",
"\n",
" [[-2.0849, 3.0883, -3.3791],\n",
" [-0.6664, 0.0361, -2.2530]],\n",
"\n",
" [[-0.7117, 2.5474, -1.6458],\n",
" [-2.1937, 0.6897, -3.0382]],\n",
"\n",
" [[-1.0262, 4.5973, -2.6606],\n",
" [-5.4307, 1.4510, -6.6972]]], grad_fn=<AddBackward0>)\n"
]
}
],
"source": [
"print(a := enn.forward(stocks_and_debris))"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "rental-detection",
"metadata": {},
"outputs": [],
"source": [
"def lossb(a):\n",
" #test loss function\n",
" return (a**2).sum()"
]
},
{
"cell_type": "code",
"execution_count": 30,
"id": "mechanical-joshua",
"metadata": {},
"outputs": [],
"source": [
"ch = ChoiceFunction(batch_size\n",
" ,states\n",
" ,choices\n",
" ,constellations\n",
" ,12)"
]
},
{
"cell_type": "code",
"execution_count": 31,
"id": "charged-request",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor(46.8100, grad_fn=<SumBackward0>)\n",
"tensor(82442.4219, grad_fn=<SumBackward0>)\n",
"tensor(0., grad_fn=<SumBackward0>)\n",
"tensor(0., grad_fn=<SumBackward0>)\n",
"tensor(0., grad_fn=<SumBackward0>)\n",
"tensor(0., grad_fn=<SumBackward0>)\n",
"tensor(0., grad_fn=<SumBackward0>)\n",
"tensor(0., grad_fn=<SumBackward0>)\n",
"tensor(0., grad_fn=<SumBackward0>)\n",
"tensor(0., grad_fn=<SumBackward0>)\n"
]
},
{
"data": {
"text/plain": [
"tensor([[[0.],\n",
" [0.]],\n",
"\n",
" [[0.],\n",
" [0.]],\n",
"\n",
" [[0.],\n",
" [0.]],\n",
"\n",
" [[0.],\n",
" [0.]],\n",
"\n",
" [[0.],\n",
" [0.]]], grad_fn=<ReluBackward0>)"
]
},
"execution_count": 31,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"optimizer = torch.optim.SGD(ch.parameters(),lr=0.01)\n",
"\n",
"for i in range(10):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = ch.forward(stocks_and_debris)\n",
"\n",
" l = lossb(output)\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" print(l)\n",
" \n",
"\n",
"ch.forward(stocks_and_debris)"
]
},
{
"cell_type": "code",
"execution_count": 45,
"id": "perceived-permit",
"metadata": {},
"outputs": [],
"source": [
"def lossc(a):\n",
" #test loss function\n",
" return (a**2).sum()"
]
},
{
"cell_type": "code",
"execution_count": 53,
"id": "atomic-variance",
"metadata": {},
"outputs": [],
"source": [
"pd = PartialDerivativesOfValueEstimand(\n",
" batch_size\n",
" ,constellations\n",
" ,states\n",
" ,12)"
]
},
{
"cell_type": "code",
"execution_count": 74,
"id": "biological-badge",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor(1.9948e-06, grad_fn=<SumBackward0>)\n",
"tensor(1.7427e-05, grad_fn=<SumBackward0>)\n",
"tensor(5.7993e-06, grad_fn=<SumBackward0>)\n",
"tensor(2.9985e-06, grad_fn=<SumBackward0>)\n",
"tensor(6.5281e-06, grad_fn=<SumBackward0>)\n",
"tensor(7.8818e-06, grad_fn=<SumBackward0>)\n",
"tensor(4.4327e-06, grad_fn=<SumBackward0>)\n",
"tensor(1.1240e-06, grad_fn=<SumBackward0>)\n",
"tensor(1.2478e-06, grad_fn=<SumBackward0>)\n",
"tensor(3.5818e-06, grad_fn=<SumBackward0>)\n",
"tensor(4.3732e-06, grad_fn=<SumBackward0>)\n",
"tensor(2.7699e-06, grad_fn=<SumBackward0>)\n",
"tensor(8.9659e-07, grad_fn=<SumBackward0>)\n",
"tensor(5.7541e-07, grad_fn=<SumBackward0>)\n",
"tensor(1.5010e-06, grad_fn=<SumBackward0>)\n"
]
},
{
"data": {
"text/plain": [
"tensor([[[ 0.0002, -0.0002, -0.0003],\n",
" [ 0.0001, -0.0003, -0.0002]],\n",
"\n",
" [[ 0.0002, -0.0003, -0.0003],\n",
" [ 0.0003, -0.0004, -0.0002]],\n",
"\n",
" [[ 0.0002, -0.0003, -0.0003],\n",
" [ 0.0002, -0.0003, -0.0003]],\n",
"\n",
" [[ 0.0002, -0.0002, -0.0004],\n",
" [ 0.0003, -0.0003, -0.0003]],\n",
"\n",
" [[ 0.0003, -0.0003, -0.0002],\n",
" [ 0.0003, -0.0003, -0.0002]]], grad_fn=<AddBackward0>)"
]
},
"execution_count": 74,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"optimizer = torch.optim.Adam(pd.parameters(),lr=0.0001)\n",
"\n",
"for i in range(15):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = pd.forward(stocks_and_debris)\n",
"\n",
" l = lossc(output)\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" print(l)\n",
" \n",
"\n",
"pd.forward(stocks_and_debris)"
]
},
{
"cell_type": "code",
"execution_count": 78,
"id": "compliant-johnson",
"metadata": {},
"outputs": [],
"source": [
"def lossa(a):\n",
" #test loss function\n",
" return (a.choices**2).sum() + (a.partials**2).sum()"
]
},
{
"cell_type": "code",
"execution_count": 81,
"id": "alive-potato",
"metadata": {},
"outputs": [],
"source": [
"enn = EstimandNN(batch_size\n",
" ,states\n",
" ,choices\n",
" ,constellations\n",
" ,12)"
]
},
{
"cell_type": "code",
"execution_count": 83,
"id": "changed-instruction",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"0 tensor(112.1970, grad_fn=<AddBackward0>)\n",
"10 tensor(79.8152, grad_fn=<AddBackward0>)\n",
"20 tensor(55.6422, grad_fn=<AddBackward0>)\n",
"30 tensor(38.5636, grad_fn=<AddBackward0>)\n",
"40 tensor(26.9156, grad_fn=<AddBackward0>)\n",
"50 tensor(18.9986, grad_fn=<AddBackward0>)\n",
"60 tensor(13.6606, grad_fn=<AddBackward0>)\n",
"70 tensor(10.1881, grad_fn=<AddBackward0>)\n",
"80 tensor(8.0395, grad_fn=<AddBackward0>)\n",
"90 tensor(6.7618, grad_fn=<AddBackward0>)\n",
"100 tensor(6.0101, grad_fn=<AddBackward0>)\n",
"110 tensor(5.5517, grad_fn=<AddBackward0>)\n",
"120 tensor(5.2434, grad_fn=<AddBackward0>)\n",
"130 tensor(5.0054, grad_fn=<AddBackward0>)\n",
"140 tensor(4.7988, grad_fn=<AddBackward0>)\n",
"150 tensor(4.6069, grad_fn=<AddBackward0>)\n",
"160 tensor(4.4235, grad_fn=<AddBackward0>)\n",
"170 tensor(4.2468, grad_fn=<AddBackward0>)\n",
"180 tensor(4.0763, grad_fn=<AddBackward0>)\n",
"190 tensor(3.9117, grad_fn=<AddBackward0>)\n",
"200 tensor(3.7532, grad_fn=<AddBackward0>)\n",
"210 tensor(3.6005, grad_fn=<AddBackward0>)\n",
"220 tensor(3.4535, grad_fn=<AddBackward0>)\n",
"230 tensor(3.3121, grad_fn=<AddBackward0>)\n",
"240 tensor(3.1761, grad_fn=<AddBackward0>)\n",
"250 tensor(3.0454, grad_fn=<AddBackward0>)\n",
"260 tensor(2.9198, grad_fn=<AddBackward0>)\n",
"270 tensor(2.7991, grad_fn=<AddBackward0>)\n",
"280 tensor(2.6832, grad_fn=<AddBackward0>)\n",
"290 tensor(2.5720, grad_fn=<AddBackward0>)\n",
"300 tensor(2.4653, grad_fn=<AddBackward0>)\n",
"310 tensor(2.3629, grad_fn=<AddBackward0>)\n",
"320 tensor(2.2646, grad_fn=<AddBackward0>)\n",
"330 tensor(2.1704, grad_fn=<AddBackward0>)\n",
"340 tensor(2.0800, grad_fn=<AddBackward0>)\n",
"350 tensor(1.9933, grad_fn=<AddBackward0>)\n",
"360 tensor(1.9103, grad_fn=<AddBackward0>)\n",
"370 tensor(1.8306, grad_fn=<AddBackward0>)\n",
"380 tensor(1.7543, grad_fn=<AddBackward0>)\n",
"390 tensor(1.6812, grad_fn=<AddBackward0>)\n",
"400 tensor(1.6111, grad_fn=<AddBackward0>)\n",
"410 tensor(1.5440, grad_fn=<AddBackward0>)\n",
"420 tensor(1.4797, grad_fn=<AddBackward0>)\n",
"430 tensor(1.4180, grad_fn=<AddBackward0>)\n",
"440 tensor(1.3590, grad_fn=<AddBackward0>)\n",
"450 tensor(1.3025, grad_fn=<AddBackward0>)\n",
"460 tensor(1.2484, grad_fn=<AddBackward0>)\n",
"470 tensor(1.1965, grad_fn=<AddBackward0>)\n",
"480 tensor(1.1469, grad_fn=<AddBackward0>)\n",
"490 tensor(1.0994, grad_fn=<AddBackward0>)\n",
"500 tensor(1.0540, grad_fn=<AddBackward0>)\n",
"510 tensor(1.0104, grad_fn=<AddBackward0>)\n",
"520 tensor(0.9688, grad_fn=<AddBackward0>)\n",
"530 tensor(0.9290, grad_fn=<AddBackward0>)\n",
"540 tensor(0.8908, grad_fn=<AddBackward0>)\n",
"550 tensor(0.8544, grad_fn=<AddBackward0>)\n",
"560 tensor(0.8195, grad_fn=<AddBackward0>)\n",
"570 tensor(0.7861, grad_fn=<AddBackward0>)\n",
"580 tensor(0.7542, grad_fn=<AddBackward0>)\n",
"590 tensor(0.7237, grad_fn=<AddBackward0>)\n",
"600 tensor(0.6945, grad_fn=<AddBackward0>)\n",
"610 tensor(0.6667, grad_fn=<AddBackward0>)\n",
"620 tensor(0.6400, grad_fn=<AddBackward0>)\n",
"630 tensor(0.6146, grad_fn=<AddBackward0>)\n",
"640 tensor(0.5903, grad_fn=<AddBackward0>)\n",
"650 tensor(0.5671, grad_fn=<AddBackward0>)\n",
"660 tensor(0.5449, grad_fn=<AddBackward0>)\n",
"670 tensor(0.5237, grad_fn=<AddBackward0>)\n",
"680 tensor(0.5035, grad_fn=<AddBackward0>)\n",
"690 tensor(0.4842, grad_fn=<AddBackward0>)\n",
"700 tensor(0.4658, grad_fn=<AddBackward0>)\n",
"710 tensor(0.4482, grad_fn=<AddBackward0>)\n",
"720 tensor(0.4315, grad_fn=<AddBackward0>)\n",
"730 tensor(0.4155, grad_fn=<AddBackward0>)\n",
"740 tensor(0.4002, grad_fn=<AddBackward0>)\n",
"750 tensor(0.3857, grad_fn=<AddBackward0>)\n",
"760 tensor(0.3718, grad_fn=<AddBackward0>)\n",
"770 tensor(0.3586, grad_fn=<AddBackward0>)\n",
"780 tensor(0.3460, grad_fn=<AddBackward0>)\n",
"790 tensor(0.3340, grad_fn=<AddBackward0>)\n",
"800 tensor(0.3226, grad_fn=<AddBackward0>)\n",
"810 tensor(0.3117, grad_fn=<AddBackward0>)\n",
"820 tensor(0.3013, grad_fn=<AddBackward0>)\n",
"830 tensor(0.2914, grad_fn=<AddBackward0>)\n",
"840 tensor(0.2820, grad_fn=<AddBackward0>)\n",
"850 tensor(0.2730, grad_fn=<AddBackward0>)\n",
"860 tensor(0.2645, grad_fn=<AddBackward0>)\n",
"870 tensor(0.2564, grad_fn=<AddBackward0>)\n",
"880 tensor(0.2486, grad_fn=<AddBackward0>)\n",
"890 tensor(0.2413, grad_fn=<AddBackward0>)\n",
"900 tensor(0.2342, grad_fn=<AddBackward0>)\n",
"910 tensor(0.2276, grad_fn=<AddBackward0>)\n",
"920 tensor(0.2212, grad_fn=<AddBackward0>)\n",
"930 tensor(0.2151, grad_fn=<AddBackward0>)\n",
"940 tensor(0.2094, grad_fn=<AddBackward0>)\n",
"950 tensor(0.2039, grad_fn=<AddBackward0>)\n",
"960 tensor(0.1986, grad_fn=<AddBackward0>)\n",
"970 tensor(0.1936, grad_fn=<AddBackward0>)\n",
"980 tensor(0.1889, grad_fn=<AddBackward0>)\n",
"990 tensor(0.1844, grad_fn=<AddBackward0>)\n"
]
},
{
"data": {
"text/plain": [
"<__main__.EstimandInterface at 0x7f85609fce20>"
]
},
"execution_count": 83,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"optimizer = torch.optim.Adam(enn.parameters(),lr=0.0001) #note the use of enn in the optimizer\n",
"\n",
"for i in range(1000):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = enn.forward(stocks_and_debris)\n",
"\n",
" l = lossa(output)\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" if i%10==0:\n",
" print(i, l)\n",
" \n",
"\n",
"enn.forward(stocks_and_debris)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "proved-amsterdam",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -0,0 +1,239 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 2,
"id": "standing-catch",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from torch.autograd.functional import jacobian\n",
"import itertools\n",
"import math"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "going-accident",
"metadata": {},
"outputs": [],
"source": [
"import combined as c"
]
},
{
"cell_type": "markdown",
"id": "severe-employment",
"metadata": {},
"source": [
"So, this contains a bunch of initial tests of my abstractions. I eventually need to change these to assertions and package them."
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "ranking-family",
"metadata": {},
"outputs": [],
"source": [
"#Instantiate some objects\n",
"pm = c.PhysicalModel(1.0,1e-6,0.01,2.0,1e-8)\n",
"s = c.States(torch.tensor([1.0,2,3]), torch.tensor([0.0]))\n",
"lp = c.LinearProfit(0,0.95,torch.tensor([1.0,0,0]), 5)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "premium-brisbane",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(tensor([-7.5676e+05, 2.8893e+05, 4.6783e+05, 1.5236e+00]),\n",
" <combined.States at 0x7f31f0146c10>)"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.single_transition(pm,lp,s,est_int)"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "raised-worthy",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-718920.5625, 274490.1562, 444444.6250])"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"minimand, iterated_partials, iterated_state = c.optimality(pm,lp,s,est_int)\n",
"minimand"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "horizontal-insight",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-2285563., -2285557., -2285557.])"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int2 = c.EstimandInterface(iterated_partials,torch.ones(3))\n",
"\n",
"minimand2, iterated_partials2, iterated_state2 = c.optimality(pm,lp,iterated_state,est_int2)\n",
"minimand2"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "automatic-builder",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-2405858.5000, -2405852.5000, -2405852.5000])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int3 = c.EstimandInterface(iterated_partials2,torch.ones(3))\n",
"\n",
"minimand3, iterated_partials3, iterated_state3 = c.optimality(pm,lp,iterated_state2,est_int3)\n",
"minimand3"
]
},
{
"cell_type": "markdown",
"id": "changing-mainland",
"metadata": {},
"source": [
    "So, this successfully let me link the two. I'm going to move to another notebook, clean up, and start integrating the system"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "assumed-midwest",
"metadata": {},
"outputs": [],
"source": [
"model = DoubleNetwork(input_size = 5, output_size=5, layers_size=15)\n",
"\n",
"data_in = torch.tensor([1.5,2,3,4,5])\n",
"\n",
"data_in\n",
"\n",
"target = torch.zeros(5)\n",
"\n",
"def loss_fn2(output,target):\n",
" return sum((output[1] +output[0] - target)**2)\n",
" #could add a simplicity assumption i.e. l1 on parameters.\n",
"\n",
"#Prep Optimizer\n",
"optimizer = torch.optim.SGD(model.parameters(),lr=0.01)\n",
"\n",
"for i in range(20):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = model.forward(data_in)\n",
" output\n",
"\n",
" l = loss_fn2(output, target)\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" print(\"\\n\",l)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "surprising-fundamentals",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "parliamentary-delta",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "emotional-castle",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "substantial-exhibit",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -0,0 +1,383 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "pleasant-equation",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from torch.autograd.functional import jacobian\n",
"import itertools\n",
"import math"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "moved-christian",
"metadata": {},
"outputs": [],
"source": [
"import combined as c"
]
},
{
"cell_type": "markdown",
"id": "pressed-slope",
"metadata": {},
"source": [
"So, this contains a bunch of initial tests of my abstractions. I eventually need to change these to assertions and package them."
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "capable-equality",
"metadata": {},
"outputs": [],
"source": [
"#Instantiate some objects\n",
"pm = c.PhysicalModel(1.0,1e-6,0.01,2.0,1e-8)\n",
"s = c.States(torch.tensor([1.0,2,3]), torch.tensor([0.0]))\n",
"lp = c.LinearProfit(0,0.95,torch.tensor([1.0,0,0]), 5)\n",
"est_int = c.EstimandInterface(torch.tensor([[1.0,2,3,2]\n",
" ,[4,5,6,2]\n",
" ,[7,8,9,2]\n",
" ,[1,3,5,7]]\n",
" ),torch.ones(3))"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "written-experience",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"stocks\ttensor([1., 2., 3.]) \n",
"debris\t tensor([0.])\n",
"3\n",
"1\n"
]
}
],
"source": [
"#test State object \n",
"print(s)\n",
"print(s.number_constellations)\n",
"print(s.number_debris_trackers)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "twelve-arthur",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Launch Decisions and Partial Derivativs of value function with\n",
"\tlaunches\n",
"\t\t tensor([1., 1., 1.])\n",
"\tPartials\n",
"\t\ttensor([[1., 2., 3., 2.],\n",
" [4., 5., 6., 2.],\n",
" [7., 8., 9., 2.],\n",
" [1., 3., 5., 7.]])\n",
"tensor([1., 1., 1.]) tensor([[1., 2., 3., 2.],\n",
" [4., 5., 6., 2.],\n",
" [7., 8., 9., 2.],\n",
" [1., 3., 5., 7.]])\n",
"tensor(1.)\n",
"tensor([0., 1., 0.])\n",
"tensor([2., 5., 8., 3.])\n",
"tensor([[0., 2., 0., 0.],\n",
" [0., 5., 0., 0.],\n",
" [0., 8., 0., 0.],\n",
" [0., 3., 0., 0.]])\n"
]
}
],
"source": [
"#Test estimand interface\n",
"print(est_int)\n",
"print(est_int.launches,est_int.partials)\n",
"\n",
"print(est_int.launch_single(1))\n",
"print(est_int.launch_vector(1))\n",
"print(est_int.partial_vector(1)) \n",
"print(est_int.partial_matrix(1)) #TODO: double check orientation"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "impressive-tribe",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"1.0\n",
"1e-06\n",
"0.01\n",
"2.0\n",
"1e-08\n",
"tensor([1.0133e-06, 2.0266e-06, 2.9802e-06])\n",
"tensor([1., 2., 3.]) tensor([0.])\n",
"tensor([1.0000, 1.0000, 1.0000]) tensor([12.0000])\n"
]
}
],
"source": [
"#Test physical model methods\n",
"print(pm)\n",
"print(pm.survival(s))\n",
"s2 = pm.transition(s,est_int)\n",
"print(s.stocks,s.debris)\n",
"print(s2.stocks,s2.debris)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "stretch-reward",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"LinearProfit\n",
" Benefit weights:\ttensor([1., 0., 0.])\n",
" launch cost:\t5\n",
" Deorbit cost:\t0\n",
"tensor(-4.)\n"
]
}
],
"source": [
"#test linear profit object\n",
"print(lp)\n",
"print(lp.period_benefit(s,est_int))"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "advance-folder",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([1., 0., 0., 0.])"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lp._period_benefit_jacobian_wrt_states( s.stocks, s.debris, est_int.launches)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "posted-subscriber",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-5., 0., 0.])"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lp._period_benefit_jacobian_wrt_launches( s.stocks, s.debris, est_int.launches)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "divine-agenda",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([1., 0., 0., 0.])"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"lp.period_benefit_jacobian_wrt_states( s, est_int)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "surgical-direction",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[1., 2., 3., 2.],\n",
" [4., 5., 6., 2.],\n",
" [7., 8., 9., 2.],\n",
" [1., 3., 5., 7.]])"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int.partials"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "mounted-roots",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(tensor([-7.5676e+05, 2.8893e+05, 4.6783e+05, 1.5236e+00]),\n",
" <combined.States at 0x7f8c3c9c54f0>)"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c.single_transition(pm,lp,s,est_int)"
]
},
{
"cell_type": "code",
"execution_count": 24,
"id": "pediatric-iceland",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-718920.5625, 274490.1562, 444444.6250])"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"minimand, iterated_partials, iterated_state = c.optimality(pm,lp,s,est_int)\n",
"minimand"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "isolated-cleveland",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-2285563., -2285557., -2285557.])"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int2 = c.EstimandInterface(iterated_partials,torch.ones(3))\n",
"\n",
"minimand2, iterated_partials2, iterated_state2 = c.optimality(pm,lp,iterated_state,est_int2)\n",
"minimand2"
]
},
{
"cell_type": "code",
"execution_count": 27,
"id": "relevant-romance",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([-2405858.5000, -2405852.5000, -2405852.5000])"
]
},
"execution_count": 27,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"est_int3 = c.EstimandInterface(iterated_partials2,torch.ones(3))\n",
"\n",
"minimand3, iterated_partials3, iterated_state3 = c.optimality(pm,lp,iterated_state2,est_int3)\n",
"minimand3"
]
},
{
"cell_type": "markdown",
"id": "israeli-oracle",
"metadata": {},
"source": [
    "So, this successfully let me link the two. I'm going to move to another notebook, clean up, and start integrating the system"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.8.8"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -3,216 +3,219 @@
{
"cell_type": "code",
"execution_count": 1,
"id": "departmental-hardware",
"id": "operating-illinois",
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"from torch.autograd.functional import jacobian"
"import combined as c\n",
"import NeuralNetworkSpecifications as nns"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "differential-shock",
"execution_count": 25,
"id": "white-lottery",
"metadata": {},
"outputs": [],
"source": [
"a = torch.tensor([1,2,3,4.2],requires_grad=False)\n",
"b = torch.tensor([2,2,2,2.0],requires_grad=True)"
"BATCH_SIZE = 5\n",
"STATES = 3\n",
"CONSTELLATIONS = STATES -1 #determined by debris tracking\n",
"MAX = 10\n",
"FEATURES = 1\n",
"\n",
"stocks = torch.randint(MAX,(BATCH_SIZE,1,CONSTELLATIONS), dtype=torch.float32, requires_grad=True)\n",
"debris = torch.randint(MAX,(BATCH_SIZE,1), dtype=torch.float32, requires_grad=True)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "separated-pursuit",
"metadata": {},
"outputs": [],
"source": [
"def test(x,y):\n",
" return (x@y)**2"
]
},
{
"cell_type": "code",
"execution_count": 17,
"id": "french-trunk",
"execution_count": 91,
"id": "quick-extraction",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor(416.1600, grad_fn=<PowBackward0>)"
]
},
"execution_count": 17,
"metadata": {},
"output_type": "execute_result"
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[[[1.],\n",
" [0.]]]], requires_grad=True) torch.Size([1, 1, 2, 1])\n",
"tensor([[ 1.0000, -0.1000]], requires_grad=True) torch.Size([1, 2])\n"
]
}
],
"source": [
"test(a,b)"
"#launch_costs = torch.randint(3,(1,CONSTELLATIONS,CONSTELLATIONS,FEATURES), dtype=torch.float32)\n",
"launch_costs = torch.tensor([[[[1.0],[0.0]]]], requires_grad=True)\n",
"print(launch_costs, launch_costs.shape)\n",
"#payoff = torch.randint(5,(STATES,CONSTELLATIONS), dtype=torch.float32)\n",
"payoff = torch.tensor([[1.0, -0.1]], requires_grad=True)\n",
"print(payoff, payoff.shape)\n",
"\n",
"debris_cost = -0.2"
]
},
{
"cell_type": "code",
"execution_count": 57,
"id": "adverse-ceremony",
"execution_count": 92,
"id": "textile-cleanup",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"((tensor([81.6000, 81.6000, 81.6000, 81.6000]),\n",
" tensor([ 40.8000, 81.6000, 122.4000, 171.3600])),\n",
" tensor([2., 2., 2., 2.], requires_grad=True),\n",
" tensor(416.1600, grad_fn=<PowBackward0>))"
]
},
"execution_count": 57,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"j = jacobian(test,(a,b))\n",
"j,b,test(a,b)"
"def linear_profit(stocks, debris, choices,constellation_number):\n",
" #Pay particular attention to the dimensions\n",
" #note that there is an extra dimension in there just ot match that of the profit vector we'll be giving out.\n",
" \n",
" #calculate launch expenses\n",
" \n",
" launch_expense = (-5 * output)[:,constellation_number,:]\n",
"\n",
" #calculate revenue\n",
"\n",
" revenue = (payoff * stocks).sum(dim=2)\n",
" \n",
" debris_costs = debris * debris_cost \n",
"\n",
"\n",
" profit = (revenue + debris_costs + launch_expense).sum(dim=1)\n",
" return profit"
]
},
{
"cell_type": "code",
"execution_count": 58,
"id": "lovely-apple",
"execution_count": 100,
"id": "single-wheat",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"((tensor([-12.8304, -3.9878, 4.8547, 15.4658]),\n",
" tensor([-10.8365, -21.6729, -32.5094, -45.5132])),\n",
" tensor([ 1.1840, 0.3680, -0.4480, -1.4272], grad_fn=<SubBackward0>),\n",
" tensor(29.3573, grad_fn=<PowBackward0>))"
"(tensor([ 3.2451, 4.3734, 6.5474, -0.2722, -2.8843], grad_fn=<SumBackward1>),\n",
" torch.Size([5]))"
]
},
"execution_count": 58,
"execution_count": 100,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b2 = b - j[1]*b*0.01\n",
"j2 = jacobian(test,(a,b2))\n",
"j2,b2,test(a,b2)"
"profit = linear_profit(stocks, debris, output,0)\n",
"profit, profit.shape"
]
},
{
"cell_type": "code",
"execution_count": 63,
"id": "stretch-selection",
"execution_count": 123,
"id": "handy-perry",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"((tensor([-13.6581, -4.2906, 5.2787, 17.0284]),\n",
" tensor([-11.4119, -22.8239, -34.2358, -47.9301])),\n",
" tensor([ 1.1968, 0.3760, -0.4626, -1.4922], grad_fn=<SubBackward0>),\n",
" tensor(32.5580, grad_fn=<PowBackward0>))"
"(tensor([[-0.2000],\n",
" [-0.0000],\n",
" [-0.0000],\n",
" [-0.0000],\n",
" [-0.0000]]),)"
]
},
"execution_count": 63,
"execution_count": 123,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b3 = b2 - j2[1]*b2*0.001\n",
"j3 = jacobian(test,(a,b3))\n",
"j3,b3,test(a,b3)"
"torch.autograd.grad(profit[0], (debris), create_graph=True)"
]
},
{
"cell_type": "code",
"execution_count": 64,
"id": "colored-visit",
"execution_count": 95,
"id": "purple-superior",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"((tensor([-14.5816, -4.6324, 5.7628, 18.8361]),\n",
" tensor([-12.0461, -24.0921, -36.1382, -50.5935])),\n",
" tensor([ 1.2105, 0.3846, -0.4784, -1.5637], grad_fn=<SubBackward0>),\n",
" tensor(36.2769, grad_fn=<PowBackward0>))"
]
},
"execution_count": 64,
"metadata": {},
"output_type": "execute_result"
}
],
"outputs": [],
"source": [
"b4 = b3 - j3[1]*b3*0.001\n",
"j4 = jacobian(test,(a,b4))\n",
"j4,b4,test(a,b4)"
"policy = nns.ChoiceFunction(BATCH_SIZE\n",
" ,STATES\n",
" ,FEATURES\n",
" ,CONSTELLATIONS\n",
" ,12\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 65,
"id": "familiar-pizza",
"cell_type": "markdown",
"id": "auburn-leonard",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"((tensor([-15.6173, -5.0205, 6.3191, 20.9424]),\n",
" tensor([-12.7481, -25.4962, -38.2443, -53.5421])),\n",
" tensor([ 1.2251, 0.3938, -0.4957, -1.6428], grad_fn=<SubBackward0>),\n",
" tensor(40.6286, grad_fn=<PowBackward0>))"
]
},
"execution_count": 65,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b5 = b4 - j4[1]*b4*0.001\n",
"j5 = jacobian(test,(a,b5))\n",
"j5,b5,test(a,b5)"
"example to get profit = 1\n",
"```python\n",
"optimizer = torch.optim.Adam(policy.parameters(),lr=0.001)\n",
"\n",
"for i in range(10000):\n",
" #training loop\n",
" optimizer.zero_grad()\n",
"\n",
" output = policy.forward(s.values)\n",
"\n",
" l = ((1-linear_profit(s.values,output))**2).sum()\n",
"\n",
"\n",
" l.backward()\n",
"\n",
" optimizer.step()\n",
"\n",
" if i%200==0:\n",
" print(l)\n",
" \n",
"\n",
"results = policy.forward(s.values)\n",
"print(results.mean(dim=0), \"\\n\",results.std(dim=0))\n",
"```\n"
]
},
{
"cell_type": "code",
"execution_count": 66,
"id": "brilliant-squad",
"execution_count": 96,
"id": "herbal-manual",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"((tensor([-15.6173, -5.0205, 6.3191, 20.9424]),\n",
" tensor([-12.7481, -25.4962, -38.2443, -53.5421])),\n",
" tensor([ 1.2407, 0.4039, -0.5146, -1.7307], grad_fn=<SubBackward0>),\n",
" tensor(45.7605, grad_fn=<PowBackward0>))"
"tensor([[[0.2910],\n",
" [0.4003]],\n",
"\n",
" [[0.1053],\n",
" [0.2446]],\n",
"\n",
" [[0.1705],\n",
" [0.2758]],\n",
"\n",
" [[0.1944],\n",
" [0.3421]],\n",
"\n",
" [[0.5369],\n",
" [0.6181]]], grad_fn=<ReluBackward0>)"
]
},
"execution_count": 66,
"execution_count": 96,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"b6 = b5 - j5[1]*b5*0.001\n",
"j6 = jacobian(test,(a,b5))\n",
"j6,b6,test(a,b6)"
"output = policy.forward(s.values)\n",
"output"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "discrete-engineer",
"id": "another-timing",
"metadata": {},
"outputs": [],
"source": []

@ -3,39 +3,518 @@
{
"cell_type": "code",
"execution_count": 1,
"id": "victorian-produce",
"id": "geographic-wilderness",
"metadata": {},
"outputs": [],
"source": [
"a = [1,2,3]\n",
"b = [\"a\",\"b\",\"c\"]"
"import torch\n",
"import combined as c\n",
"import NeuralNetworkSpecifications as nns"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "major-glucose",
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"tensor([[[4., 9., 6.]],\n",
"\n",
" [[0., 5., 4.]],\n",
"\n",
" [[3., 1., 9.]],\n",
"\n",
" [[6., 4., 8.]],\n",
"\n",
" [[8., 7., 6.]]], grad_fn=<CatBackward>)\n"
]
}
],
"source": [
"batch_size,states,choices = 5,3,1\n",
"constellations = states -1 #determined by debris tracking\n",
"max_start_state = 10\n",
"\n",
"stocks = torch.randint(max_start_state,(batch_size,1,constellations), dtype=torch.float32, requires_grad=True)\n",
"debris = torch.randint(max_start_state,(batch_size,1,1), dtype=torch.float32, requires_grad=True)\n",
"\n",
"s = c.States(stocks, debris)\n",
"\n",
"print(s.values)"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "recognized-ability",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([5.6433e-07, 6.7631e-07], grad_fn=<MulBackward0>)"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"constellation_collision_risk = 1e-6 * torch.rand(constellations, requires_grad=True)\n",
"constellation_collision_risk"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "elect-float",
"metadata": {},
"outputs": [],
"source": [
"debris_decay_rate = 0.1\n",
"launch_debris = 0.05\n",
"debris_autocatalysis_rate = 1.4\n",
"\n",
"benefit_weight0 = torch.tensor([1.0,-0.02], requires_grad=True)\n",
"benefit_weight1 = torch.tensor([0.0,1.0], requires_grad=True)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "compressed-individual",
"metadata": {},
"outputs": [],
"source": [
"pm = c.PhysicalModel(10\n",
" , constellation_collision_risk #constellations_collision_risk #as tensor\n",
" , debris_decay_rate #debris_decay_rate\n",
" , launch_debris #launch_debris\n",
" , debris_autocatalysis_rate #debris_autocatalysis_rate\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "tracked-bachelor",
"metadata": {},
"outputs": [],
"source": [
"class LinearProfit():\n",
" \"\"\"\n",
" The simplest type of profit function available.\n",
" \"\"\"\n",
" def __init__(self, batch_size, constellation_number, discount_factor, benefit_weights, launch_cost, deorbit_cost=0, ):\n",
" self.batch_size = batch_size\n",
" \n",
" \n",
" #track which constellation this is.\n",
" self.constellation_number = constellation_number\n",
" \n",
" #get the number of constellations (pull from the benefit weight, in the dimension that counts across constellations)\n",
" self.number_of_constellations = benefit_weights.size()[0] -1\n",
"\n",
" #parameters describing the agent's situation\n",
" self.discount_factor = discount_factor\n",
" self.benefit_weights = benefit_weights\n",
" self.launch_cost = launch_cost\n",
" self.deorbit_cost = deorbit_cost\n",
" \n",
" def _period_benefit(self,stocks,debris,launches):\n",
" # multiply benefits times stocks\n",
" # sum across constellations\n",
" # reshape to standard dimensions\n",
" # subtract launch costs. \n",
" profit = torch.tensordot(self.benefit_weights,stocks, [[0],[1]])[:,self.constellation_number] \\\n",
" - (self.launch_cost * launches)[:,self.constellation_number,0]\n",
" return profit.view(batch_size,1)\n",
"\n",
"\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "confidential-philippines",
"metadata": {},
"outputs": [],
"source": [
"launch_cost = 5\n",
"ea0 = LinearProfit(\n",
" batch_size\n",
" ,0 #constellation index\n",
" ,0.95 #discount\n",
" ,benefit_weight0\n",
" ,launch_cost #launch_cost\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 8,
"id": "sought-beginning",
"id": "religious-georgia",
"metadata": {},
"outputs": [],
"source": [
"enn = nns.EstimandNN(batch_size\n",
" ,states\n",
" ,choices\n",
" ,constellations\n",
" ,12)"
]
},
{
"cell_type": "code",
"execution_count": 9,
"id": "painful-republican",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[[0.],\n",
" [0.]],\n",
"\n",
" [[0.],\n",
" [0.]],\n",
"\n",
" [[0.],\n",
" [0.]],\n",
"\n",
" [[0.],\n",
" [0.]],\n",
"\n",
" [[0.],\n",
" [0.]]], grad_fn=<ReluBackward0>)"
]
},
"execution_count": 9,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"launch_decisions = enn.forward(s.values).choices\n",
"launch_decisions"
]
},
{
"cell_type": "code",
"execution_count": 10,
"id": "equal-raising",
"metadata": {},
"outputs": [],
"source": [
"def test(stocks,launches):\n",
" # multiply benefits times stocks\n",
" # sum across constellations\n",
" # reshape to standard dimensions\n",
" # subtract launch costs. \n",
" profit = torch.tensordot(benefit_weight0,stocks, [[0],[1]])[:,0] - (launch_cost * launch_decisions)[:,0,0]\n",
" return profit.view(batch_size,1)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"id": "divine-editor",
"metadata": {},
"outputs": [],
"source": [
"t = LinearProfit(batch_size #batch_size\n",
" ,0 #constellation index\n",
" ,0.95 #discount\n",
" ,benefit_weight0\n",
" ,launch_cost #launch_cost\n",
" )"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "japanese-captain",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"[(0, (1, 'a')), (1, (2, 'b')), (2, (3, 'c'))]"
"tensor([[3.9200],\n",
" [0.0000],\n",
" [2.9400],\n",
" [5.8800],\n",
" [7.8400]], grad_fn=<ViewBackward>)"
]
},
"execution_count": 8,
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"[x for x in enumerate(zip(a,b))]"
"test(stocks,launch_decisions)"
]
},
{
"cell_type": "code",
"execution_count": 13,
"id": "determined-difference",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"tensor([[3.9200],\n",
" [0.0000],\n",
" [2.9400],\n",
" [5.8800],\n",
" [7.8400]], grad_fn=<ViewBackward>)"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"t._period_benefit(s.stocks,s.debris,launch_decisions)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"id": "tribal-least",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"(tensor([[[[[0.9800, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]]]],\n",
" \n",
" \n",
" \n",
" [[[[0.0000, 0.0000]],\n",
" \n",
" [[0.9800, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]]]],\n",
" \n",
" \n",
" \n",
" [[[[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.9800, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]]]],\n",
" \n",
" \n",
" \n",
" [[[[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.9800, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]]]],\n",
" \n",
" \n",
" \n",
" [[[[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.0000, 0.0000]],\n",
" \n",
" [[0.9800, 0.0000]]]]], grad_fn=<ViewBackward>),\n",
" tensor([[[[[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]]]],\n",
" \n",
" \n",
" \n",
" [[[[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]]]],\n",
" \n",
" \n",
" \n",
" [[[[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]]]],\n",
" \n",
" \n",
" \n",
" [[[[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]]]],\n",
" \n",
" \n",
" \n",
" [[[[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]],\n",
" \n",
" [[0.]]]]]),\n",
" tensor([[[[[-5.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]]]],\n",
" \n",
" \n",
" \n",
" [[[[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-5.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]]]],\n",
" \n",
" \n",
" \n",
" [[[[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-5.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]]]],\n",
" \n",
" \n",
" \n",
" [[[[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-5.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]]]],\n",
" \n",
" \n",
" \n",
" [[[[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-0.],\n",
" [ 0.]],\n",
" \n",
" [[-5.],\n",
" [ 0.]]]]]))"
]
},
"execution_count": 14,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#in this case, the debris isn't tracked because it isn't included, and launch_decisions has a similar issue.\n",
"torch.autograd.functional.jacobian(t._period_benefit, (s.stocks,s.debris,launch_decisions), create_graph=True)"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "statutory-lyric",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "recent-lingerie",
"id": "naked-health",
"metadata": {},
"outputs": [],
"source": []

@ -2,18 +2,11 @@ import torch
from torch.autograd.functional import jacobian
import itertools
import math
import abc
############### CONSTANTS ###################
#Parameters
BETA = 0.95
#Constants determining iterations etc.
NUMBER_CONSTELLATIONS = 5
NUMBER_DEBRIS_TRACKERS = 1
NUMBER_OF_CHOICE_VARIABLES = 1
NUMBER_OF_REQUIRED_ITERATED_CONDITIONS = (NUMBER_CONSTELLATIONS+NUMBER_DEBRIS_TRACKERS+(NUMBER_OF_CHOICE_VARIABLES*NUMBER_CONSTELLATIONS))
NUMBER_OF_REQUIRED_ITERATIONS = math.ceil(NUMBER_OF_REQUIRED_ITERATED_CONDITIONS/NUMBER_CONSTELLATIONS)
############# COMPOSITION FUNCTIONS ###################
@ -57,52 +50,116 @@ class PhysicalModel():
It captures the constants that characterize interactions, and provides a set of
function to calculate changes to the physical environment.
It has two sets of interfaces, one that handles tensors (denoted by _function():)
and one that handles state objects (denoted by function():)
"""
def __init__(self
,collision_debris
,debris_from_collision
,constellations_collision_risk
,debris_decay_rate
,launch_debris
,debris_autocatalysis_rate
)
self.collision_debris = collision_debris
self.constellations_collision_risk = constellations_collision_risk
self.debris_decay_rate = debris_decay_rate
self.launch_debris = launch_debris
self.debris_autocatalysis_rate = debris_autocatalysis_rate
,debris_autocatalysis_rate):
self.debris_from_collision= debris_from_collision
self.constellations_collision_risk = constellations_collision_risk
self.debris_decay_rate = debris_decay_rate
self.launch_debris = launch_debris
self.debris_autocatalysis_rate = debris_autocatalysis_rate
def __str__(self):
return "\n{}\n{}\n{}\n{}\n{}".format(
self.debris_from_collision
,self.constellations_collision_risk
,self.debris_decay_rate
,self.launch_debris
,self.debris_autocatalysis_rate
)
def survival(self):
def _survival(self, stocks, debris):
#returns the survival rate (not destruction rate) for the given constellation.
return 1- torch.exp(-self.constellations_collision_risk * self.stock - self.debris)
return 1- torch.exp(-self.constellations_collision_risk * stocks - debris.sum())
def survival(self, states):
"""
This is an interface wrapper
"""
return self._survival(states.stocks, states.debris)
def transition_debris(self, state, launch_decisions):
def _transition_debris(self, stocks,debris,launches):
"""
This function transitions debris levels based off of a state and launch decision.
"""
new_debris = (1-self.debris_decay_rate + self.debris_autocatalysis_rate) * state.debris \ #debris decay and autocatalysis
+ self.launch_debris*launch_decisions.sum() \ #debris from launches
+ self.collision_debris * (1-self.survival()) @ state.stocks
new_debris = (1-self.debris_decay_rate + self.debris_autocatalysis_rate) * debris \
+ self.launch_debris * launches.sum() \
+ self.debris_from_collision * (1-self._survival(stocks,debris)) @ stocks
return new_debris
def transition_stocks(self, state, launch_decisions):
def transition_debris(self, state, estimand_interface):
"""
This is an interface wrapper.
"""
return self._transition_debris(state.stocks,state.debris,estimand_interface.launches)
new_stock = self.survival() * state.stocks + launch_decisions
def _transition_stocks(self, stocks, debris, launches):
"""
This function calculates new stock levels.
"""
new_stock = self._survival(stocks,debris) * stocks + launches
return new_stock
def transition(self, state, launch_decisions):
def transition_stocks(self, state, estimand_interface):
"""
This is an interface wrapper
"""
return self._transition_stocks(state.stocks,state.debris,estimand_interface.launches)
def transition(self, state, estimand_interface):
    """
    This function takes a state and launch decision, and updates the state
    according to the physical laws of motion. It returns a States object.

    Merge residue removed: duplicate calls passing the old `launch_decisions`
    argument (an undefined name in this signature) preceded the current calls.
    """
    d = self.transition_debris(state, estimand_interface)
    s = self.transition_stocks(state, estimand_interface)
    return States(s, d)
def transition_jacobian_wrt_states(self,state,estimand_interface):
    """
    This function takes values of the state and estimand, and returns a properly formatted
    jacobian of the transition function with respect to the states.
    The reason this is done here is because there is some reshaping that must happen, so
    it is easier to wrap it here.
    """
    # NOTE(review): `jacobian` is assumed to be torch.autograd.functional.jacobian
    # (imported elsewhere in the file) — confirm. It returns one entry per input:
    # (d/d stocks, d/d debris, d/d launches).
    jac_debris = jacobian(self._transition_debris, (state.stocks,state.debris,estimand_interface.launches))
    jac_stocks = jacobian(self._transition_stocks, (state.stocks,state.debris,estimand_interface.launches))
    # Concatenate the stock- and debris-derivative blocks side by side for each
    # equation, then stack the stock equation over the debris equation to form
    # the full Jacobian of the state transition w.r.t. the state vector.
    h1 = torch.cat((jac_stocks[0],jac_stocks[1]),dim=1)
    h2 = torch.cat((jac_debris[0],jac_debris[1]),dim=1)
    a = torch.cat((h1,h2),dim=0)
    return a
def transition_jacobian_wrt_launches(self,state,estimand_interface):
    """
    This function takes values of the state and estimand, and returns a properly formatted
    jacobian of the transition function with respect to the launch decisions.
    The reason this is done here is because there is some reshaping that must happen, so
    it is easier to wrap it here.
    """
    # NOTE(review): `jacobian` is assumed to be torch.autograd.functional.jacobian
    # — confirm import. Index [2] selects the derivative w.r.t. launches.
    jac_debris = jacobian(self._transition_debris, (state.stocks,state.debris,estimand_interface.launches))
    jac_stocks = jacobian(self._transition_stocks, (state.stocks,state.debris,estimand_interface.launches))
    # NOTE(review): only the debris block is transposed here; presumably this
    # aligns the debris-equation row with the stock-equation columns — verify
    # the intended shapes, since the asymmetry is easy to get wrong.
    b = torch.cat((jac_stocks[2],jac_debris[2].T),dim=1)
    return b
class States():
    """
    This is supposed to capture the state variables of the model, to create a
    common interface for passing satellite stocks and debris levels around
    together.

    (Fix: the original docstring was left unterminated because a diff hunk
    header had been spliced into it by a bad merge.)
    """
    def __init__(self, stocks, debris):
        self.stocks = stocks    # per-constellation satellite stocks (1-D tensor)
        self.debris = debris    # debris tracker levels (1-D tensor)
    def __str__(self):
        return "stocks\t{} \ndebris\t {}".format(self.stocks,self.debris)
    @property
    def values(self):
        # Return the full state as a single tensor: stocks followed by debris.
        return torch.cat((self.stocks,self.debris), dim=-1)
    @property
    def number_constellations(self):
        return len(self.stocks)
    @property
    def number_debris_trackers(self):
        return len(self.debris)
################ NEURAL NETWORK TOOLS ###################
@ -137,108 +211,134 @@ class EstimandInterface():
self.partials = partials
self.launches = launches
self.deorbits = deorbits
@property
def number_constellations(self):
    """Number of constellations, inferred from the launch-decision vector."""
    return len(self.launches)

@property
def number_states(self):
    """Number of state variables: one stock per constellation plus one debris
    level. (This depends on the debris trackers technically.)"""
    return self.number_constellations + 1

def launch_single(self, constellation):
    """Return the scalar launch decision for the constellation of interest.

    Merge residue removed: a stale variant lacked `self` and sized the filter
    with a global NUMBER_CONSTELLATIONS instead of this instance's own count.
    """
    filter_tensor = torch.zeros(self.number_constellations)
    filter_tensor[constellation] = 1.0
    return self.launches @ filter_tensor
def launch_vector(self, constellation):
    """Return the launch decisions masked so only the entry for the given
    constellation is nonzero (as a vector).

    Merge residue removed: a stale variant lacked `self` and used the global
    NUMBER_CONSTELLATIONS.
    """
    filter_tensor = torch.zeros(self.number_constellations)
    filter_tensor[constellation] = 1.0
    return self.launches * filter_tensor
def partial_vector(self, constellation):
    """Return the value-function partials column for the constellation of
    interest.

    Merge residue removed: a stale variant lacked `self` and sized the filter
    with NUMBER_CONSTELLATIONS instead of number_states.
    """
    filter_tensor = torch.zeros(self.number_states)
    filter_tensor[constellation] = 1.0
    return self.partials @ filter_tensor
def partial_matrix(self, constellation):
    """Return the value-function partials for the constellation of interest as
    a matrix (all other columns zeroed out).

    Merge residue removed: a stale variant lacked `self` and sized the filter
    with NUMBER_CONSTELLATIONS instead of number_states.
    """
    filter_tensor = torch.zeros(self.number_states)
    filter_tensor[constellation] = 1.0
    return self.partials * filter_tensor
def __str__(self):
    """Human readable descriptor of the interface's launches and partials.

    Merge residue removed: a stale return formatted `self.states`, which this
    class never defines. Also fixed the "Derivativs" typo in the output string.
    """
    return "Launch Decisions and Partial Derivatives of value function with\n\tlaunches\n\t\t {}\n\tPartials\n\t\t{}".format(self.launches,self.partials)
############## ECONOMIC MODEL ############
class EconomicModel():
    """
    This class describes the set of profit functions involved in the value
    function iteration.
    """
    def __init__(self, discount_factor, profit_objects):
        self.discount_factor = discount_factor
        self.profit_objects = profit_objects  # A list of Profit objects
    def constellation_period_profits(self, state, estimand_interface, constellation):
        """
        This function calculates the current period profits for a single given
        constellation.
        """
        return self.profit_objects[constellation].profit(state, estimand_interface, constellation)
    def period_profits(self, state, estimand_interface):
        """
        This function calculates the current period profits for each
        constellation, in list order.
        """
        profits = []
        # BUG FIX: original iterated `for i,profit_object in self.profit_objectives`,
        # misspelling the attribute (AttributeError) and omitting enumerate().
        for i, profit_object in enumerate(self.profit_objects):
            constellation_profit = self.constellation_period_profits(state, estimand_interface, i)
            profits.append(constellation_profit)
        return profits
    @property
    def number_constellations(self):
        # BUG FIX: original referenced bare `profit_objects` (NameError).
        return len(self.profit_objects)
#Abstract class describing profit. Each subclass will connect a profit function "style" to a specific instance of parameters.
class EconomicAgent(metaclass=abc.ABCMeta):
    """
    Abstract interface for an economic agent (constellation operator).
    Each subclass connects a profit-function "style" to a specific instance of
    parameters, exposing period benefits and their Jacobians.

    Merge residue removed: a stale class line used `abc.ABCMetaclass` (which
    does not exist — the correct name is abc.ABCMeta) and a stale abstract
    `profit` method from the pre-refactor API was interleaved here.
    """
    @abc.abstractmethod
    def period_benefit(self,state,estimand_interface):
        pass
    @abc.abstractmethod
    def _period_benefit(self):
        pass
    @abc.abstractmethod
    def period_benefit_jacobian_wrt_states(self):
        pass
    @abc.abstractmethod
    def _period_benefit_jacobian_wrt_states(self):
        pass
    @abc.abstractmethod
    def period_benefit_jacobian_wrt_launches(self):
        pass
    @abc.abstractmethod
    def _period_benefit_jacobian_wrt_launches(self):
        pass
    #TODO: Should I attach the jacobian here? It may simplify things.
    #def jacobian()
class LinearProfit(EconomicAgent):
    """
    The simplest type of profit function available: linear benefits from
    satellite stocks minus linear launch (and optionally deorbit) costs.

    Merge residue removed: a stale class line subclassing ProfitFunctions and a
    stale three-argument __init__ were interleaved with this version.
    """
    def __init__(self, batch_size, constellation_number, discount_factor, benefit_weight, launch_cost, deorbit_cost=0):
        self.batch_size = batch_size
        # Track which constellation this is.
        self.constellation_number = constellation_number
        # Number of constellations, pulled from the benefit weight in the
        # dimension that counts across constellations.
        self.number_of_constellations = benefit_weight.size()[1]
        # Parameters describing the agent's situation.
        self.discount_factor = discount_factor
        self.benefit_weights = benefit_weight
        self.launch_cost = launch_cost
        self.deorbit_cost = deorbit_cost
    def profit(self, state, estimand_interface, constellation_number):
        # Legacy EconomicModel-style entry point, retained for backward
        # compatibility with EconomicModel.constellation_period_profits.
        # BUG FIX: original read `state.stock`; States defines `.stocks`.
        profits = self.benefit_weights @ state.stocks \
            - self.launch_cost * estimand_interface.launches[constellation_number] #\
            #- deorbit_cost @ estimand_interface.deorbits[constellation_number]
        return profits
    def __str__(self):
        return "LinearProfit\n Benefit weights:\t{}\n launch cost:\t{}\n Deorbit cost:\t{}".format(self.benefit_weights, self.launch_cost, self.deorbit_cost)
    def period_benefit(self,state,estimand_interface):
        # BUG FIX(review): original passed `estimand_interface.choices`, an
        # attribute EstimandInterface never defines; the launch decisions live
        # in `.launches` — confirm against callers.
        return self._period_benefit(state.stocks, state.debris, estimand_interface.launches)
    def _period_benefit(self,stocks,debris,launches):
        # TODO (unimplemented in source): multiply benefits times stocks,
        # sum across constellations, reshape to standard dimensions,
        # subtract launch costs. Currently returns None.
        pass
    def period_benefit_jacobian_wrt_states(self, states, estimand_interface):
        return self._period_benefit_jacobian_wrt_states(states.stocks, states.debris, estimand_interface.launches)
    def _period_benefit_jacobian_wrt_states(self, stocks, debris, launches):
        # NOTE(review): this will not produce a usable Jacobian until
        # _period_benefit is implemented (it currently returns None).
        jac = jacobian(self._period_benefit, (stocks,debris,launches))
        return torch.cat((jac[0], jac[1]))
    def period_benefit_jacobian_wrt_launches(self, states, estimand_interface):
        return self._period_benefit_jacobian_wrt_launches(states.stocks, states.debris, estimand_interface.launches)
    def _period_benefit_jacobian_wrt_launches(self,stocks,debris,launches):
        jac = jacobian(self._period_benefit, (stocks,debris,launches))
        return jac[2]
#other profit functions to implement
# price competition (substitution)
# military (complementarity)
############### TRANSITION AND OPTIMALITY FUNCTIONS #################
#Rewrite these to use the abstractiosn present earlier
#at some point I should wrap the two functions below into a class that holds various things
# Merge residue removed: a stale, body-less `def single_transition(physical_model,
# economic_model, ...)` header from the pre-refactor API preceded this class and
# was a syntax error.
class OrbitalModel:
    """Placeholder for a consolidated orbital model (not yet implemented)."""
    def __init__(self, number_debris_trackers, num_choice_variables):
        # TODO: store and validate the arguments once the class is fleshed out.
        pass
def single_transition(physical_model, economic_agent, states, estimand_interface):
    """
    This function represents the inverted envelope conditions.
    It allows us to describe the derivatives of the value function evaluated at
    time t+1 in terms based in time period t.
    It returns the transitioned value partials and the transitioned states.

    Merge residue removed: a diff hunk header had been spliced into this
    docstring and an unreachable `pass` followed the return statement.
    """
    #TODO: possibly move jacobians of profit functions and physical models to those classes
    # Get the discounted jacobian with respect to states.
    A = economic_agent.discount_factor * physical_model.transition_jacobian_wrt_states(states, estimand_interface)
    f_theta = economic_agent.period_benefit_jacobian_wrt_states(states, estimand_interface)
    T = estimand_interface.partial_vector(economic_agent.constellation_number) - f_theta
    # Need to do testing to nicely handle when A is non-invertible.
    # Using linalg.solve because it has more numerical stability and is faster
    # than explicit inversion.
    iterated_value_partials = torch.linalg.solve(A, T)
    # Transition the states.
    iterated_states = physical_model.transition(states, estimand_interface)
    return iterated_value_partials, iterated_states
def transition_wrapper(data_in): # Identify a way to eliminate this maybe?
    """
    This function wraps the single transition and handles updating states etc.
    """
    #TODO: rewrite using current abstractions
    # NOTE(review): unimplemented stub — currently returns None for any input.
    pass
#Optimality math
def optimality(physical_model, economic_agent, states, estimand_interface):
    """
    This takes the given models, states, and the estimand and returns the
    optimality condition, along with the iterated value partials and the
    iterated state from single_transition.

    Merge residue removed: a stale multi-line signature taking
    (stocks, debris, profit_fn, laws_motion_fn, neural_net), its old docstring,
    and a stray `pass` were interleaved with this version.
    """
    # Marginal period benefit of the launch decision.
    fx = economic_agent.period_benefit_jacobian_wrt_launches(states, estimand_interface)
    # Jacobian of the state transition with respect to launches.
    B = physical_model.transition_jacobian_wrt_launches(states, estimand_interface)
    iterated_partials, iterated_state = single_transition(physical_model, economic_agent, states, estimand_interface)
    # Optimality condition: f_x + beta * B @ grad V_{t+1}.
    return fx + economic_agent.discount_factor * B @ iterated_partials, iterated_partials, iterated_state

@ -0,0 +1,117 @@
\documentclass{article}
%Setup Subfiles
\usepackage{subfiles}
%Include preambles
\input{assets/preambles/MathPreamble}
%\input{assets/preambles/TikzitPreamble}
\input{assets/preambles/BibPreamble}
\input{assets/preambles/GeneralPreamble}
\title{Summary of my work on Satellite Constellations}
\author{William King}
\begin{document}
\maketitle
\section{Introduction}
\subfile{sections/00_Introduction} %Currently from past semesters.
%roughly done 2021-07-15
%Describe sections
The paper is organized as follows.
Section \ref{SEC:Models}, describes the laws of motion
governing satellites and debris(\ref{SEC:Laws})%,
%places limits on various measures of risk (\ref{SEC:Survival}),
and reviews various definitions of kessler syndrome in
the context of dynamic modeling, their advantages, and their disadvantages (\ref{SEC:Kessler}).
It then describes the dynamic problem faced by constellation operators
(\ref{SEC:Operator}) and social planners (\ref{SEC:Planner}).
Section \ref{SEC:Computation} describes the computational approach and the
results are reported in \cref{SEC:Results}.
Section \ref{SEC:Conclusion} concludes with a discussion of limitations, concerns,
and remaining policy questions.
%\section{Modeling the Environment}\label{SEC:Environment}
\section{Model}\label{SEC:Models}
\subsection{Laws of motion}\label{SEC:Laws}
\subfile{sections/01_LawsOfMotion} %Roughly done 2021-07-15
%\subsection{Marginal survival rates}\label{SEC:Survival}
%\subfile{sections/03_SurvivalAnalysis} %roughly done 2021-07-14
% Thoughts on removal: This doesn't add much to the actual questions.
% It is interesting, but in a paper is just too much.
% I'll keep it here to add it back easily.
\subsection{Kessler Syndrome}\label{SEC:Kessler}
% Kessler syndrome follows laws of motion because it is the main
% threat of orbital pollution and needs to be included.
% Also, there is not really a better place to place it.
\subfile{sections/02_KesslerSyndrome} %roughly done before 2021-07-14
\subfile{sections/06_KesslerRegion} %roughly done before 2021-07-14
\subsection{Constellation Operator's Program}\label{SEC:Operator}
\subfile{sections/04_ConstellationOperator} %Reasonably done.
\subsection{Social Planner's Program}\label{SEC:Planner}
\subfile{sections/05_SocialPlanner} %Reasonably done?
\section{Computation}\label{SEC:Computation}
\subfile{sections/07_ComputationalApproach} %needs some clarifications.
\section{Results}\label{SEC:Results}
\subfile{sections/09_Results} %TODO
\section{Conclusion}\label{SEC:Conclusion}
\subfile{sections/08_Conclusion} %TODO
\newpage
\section{References}
\printbibliography
\newpage
\section{Appendices}
\subsection{Mathematical Notation}
Needs completed.
%\subsection{Deriving Marginal Survival Rates}\label{APX:Derivations:SurvivalRates}
%\subfile{sections/apx_01_MarginalSurvivalRates}
\subsection{Deriving Euler Equations}\label{APX:Derivations:EulerEquations}
\subfile{sections/apx_02_GeneralizedEuEqSteps}
\subsection{Collected Assumptions and Caveats}\label{APX:CollectedAssumptions}
I hope to write a section clearly explaining assumptions, caveats, and shortcomings here.
These will later get written back into the other sections, but I want to collect them
in a single place first.
%time periods are long enough for debris to disperse after collisions.
%Only a single type of debris
%With my current computational idea; each constellation provides the same risk to each other constellation
% That can be easily adjusted in the computational models.
\newpage
%Just for simplicity, remove later
\tableofcontents
\end{document}
%%% Notes to keep track of
% Possible other things to investigate
% - Free-entry conditions: which of the following?
% - When for every operator, the current stocks imply no more launches(x = 0).
% - When for every type of operator, the current stocks plus an own stock of 0 imply no more launches(x = 0).
%
%
%
%
%
%
%
%
%
%
%
%
%

@ -73,20 +73,18 @@ and remaining policy questions.
\section{References}
\printbibliography
\newpage
\section{Appedicies}
\subsection{Mathematical Notation}
Needs completed.
%TODO: write appendicies
%\section{Appedicies}
%\subsection{Mathematical Notation}
%Needs completed.
%\subsection{Deriving Marginal Survival Rates}\label{APX:Derivations:SurvivalRates}
%\subfile{sections/apx_01_MarginalSurvivalRates}
\subsection{Deriving Euler Equations}\label{APX:Derivations:EulerEquations}
\subfile{sections/apx_02_GeneralizedEuEqSteps}
\subsection{Collected Assumptions and Caveats}\label{APX:CollectedAssumptions}
I hope to write a section clearly explaining assumptions, caveats, and shortcomings here.
These will later get written back into the other sections, but I want to collect them
in a single place first.
%\subsection{Collected Assumptions and Caveats}\label{APX:CollectedAssumptions}
%I hope to write a section clearly explaining assumptions, caveats, and shortcomings here.
%These will later get written back into the other sections, but I want to collect them
%in a single place first.
%time periods are long enough for debris to disperse after collisions.
%Only a single type of debris
%With my current computational idea; each constellation provides the same risk to each other constellation

@ -117,7 +117,7 @@ Specifically, I permit:
\begin{itemize}
\item Heterogeneous agent types including commercial, scientific, and military.
\item Asymmetric constellations.
\item Inter- and intra- constellation risk is not assumed to be equal.
\item Inter- and intra- constellation risk to differ.
\end{itemize}
each of which are important qualities of the current orbital environment.
None of these aspects are considered in the papers that I have reviewed so far.

@ -18,8 +18,8 @@ subscripts $s_t$ denote time periods.
in period $t$
\item $D_t$ represents the level of debris at period $t$.
\end{itemize}
I've used curly braces (i.e. $\{ s^j_t \}$) to represent the set
of constellations' stocks.
I've used the capital letters $S_t$ and $X_t$ to represent the set (vector)
of constellations' stocks and policy decisions respectively.
\subsubsection{Satellite Stocks}
Each constellation consists of a number of satellites in orbit, controlled by the same operator and
@ -29,14 +29,15 @@ Of course, satellite stocks can be increased by launching more satellites.
Assuming satellites are not actively deorbited, we get the
following general law of motion for each constellation $i$.
\begin{align}
s^i_{t+1} = \left( 1 - l^i(\{s^j_t\}, D_t)\right)s^i_t + x^i_t
s^i_{t+1} = \left( R^i(S_t, D_t)\right)s^i_t + x^i_t
%Couple of Notes:
% This does not allow for natural decay of satellites.
% Nor does it include a deorbit decision.
% Representing those might be:
% - \eta s^i_t - y^i_t
\end{align}
Where $l^i(\cdot)$ represents the rate at which satellites are destroyed by collisions.
Where $R^i(\cdot)$ represents the constellation $i$'s survival rate, making
$1-R^1()$ the rate at which they are destroyed or damaged by collisions.
%Assumption:
\subsubsection{Collision Efficiencies}
@ -54,8 +55,8 @@ are operated for different purposes and require different orbital properties.
%This could be explained as Coordination across time (time travel doesn't exist yet)
This coordination is also complicated by the fact that constellations are not
designed nor launched at the same time.
Consequently an operator may choos to minimize their total risk when launching
a constellation, the later launch of constellations may lead to a suboptimal orbit design.
Consequently, while an operator may choose to minimize their total risk when launching
a constellation, the launch of later constellations may lead to a suboptimal orbit design.
It is important to note that satellite-on-satellite collisions are rare\footnote{
I am only aware of one collision between satellites,
and one of them was abandoned at the time.\cref{ListOfOrbitalIncidents}
@ -63,18 +64,14 @@ It is important to note that satellite-on-satellite collisions are rare\footnote
but this may be due to the fact that evasive maneuvers are usually taken
when collisions appear reasonably possible.
These collision efficiencies can be represented in the satellite destruction rate $l^i(\cdot)$ when:
These intra-collision efficiencies can be represented in the satellite survival rate $R^i(\cdot)$ as:
\begin{align}
\parder{l^i}{s^k_t}{} > 0 ~~\forall k \in \{1,\dots,N)\\
\parder{l^i}{s^j_t}{} > \parder{l^i}{s^i_t}{} ~~\forall j\neq i
\parder{R^i}{s^k_t}{} < 0 ~~\forall k \in \{1,\dots,N)\\
\parder{R^i}{s^j_t}{} < \parder{R^i}{s^i_t}{} ~~\forall j\neq i
\end{align}
Note that an additional satellite in any constellation increases the probability of loosing
a satellite from a given constellation, and this risk is lower
for the home constellation of the additional satellite.
Note that it is reasonable to assume that the loss of satellites to collisions should be
increasing in the level of debris: $\parder{l^i}{D_t}{} >0$.
Note that we assume that the loss of satellites to collisions is
increasing in the level of debris: $\parder{R^i}{D_t}{} < 0$.
\subsubsection{Debris}
Debris is generated by various processes, including:
@ -83,29 +80,28 @@ Debris is generated by various processes, including:
\item Satellite launches, operations, failures, or intentional destruction.
\item Collisions between
\begin{itemize}
\item Two satellites
\item A satellite and debris
\item Two pieces of debris
\item Two satellites.
\item A satellite and debris.
\item Two pieces of debris.
\end{itemize}
all generate more debris.
\end{itemize}
It leaves orbit when atmospheric drag slows it down enough to reenter the atmosphere.
Debris leaves orbit when atmospheric drag slows it down enough to reenter the atmosphere.
Because the atmosphere is negligible for many orbits, reentry can easily take decades
or centuries.
These effects can be represented by the following general law of motion.
\begin{align}
D_{t+1} = (1-\delta)D_t + g(D_t) + \gamma(\{s^j_t\},D_t) + \Gamma(\{x^j_t\})
D_{t+1} = (1-\delta)D_t + g(D_t) + \gamma(S_t,D_t) + \Gamma(X_t)
\end{align}
For simplicity, I formulate this more specifically as:
\begin{align}
D_{t+1} = (1-\delta)D_t + g(D_t)
+ \sum^N_{i=1} \gamma l^i(\{s^j_t\},D_t)
+ \Gamma \sum^n_{j=1} \{x^j_t\}
D_{t+1} = (1-\delta + g)D_t
+ \gamma \sum^N_{i=1} (1-R^i(S_t,D_t)) \cdot s^i_t
+ \Gamma \sum^n_{j=1} x^i_t
\end{align}
where $ \Gamma, \gamma$ represent the debris generated by each
launch and collision respectively,
while $\delta,g(\cdot)$ represent the decay rate of debris and the
launch and collision respectively.
Similarly $\delta$ and $g$ represent the decay rate of debris and the
autocatalysis\footnote{
Using terminology from \cite{RaoRondina2020}.
} of debris generation.

@ -10,7 +10,7 @@ A few methods have been used to model this behavior in the economics literature.
The first one I want to explain was developed by \cite{Adilov2018}.
They characterize kessler syndrome as the point in time at which an orbit is
unusable as each satellite in orbit will be destroyed within a single time period.
In my notation, this is that $l^i(\{s^j_t\}, D_t) = 1$.
In my notation, this is that $R^i(S_t, D_t) = 0 ~ \forall i$.
The benefit of this approach is that it is algebraically simple.
It was used in to show that firms will stop launching before
orbits are rendered physically useless.
@ -22,11 +22,12 @@ They define it in terms of a ``kessler region'', the set of satellite stocks and
such that the limit of debris in the future is infinite.
Mathematically this can be represented as:
\begin{align}
\kappa = \left\{ \{s^j_t\}, D_t :
\lim_{k\rightarrow \infty} D_{t+k}\left(\{s^j_{t+k-1}\}, D_{t+k-1}, \{x^j\}\right) = \infty \right\}
\kappa = \left\{ S_t, D_t :
\lim_{k\rightarrow \infty} D_{t+k}\left(S_{t+k-1}, D_{t+k-1}, X_t\right)
= \infty \right\}
\end{align}
There are a few issues with this approach, even though it captures the essence of kessler syndrome
better than the definition proposed by Adilov et al.
better than the definition proposed by \cite{Adilov2018}.
The issues it faces are generally the case of not delineating between kessler regions
with significantly different economic outcomes.
% doesn't account for speed of divergence
@ -38,7 +39,7 @@ The former is a global emergency, while the latter is effectively non-existant.
The last disadvantage I'd like to mention is that determining whether a
series is divergent depends on constructing mathematical proofs.
This makes it difficult to computationally identify whether a given state
is in the kessler region.
@ -49,13 +50,15 @@ fashions than \cite{RaoRondina2020}, for which I term the regions
First, define the $\epsilon$-kessler region as:
\begin{align}
\kappa = \left\{ \{s^j_t\}, D_t :
\kappa_\epsilon = \left\{ S_t, D_t :
\forall k \geq 0, D_{t+k+1} - D_{t+k} \geq \epsilon > 0 \right\}
\end{align}
%show that this is similar to saying that all non \epsilon kessler regions are bounded by the
%derivative, i.e. are lipshiz
The continuous time equivalent of this condition is defining the non-kessler regions by
an upper bound on the derivative of debris generation\footnote{A lipshitz-like condition}.
an upper bound on the derivative of debris generation\footnote{
Note that the non-proto-kessler region is defined by a Lipschitz-like condition
}.
It is easily shown that this criterion is sufficient to guarantee Rao and Rondina's criterion.
@ -84,45 +87,45 @@ of the kessler region would capture this behavior, but the $\epsilon$-kessler de
would not.
A particularly pathological case is where debris cycles between just below the cutoff level to
significantly above the cutoff, leading to a highly divergent behavior not captured by this definition.
As far as computability goes, by simulating a phase diagram (for a given solution to the model)
Also, by simulating a phase diagram (for a given solution to the model)
we can determine what sections are in the $\epsilon$-kessler region.
This is a major benefit in a computational model.
A related and more general concept is the ``proto-kesslerian'' region, which is
defined as the stock and debris levels such that:
\begin{align}
\kappa = \left\{ \{s^j_t\}, D_t :
\kappa_\text{proto} = \left\{ S_t, D_t :
D_{t+1} - D_{t} \geq \varepsilon > 0 \right\}
\end{align}
%Note that the debris level is in a $\epsilon$-kessler region when it is in a proto-kesslerian region
%for all future periods.
This even simpler to compute than the phase diagram, and can be used to generate a topological view
of proto-kesslerian regions of degre $\varepsilon$.
of various proto-kesslerian regions.
%These are both easier to interpret and various approaches could be used to analyze how debris levels
%transition between them.
%%%what would the integral of gradients weighted by the dividing line measure? just a thought.
%Other thoughts
% proto-kesslerian paths, paths that pass into a proto kesslerian region.
In order to capture the cyclic behavior that $\epsilon$-kessler regions miss, we can define a type of
path in the phase diagram called a proto-kesslerian path of degree $\epsilon$, which is any path
In order to capture the cyclic behavior that $\epsilon$-kessler regions miss,
we can define a type of
path in the phase diagram (called a proto-kessler path of degree $\epsilon$), which is any path
that enters the region.
For example, one could simulate a phase diagram and compare paths that fall into a given $\epsilon$-kessler region
and paths that only temporarily pass into the equivalent proto-kesslerian regions.
Comparing the number of paths that fall into each region may give a useful metric for policies that are
Comparing the number of paths that fall into each region may give a useful metric
for policies that are
designed to decrease the likelihood of kessler syndrome.
I believe, but have not verified, that some choices of $\varepsilon$, although permitting cycles,
would relegate them to levels with minimal economic impact.
%Maybe can be studies by phase or flow diagrams?
%Consider where it cycles between just below epsilon and then to a large increase in debris?
%I believe, but have not verified, that some choices of $\varepsilon$, although permitting cycles,
%would relegate them to levels with minimal economic impact.
%Area of research: What makes a good \epsilon?
This leads to the important question of ``What makes a good value of $\epsilon$ or $\varepsilon$?''
One method, in the spirit of \cite{Adilov2018}, is to choose a change in debris, $D_{t+1} - D_t$, such that
the loss of satellites in periods $t+1$ to $t+k$ is increased by or to a certain percentage, say 1\%.
One method, in the spirit of \cite{Adilov2018},
is to choose a change in debris, $D_{t+1} - D_t$,
such that the loss of satellites between periods $t$ to $t+k$ is
increased by or to a certain percentage, say 1\%.
I've put very little thought into addressing this general question so far,
and need to analyze the implications of different choice rules.

@ -9,7 +9,7 @@ Actual functional specifications are described in \cref{SEC:Computation} on comp
Each operator receives per-period benefits
-- such as profits for firms and warfighting capability for militaries --
from their constellation
according to $u^i(\{s^j_t\},D_t)$, which depends
according to $u^i(S_t,D_t)$, which depends
on the current sizes of constellations and the level of debris.
In addition, the operator pays for the launch of $x^i_t$ satellites
according to a general cost function $F(x)$.
@ -17,12 +17,12 @@ These satellites will become operational in the subsequent period.
Thus the $M$-period (possibly infinite), problem is:
\begin{align}
\max_{\{\vec x_t\}^M}&~
E\left[ \sum^M_{t=0} \beta^t u^i(\vec s_t, D_t) - F(x^i_t) \right] \\
\max_{\{x_t^i\}^M}&~
\left[ \sum^M_{t=0} \beta^t u^i(S_t, D_t) - F(x^i_t) \right] \\
&\text{subject to:}\\
& s^i_{t+1} = (1-l^i(\vec s_t, D_t))s^i_t +x^i_t ~~~ \forall i \\
& D_{t+1} = (1-\delta)D_t + g(D_t)
+ \gamma \sum^N_{i=1} l^i(\vec s_t, D_t)
& s^j_{t+1} = R^j(S_t, D_t) s^j_t + x^j_t ~~~ \forall j \\
& D_{t+1} = (1-\delta + g) D_t
+ \gamma \sum^N_{i=1} \left( 1-R^i(S_t, D_t) \right) s^i_t
+ \Gamma \sum^N_{i=1} x^i_t
\end{align}
%Assumptions
@ -33,106 +33,12 @@ Thus the $M$-period (possibly infinite), problem is:
%\subsection{Infinite Period (Bellman) Equation} % Not sure how much help a new header is.
The infinite-period version of the problem above can be rewritten in the Bellman form as
\begin{align}
V^i(\vec s_t, \vec x^{\sim i}_t, D_t) = \max_{x^i_t} u^i(\vec s_t, D_t) -F(x)
+ \beta \left[ V^i(\vec s_{t+1}, \vec x^{\sim i}_{t+1}, D_{t+1}) \right]
V^i(S_t, x^{\sim i}_t, D_t) = \max_{x^i_t} u^i(S_t, D_t) -F(x^i_t)
+ \beta \left[ V^i(S_{t+1}, x^{\sim i}_{t+1}, D_{t+1}) \right]
\end{align}
where $x^{\sim i}_t$ represents the launch decisions of all the other constellation
operators.
This implies that the policy function is a best response function, allowing for
One important point is that the policy function is a best response function, allowing for
a nash equilibrium interpretation of the result.
To solve for the policy function, we have a variety of methods available.
Due to the computational method chosen later, I'm going to examine the conditions
for the existence of an euler equation.
\subsubsection{Euler Equation}
Appendix \cref{APX:Derivations:EulerEquations} contains more details
on the math involved.
What follows is just a sketch of the method in matrix notation.
As there is only one choice variable, we get a single optimality condition.
It can be written in various formats, with the latter matching the appendix the best.
\begin{align}
% 0 =& \parder{}{x^i_t}{} u^i(\vec s_t, D_t) -\parder{}{x^i_t}{}F(x)
% + \beta \left[ \parder{}{x^i_t}{}
% V^i(\vec s_{t+1}, \vec x^{\sim i}_{t+1}, D_{t+1})
% \right] \\
0 =& -\der{F}{x^i_t}{}
+ \beta \left[
\nabla_{x^i_t} [ \vec s_{t+1}, \vec x^{\sim i}_{t+1}, D_{t+1} ]
\cdot
\nabla_{\vec s_{t+1}, \vec x^{\sim i}_{t+1}, D_{t+1}}
V^i(\vec s_{t+1}, \vec x^{\sim i}_{t+1}, D_{t+1})
\right] \label{EQ:OptimalityCondition}\\
0 =& -\der{F}{x^i_t}{} + \beta \vec a(\vec s_t,D_t) \cdot \nabla V^i_{t+1}
\label{EQ:SimplifiedOptimalityCondition}\\
=& - f_{x_t} + \beta \vec a_t \cdot \nabla V^i_{t+1}
\end{align}
As there are $N$ constellations we get $N$ satellite stocks,
$N-1$ decisions $x^{\sim i}$,
and $1$ debris state for a total of $2N$ state
variables\footnote{recall that $N$ is the number of constellations.}.
Thus there are $2N$ envelope conditions to be found:
\begin{align}
% \nabla_{\vec s_t, \vec x^{\sim i}_t, D_t} V^i(\vec s_t, \vec x^{\sim i}_t, D_t)
% =& \nabla_{\vec s_t, \vec x^{\sim i}_t, D_t} u^i(\vec s_t, D_t) \notag \\
% &+ \beta \left[
% \nabla_{\vec s_{t+1}, \vec x^{\sim i}_{t+1}, D_{t+1} }
% V^i(\vec s_{t+1}, \vec x^{\sim i}_{t+1}, D_{t+1})
% \cdot
% \nabla_{\vec s_t, \vec x^{\sim i}_t, D_t}
% [ \vec s_{t+1}, \vec x^{\sim i}_{t+1}, D_{t+1} ]
% \right] \label{EQ:EnvelopeConditions}
% \\
\nabla_{\vec s_t, \vec x^{\sim i}_t, D_t} V^i(\vec s_t, \vec x^{\sim i}_t, D_t)
=
\nabla \vec V^i_t
= \vec u^i
+ \beta B_t \cdot \nabla \vec V^i_{t+1}
\label{EQ:SimplifiedEnvelopeConditions}
\end{align}
%When interpreting this, note that
% $$
% \nabla \vec V^i_{t+1} = \nabla_{[\vec s_{t+1}~ \vec x^{\sim i}_{t+1}~ D_{t+1}] }
% V^i(\vec s_{t+1}, \vec x^{\sim i}_{t+1}, D_{t+1})
% $$
% is a $2N \times 1$ vector of first derivatives but
% $$
% A = \nabla_{\vec s_t, \vec x^{\sim i}_t, D_t}
% [ \vec s_{t+1}~ \vec x^{\sim i}_{t+1}~ D_{t+1} ]
% $$
% is a $2N \times 2N$ matrix of first derivatives.
% By solving for $\vec V^i_{t+1}$ as a function of $\vec V^i_{t}$ we get the
% intertemporal condition:
% \begin{align}
% \frac{1}{\beta} A^{-1} \left(\nabla \vec V^i_t - \vec u^i_t \right)
% = \nabla \vec V^i_{t+1}
% \end{align}
% Thus one crucial condition for the existence of a solution is that $A^{-1}$ exists for
% all values the laws of motion and choice functions can take.
% \subsection{Existence}
% I need to do some more diving into conditions for existence.
% Of particular concern is that the way I have specified the debris may lead to
% non-convergence.
%
To finish constructing the euler equation, we would use the intertemporal
transition function \cref{EQ:SimplifiedEnvelopeConditions} and iterated
versions of \cref{EQ:OptimalityCondition,EQ:SimplifiedOptimalityCondition}
to construct the $2N+1$ euler equations.\footnote{Double check numbers}
Note that for even a small number of agents -- e.g. 3 -- this iterated substitution
becomes relatively complex, requiring calculating an iterated intertemporal transition
function and laws of motion 6 times.
To solve this symbolicly involves inverting a $6 \times 6$ matrix.
As matrix inversion has approximately an $O(n^3)$ computational complexity,
this becomes unsustainable very quickly.
Section \cref{SEC:Computation} describes how to address this issue to generate
these euler equations using features of modern programming languages and linear algebra
libraries.
\end{document}

@ -4,13 +4,13 @@
\begin{document}
The Social (Fleet) Planner's problem can be written in the bellman form as:
\begin{align}
W(\vec s_t, D_t) =& \max_{\vec x_t} \left[
\left(\sum^N_{i=1} u^i(\vec s_t, D_t) - F(x^i_t) \right)
+ \beta \left[ W(\vec s_{t+1}, D_{t+1}) \right]\right] \notag \\
W(S_t, D_t) =& \max_{X_t} \left[
\sum^N_{i=1} \left( u^i(S_t, D_t) - F(x^i_t) \right)
+ \beta \left[ W(S_{t+1}, D_{t+1}) \right]\right] \notag \\
&\text{subject to:} \notag \\
& s^i_{t+1} = (1-l^i(\vec s_t, D_t))s^i_t +x^i_t ~~~ \forall i \notag \\
& D_{t+1} = (1-\delta)D_t + g(D_t)
+ \gamma \sum^N_{i=1} l^i(\vec s_t, D_t)
& s^i_{t+1} = (R^i(S_t, D_t)) s^i_t +x^i_t ~~~ \forall i \notag \\
& D_{t+1} = (1-\delta + g)D_t
+ \gamma \sum^N_{i=1} \left(1-R^i(\vec s_t, D_t)\right) s^i_t
+ \Gamma \sum^N_{i=1} x^i_t
\end{align}
%Some particular features of the model include:
@ -21,44 +21,8 @@ The Social (Fleet) Planner's problem can be written in the bellman form as:
% including uncontrolled deorbits.
Although the social planner controls each constellation, note that they do not reap additional
collision avoidance efficiencies.
this is because no social planner could conceive of every use of orbit
at any single point in time, and thus constellations may be designed sequentially.
This allows only the intra-constellation benefits to be achieved.
\subsubsection{Euler Equation}
In accordance with Appendix \cref{APX:Derivations:EulerEquations},
we find the $N$ optimality conditions:
\begin{align}
0 =& -\der{F(x^i_t)}{x^i_t}{}
+ \beta \left[
\nabla_{\vec s_{t+1}, D_{t+1}} W(\vec s_{t+1}, D_{t+1})
\cdot
\parder{}{x^I_t}{}[\vec s_{t+1} ~ D_{t+1}]
\right]
~~\forall~~i
\end{align}
Which in vector form is:
\begin{align}
0 =& -\vec f_x +\beta \left[B\cdot \nabla W_{t+1} \right]
\end{align}
Similarly, the $N+1$ envelope conditions are:
\begin{align}
% \nabla_{\vec s_{t}, D_{t}} W(\vec s_t, D_t) =&
% \sum^N_{i=1} \nabla_{\vec s_{t}, D_{t}} u^i(\vec s_t, D_t)
% %- \der{}{x^i_t}{}F(x^i_t) \nabla_{\vec s_{t}, D_{t}}x^i_t %This equals zero due to the envelope theorem
% \notag \\
% &+ \beta \left[ \nabla_{\vec s_{t+1}, D_{t+1}} W(\vec s_{t+1}, D_{t+1})
% \cdot \nabla_{\vec s_{t}, D_{t}} [\vec s_{t+1} ~ D_{t+1}]
% \right] \\
\nabla W_t =& \vec U + \beta \left[C \cdot \nabla W_{t+1} \right]
\end{align}
Which gives us the iteration format
\begin{align}
\nabla W_{t+1} =& (\beta C)^{-1} \cdot \left(\nabla W_t - \vec U \right)
\end{align}
Thus two iterations of the optimality condition are needed, but only to provide $N+1$ binding conditions.
This lets us discard $N-1$ of the conditions from the second iteration of the optimality condition.
% NEed to explain better. Not quite true.
One justification is that no social planner could conceive of every future use of an orbit
and consequently constellations will be designed sequentially.
This prevents intra-constellation benefits from being achieved across the entire fleet.
\end{document}

@ -6,7 +6,9 @@
With the definitions of kessler syndrome and the law of debris given above, we can now
explicitly describe the proto-kessler region.
\begin{align}
\epsilon < -\delta D_t + g(D_t) + \gamma \sum^n_{j=1} l^i(\{s^j_t\},D_t) + \Gamma \sum^n_{j=1} \{x^j_t\}
\epsilon < (g - \delta) D_t
+ \gamma \sum^n_{j=1} \left( 1-R^j(S_t,D_t) \right) s^j_t
+ \Gamma \sum^n_{j=1} x^j_t
\end{align}
As being in the proto-kessler region is a prerequisite to being in the kessler region, we see that
the kessler region depends on the collision rates of the constellation operators.

@ -2,147 +2,166 @@
\graphicspath{{\subfix{Assets/img/}}}
\begin{document}
The computational approach I have decided to take is an application of
\cite{Maliar2019}, where the policy function is approximated using a
neural network.
The approach uses the fact that the euler equation implicitly defines the
optimal policy function, for example:
$[0] = f(x(\theta),\theta)$.
This can easily be turned into a mean square objective function,
$0 = f^2(x(\theta),\theta)$,
allowing one to find $x(\cdot)$ as the solution to a minimization problem.
The computational approach I take is based on
\cite{Maliar2019}'s Bellman Residual Minimization, with the
policy and value functions are approximated using a neural network.
In summary the bellman equation is rewritten in the form:
\begin{align}
Q = V(S_t,D_t) - F(S_t,D_t,X_t(S_t,D_t)) -\beta V(S_{t+1},D_{t+1})
\end{align}
With a policy maximization condition such as:
\begin{align}
M = \left[ F(S_t,D_t,X_t(S_t,D_t)) + \beta V(S_{t+1},D_{t+1})\right]
\end{align}
In the deterministic case, a loss function can be constructed in
either of the following equivalent cases:
\begin{align}
\phi_1 = Q^2 - vM \\
\phi_2 = \left (M - Q - \frac{v}{2}\right)^2 - v \cdot \left(Q + \frac{v}{4}\right)
\end{align}
where $v$ is an external weighting parameter which can be cross validated.
By choosing a neural network as the functional approximation, we are able to
use the fact that a NN with a single hidden layer can be used to approximate
functions arbitrarily well
under certain conditions \cref{White1990}.
under certain conditions \autocite{White1990}.
We can also
take advantage of the significant computational and practical improvements
currently revolutionizing Machine Learning.
In particular, we can now use common frameworks, such as python, PyTorch,
and various online accelerators (Google Colab)
which have been optimized for relatively high performance and
straightforward development.
Some examples include the use of specialized hardware and the ability to transfer
learning between models, both of which can speed up functional approximation.
\subsection{Computational Plan}
Originally I had decided to use python and the PyTorch Neural Network library for this project.
Instead, the neural network library I've chosen to use is Flux.jl \cite{Innes2018},
a Neural Network library implemented in and for the Julia language,
although the Bellman Residual Minimization algorithm would work equally well in
PyTorch or TensorFlow
\footnote{
The initial reason I investigated Flux/Julia is due to the source to source
Automatic Differentiation capabilities, which I intended to use to implement
a generic version of \cite{Maliar2019}'s euler equation iteration method.
While I still believe this is possible and that Flux represents one of the
best tools available for that specific purpose,
I've been unsuccessful at implementing the algorithm.
}.
Below I note some of the design, training, and implementation decisions.
%Data Description
The data used to train the network is simulated data, pulled from random distributions.
One advantage of this approach is that by changing the distribution, the emphasis
in the training changes.
Initially training can be focused on certain areas of the state space, but later
training can put the focus on other areas as their importance is recognized.
In the case that we don't know which data areas to investigate, it is possible to
optimize over a given dataset, and the iterate stocks and debris forward
many periods.
If the debris and stocks don't line up well with the initial training dataset,
we can change the distribution to cover the stocks and debris from the iteration,
thus bootstrapping the distribution of the training set.
\subsubsection{Constellation Operators}
%Operators
% Branched Policy Topology
% Individual Value functions
% Training Loop
Although there are multiple operators, the individual policy functions
show up jointly as the code is currently implemented.
For this reason, I've implemented each operator's policy function
as a ``branch'' within a single neural network.
These branches are configured such that they each receive the same
inputs (stocks and debris), but decisions in each branch are made without reference
to the others' outputs.
These results are then concatenated together into the final policy vector.
When training a given operator, the appropriate branch is unfrozen so that operator can train.
Value functions are implemented as unique neural networks at the constellation operator level,
much like the operator's bellman residual function.
The training loops take the form of:
For each epoch
\begin{enumerate}
\item generate data
\item for each operator
\begin{enumerate}
\item Unfreeze branch
\item Train policy function on data
\item Freeze branch
\item Train Value function on data
\end{enumerate}
\item Check termination conditions
\end{enumerate}
Overall, this allows for each operator's policy and value functions to be approximated
on its own bellman residuals, while maintaining a convenient interface.
\subsubsection{Planner}
%Planner
% policy topology
% Value function topology
% Training loop
The policy function for the Fleet Planner does not require any separate branches,
although it could if desired for comparison purposes.
The key point though, is that no parameter freezing is done during training,
allowing the repercussions on other constellations to be taken into account.
Similarly there is a single neural network used to estimate the value function.
The training loops take the form of:
For each epoch
\begin{enumerate}
\item generate data
\begin{enumerate}
\item Train policy function on data
\item Train Value function on data
\end{enumerate}
\item Check termination conditions
\end{enumerate}
\subsubsection{Heterogeneous Agents and Nash Equilibria}
One key question is how to handle the case of heterogeneous agents.
In the processes outlined above, the heterogeneous agents are simply
identified by their position in the state and action vectors and
then the NN learns how to operate with each of them\footnote{
I believe it may be possible to create some classifications of
different heterogeneous agent types that allows for simpler function transfers,
but the implementation will take some extensive code design work.
}.
The most difficult step is creating the euler equations.
When working with high dimensioned problems involving differentiation,
three general computational approaches exist:
\begin{itemize}
\item Using a symbolic library (sympy) or language (mathematica) to create the
euler equations.
This has the disadvantage of being (very) slow, but the advantage that
for a single problem specification it only needs completed once.
It requires taking a matrix inverse, which can easily complicate formulas
and is computationally complex, approximately $O(n^3)$ algorithm.
\item Using numerical differentiation (ND).
The primary issue with ND is that errors can grow quite quickly when
performing algebra on numerical derivatives.
This requires tracking how errors can grow and compound within your
specific formulation of the problem.
\item Using automatic differentiation (AD) to differentiate the computer code
directly.
This approach has a few major benefits.
\begin{itemize}
\item Precision is high, because you are calculating symbolic
derivatives of your computer functions.
\item ML is heavily dependent on AD, thus the tools are plentiful
and tested.
\item The coupling of AD and ML lead to a tight integration with
the neural network libraries, simplifying the calibration procedure.
\end{itemize}
\end{itemize}
I have chosen to use the AD to generate a euler equation function, which will
then be the basis of our objective function.
The first step is to construct the intertemporal transition functions
(e.g \ref{put_refs_here}).
%Not sure how much detail to use.
%I'm debating on describing how it is done.
These take derivatives of the value function at time $t$ as an input, and output
derivatives of the value function at time $t+1$.
Once this function has been finished, it can be combined with the laws of motion
in an iterated manner to transition between times $t$ and times $t+k$.
I did so by coding a function that iteratively composes the transition
and laws of motion functions, returning a $k$-period transition function.
The second step is to generate functions that represent the optimality conditions.
By taking the appropriate derivatives with respect to the laws of motion and
benefit functions, this can be constructed explicitly.
Once these two functions are completed, they can be combined to create
the euler equations, as described in appendix \ref{APX:Derivations:EulerEquations}.
%%% Is it FaFCCs or recursion that allows this to occur?
%%% I believe both are ways to approach the problem.
%\paragraph{Functions As First Class Citizens}
%The key computer science tool that makes this possible is the concept
%of ``functions as first class citizens'' (FaFCCs).
%In every computer language there are primitive values that functions
%operate on.
%When a language considers FaFCCs, functions are one of the primitives
%that functions can operate on.
%This is how we can get
%AD in pytorch does not work by FaFCC though, instead constructing a computational graph.
\paragraph{Training}
With the euler equation and resulting objective function in place,
standard training approaches can be used to fit the function.
I plan on using some variation on stochastic gradient descent.
Normally, neural networks are trained on real world data.
As this is a synthetic model, I am planning on training it on random selections
from the state space.
If I can find data on how satellites are and have been distributed, I plan on
selecting from that distribution.
\paragraph{Heterogeneous Agents}
One key question is how to handle the case of heterogeneous agents.
When the laws of motion depend on other agents' decisions, as is the case
described in \ref{SEC:Laws}, intertemporal iteration may
require knowing the other agents best response function.
I believe I can model this in the constellation operator's case
by solving for the policy functions of each class of operator
simultaneously.
I would like to verify this approach as I have not dived into
some of the mathematics that deeply.
When the laws of motion depend on other agents' decisions, the opportunity
for Nash and other game theoretic equilibria arises.
One benefit of using neural networks is that they can find standard equilibrium concepts,
including mixed nash equilibria if configured properly.
%concerns about nash computability
\subsection{Functional Forms}
The simplest functional forms for the model are similar to those in
\autocite{RaoRondina2020}, giving:
The reference functional forms for the model are similar to those
given in \cite{RaoRondina2020}.
\begin{itemize}
\item The per-period benefit function:
\item The linear per-period benefit function:
\begin{align}
u^i(\{s^j_t\}, D_t) = \pi s^i_t
u^i(S_t, D_t, X_t) = \pi s^i_t - f \cdot x^i_t
\end{align}
\item The launch cost function:
\item Each constellation's satellite survival function:
\begin{align}
F(x^i_t) = f \cdot x^i_t
\end{align}
\item The satellite destruction rate function:
\begin{align}
l^i(\{s^j_t\}, D_t) = 1 - e^{- d\cdot D_t - \sum^N_{j=1} h^j s^j_t}
\end{align}
\item The debris autocatalysis function:
\begin{align}
g(D_t) = g\cdot D_t
\\
g > 1
R^i(S_t, D_t) = e^{- d\cdot D_t - \sum^N_{j=1} h^j s^j_t}
\end{align}
\end{itemize}
\subsection{Existence concerns}
\subsubsection{Parameter Values}
%I'm just guessing.
Currently, I've not found a way to estimate the proper parameters to use,
and there needs to be a discussion of how to calibrate those parameters.
So far, my goal is to choose parameters with approximately
the correct order of magnitude.
%\subsection{Existence concerns}
%check matrix inverses etc.
%
I am currently working on a plan to guarantee existence of solutions.
Some of what I want to do is check numerically crucial values and
mathematically necessary conditions for existence and uniqueness.
Unfortunately this is little more than just a plan right now.
%I am currently working on a plan to guarantee existence of solutions.
%Some of what I want to do is check numerically crucial values and
%mathematically necessary conditions for existence and uniqueness.
%Unfortunately this is little more than just a plan right now.
\end{document}

@ -2,24 +2,30 @@
\graphicspath{{\subfix{Assets/img/}}}
\begin{document}
So far, I have not been able to actually analyze any models,
but the following are cases of interest.
I am finishing the implementation of the analysis tools, and so I don't have any results yet.
Some analyses of interest are:
\begin{itemize}
\item Identify the Proto-Kessler regions.
\item Perform $\epsilon$-Kessler region transition analysis.
\item Check policy functions for non-entry regions of the state space.
\end{itemize}
Some situations I'm interested in implementing are:
\begin{itemize}
\item Reproduce Rao-Rondina single satellite model.
\item Reproduce Rao-Rondina linear profits model.
\item Reproduce Adilov's Cournot-like market.
\item Add military operators to Adilov or Rao's model.
This will involve some sort of competitive complementarity
with diminishing marginal returns.
\item Add military operators who need to ``Keep up with the Jones''
\item Competitive market where the number of satellites improves quality, i.e. allows
for pricing differences (Orbital Internet, e.g. Starlink).
\item Interacting orbital shells, using a vector representation of heterogeneous risk
imposed by constellations and debris.
\item Add a deorbit choice variable to the model.
for pricing differences (Orbital Internet, e.g. Starlink vs OneWeb vs Amazon Kepler).
\item Interacting orbital shells, using a vector representation of debris.
\item Add deorbit and parking choice variables to the model.
\end{itemize}
Among these, policies that would be interesting to analyse include:
\begin{itemize}
\item Launch and Operation Taxes
\item Pigouvian Launch and Operation Taxes
\item Deorbit-contingent bonds, similar to environmental cleanup bonds
in mining operations.
\end{itemize}

@ -0,0 +1,13 @@
{
// See https://go.microsoft.com/fwlink/?LinkId=827846 to learn about workspace recommendations.
// Extension identifier format: ${publisher}.${name}. Example: vscode.csharp
// List of extensions which should be recommended for users of this workspace.
"recommendations": [
],
// List of extensions recommended by VS Code that should not be recommended for users of this workspace.
"unwantedRecommendations": [
]
}

@ -0,0 +1,4 @@
{
"python.pythonPath": "/bin/python3",
"editor.detectIndentation": false
}

@ -0,0 +1,945 @@
### A Pluto.jl notebook ###
# v0.17.0
using Markdown
using InteractiveUtils
# ╔═╡ cc16838c-3b25-11ec-2489-11c3d35f26f4
using Flux,LinearAlgebra,Zygote ,PlutoUI
# ╔═╡ 458bb826-4eaf-42ca-b889-4d1c50a2ecae
using Flux.Optimise: AbstractOptimiser
# ╔═╡ 7d6afccb-c45e-4371-827a-43d588d4945f
md"""
# Actor Critic Model
This is an implementation of an optimizer for an actor critic model.
"""
# ╔═╡ a810a371-d2e4-4d4f-9cf6-1b98582c4b0f
md"""
## Physical Model
"""
# ╔═╡ 2e59f530-aeb0-4ce1-9312-e81a66520181
abstract type AbstractPhysicalModel end
# ╔═╡ c57e9967-75e8-47d2-836e-f28159dc6592
#setup physical model: scalar/matrix parameters of the orbital environment
struct BasicModel <: AbstractPhysicalModel
#rate at which debris hits satellites
debris_collision_rate::Real
#rate at which satellites of different constellations collide
satellite_collision_rates::Matrix{Float64}
#rate at which debris exits orbits
decay_rate::Real
#rate at which debris begets more debris (original comment was truncated;
#usage in H adds this to the debris growth factor)
autocatalysis_rate::Real
#ratio at which a collision between satellites produces debris
satellite_collision_debris_ratio::Real
#Ratio at which launches produce debris
launch_debris_ratio::Real
end
# ╔═╡ 4153bfd6-2ec6-4025-93ba-8e6e061809e8
md"""
## Setup NeuralNets
"""
# ╔═╡ bbc7e4b0-5edb-4e5d-9512-1aaeae1bb4ec
md"""
Custom function to zero out parameter traces
"""
# ╔═╡ 533250c6-fd76-47ef-a561-d567da04ae61
# ╔═╡ a305c33f-3388-4120-b036-3093c3ed6aa3
md"""
## setup Actor Critic struct and loop
"""
# ╔═╡ 0d375c34-3226-40b6-adaf-2f531c00d3ac
#Build a Flux.Params whose entries are zero arrays shaped like those of `a`.
#(Locally shadows `zero`; delegates element-wise to Base.zero.)
function zero(a::Flux.Params)
	Flux.Params(map(Base.zero, collect(a)))
end
# ╔═╡ e79e4ecf-caec-4c1a-9626-0c1a8b5006ce
#Actor-critic optimizer state: trace-decay rates (λ), learning rates (α),
#eligibility traces (z), and a running average-reward estimate.
#NOTE(review): two field names and parts of the constructor were lost to a
#unicode/encoding error in the original (bare `::Flux.Params`, bare
#`::AbstractFloat`, empty argument in `new`). The names `zʷ` and `R̄` are
#reconstructed from the constructor's argument order — confirm against the
#training-loop cell, whose `optim.` accesses were garbled the same way.
mutable struct ActorCritic
	#parameters
	λʷ::Real   #value (critic) eligibility-trace decay
	λᶿ::Real   #policy (actor) eligibility-trace decay
	αʷ::Real   #value learning rate
	αᶿ::Real   #policy learning rate
	αᴿ::Real   #average-reward learning rate
	#keep track of eligibility traces
	zᶿ::Flux.Params
	zʷ::Flux.Params
	#keep track of the running average-reward estimate
	R̄::AbstractFloat
	#Inside generator
	ActorCritic(
		λʷ::Real
		,λᶿ::Real
		,αʷ::Real
		,αᶿ::Real
		,αᴿ::Real
		,θ::Flux.Params
		,w::Flux.Params
		,R::Real
	) =
	begin
		zᶿ = zero(θ) #custom zero handles params
		zʷ = zero(w) #custom zero handles params
		new(λʷ,λᶿ,αʷ,αᶿ,αᴿ,zᶿ,zʷ,R)
	end
end
# ╔═╡ 0ac4ac61-0ca2-4ed6-8855-85c9d1939afa
md"""
# Example use: Model setup
"""
# ╔═╡ 7102c12c-581b-4204-9986-741889cb03f6
#implement transition functions
#Fraction of each constellation's satellites surviving the period: an
#exponential in the combined satellite-on-satellite and debris hazards.
function survival(
	stocks
	,debris
	,physical_model::AbstractPhysicalModel
)
	#total collision hazard faced by each constellation
	hazard = physical_model.satellite_collision_rates * stocks .+
		physical_model.debris_collision_rate .* debris
	return exp.(.-hazard)
end
# ╔═╡ 283bdd91-d742-4f4b-a19b-2756ac01ce2c
#stock update rules: surviving satellites (net of the decay rate) plus launches
function G(
	stocks::Vector
	,debris::Vector
	,launches::Vector
	, physical_model::AbstractPhysicalModel
)
	#per-constellation retention fraction after collisions and decay
	retention = survival(stocks, debris, physical_model) .- physical_model.decay_rate
	return LinearAlgebra.diagm(retention) * stocks + launches
end
# ╔═╡ 8a3f9518-ae0a-46f3-b37d-1017ce99c70d
#debris evolution: natural dynamics + debris from satellite losses + launch debris
function H(stocks,debris,launches,physical_model)
	survivors = survival(stocks, debris, physical_model)
	#existing debris decays but also self-propagates (autocatalysis)
	natural_debris_dynamics =
		(1 - physical_model.decay_rate + physical_model.autocatalysis_rate) * debris
	#satellites lost to collisions become debris
	satellite_loss_debris =
		physical_model.satellite_collision_debris_ratio * ((1 .- survivors)' * stocks)
	#each launch leaves some debris behind
	launch_debris = physical_model.launch_debris_ratio * sum(launches)
	#return total debris level
	return natural_debris_dynamics .+ satellite_loss_debris .+ launch_debris
end
# ╔═╡ e4c8abfa-e96c-44b7-9858-af46adc1164a
#implement reward function
begin
	#diagonal payoff matrix (cross-constellation terms currently disabled)
	const payoff = 3*LinearAlgebra.I #- 0.02*ones(N_constellations,N_constellations)
	#Market profit: stock revenue minus launch cost (3.0 each) minus a
	#debris penalty (0.2 per unit); first element extracted as the scalar reward.
	function F(stocks, debris, launches)
		return (stocks .- 3.0 .* launches .- 0.2 .* debris)[1]
	end
end
# ╔═╡ 42be8352-7ecf-4da4-bff0-61e328144ed1
#test
F(4,2,2)
# ╔═╡ e2135298-e280-40ac-a549-6a15f13b84ee
md"""
## Example use: Single actor.
### Model and Optimizer Parameterization
"""
# ╔═╡ 5a2dab62-985c-4689-ab48-da04981d15a0
#Model shape
begin
#single constellation operator and a single aggregate debris stock
const N_constellations = 1;
const N_debris = 1;
#size of the combined state vector (stocks + debris)
const N_states = N_constellations + N_debris;
end
# ╔═╡ 3f14b399-cae8-433a-8729-fc91c9b0bee9
# Launch function: policy network mapping a (stocks, debris) tuple to launches
launch_policy = Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris, along with intermediate interpretation
,Flux.Chain(Flux.Dense(N_constellations, N_states*2,Flux.relu)
,Flux.Dense(N_states*2, N_states*2,Flux.σ)
)
,Flux.Chain(Flux.Dense(N_debris, N_states,Flux.relu)
,Flux.Dense(N_states, N_states,Flux.σ)
)
)
#Apply some transformations
,Flux.Dense(N_states*3,128,Flux.σ)
,Flux.Dense(128,128,Flux.σ)
#relu output layer keeps launch decisions nonnegative
,Flux.Dense(128,N_constellations,Flux.relu)
)
# ╔═╡ 89dabcc8-7f4e-4df4-8445-eae9f50ebfea
#inspect parameters
Flux.params(launch_policy)
# ╔═╡ 15d5ce62-135d-493e-abad-3ec1f99bfd3b
#Test the function above
zero(Flux.params(launch_policy))
# ╔═╡ 2c9346eb-048d-4667-b22b-583b814ae1a3
# Value function network ("Launch function" here was a copy/paste slip):
# same branched (stocks, debris) topology as launch_policy, scalar linear output.
value = Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris, along with intermediate interpretation
,Flux.Chain(Flux.Dense(N_constellations, N_states*2,Flux.relu)
,Flux.Dense(N_states*2, N_states*2,Flux.σ)
)
,Flux.Chain(Flux.Dense(N_debris, N_states,Flux.relu)
,Flux.Dense(N_states, N_states,Flux.σ)
)
)
#Apply some transformations
,Flux.Dense(N_states*3,128,Flux.σ)
,Flux.Dense(128,128,Flux.σ)
,Flux.Dense(128,1)
);
# ╔═╡ 95b9943b-8c13-4717-8239-2bae9f93ee63
#create the current actor critic optimizer
#positional args: λʷ, λᶿ (trace decays), αʷ, αᶿ, αᴿ (learning rates),
#then policy params θ, value params w, and the initial average-reward estimate
optim = ActorCritic(
0.5
,0.5
,2.0
,2.0
,2.0
,Flux.params(launch_policy)
,Flux.params(value)
,0.5
)
# ╔═╡ 43704f08-6f1a-47bb-87e3-483a529b1654
#Starting States: stocks and debris initialized to one unit each
begin
stock_state = ones(N_constellations);
debris_state = ones(N_debris);
end;
# ╔═╡ 6f3649c1-2ade-437d-81cb-d9f46306d1e0
#check launch policy
launch_policy((stock_state,debris_state))
# ╔═╡ 7dfc61a9-569a-4e97-a292-4c2b15a78e3a
value((stock_state,debris_state))
# ╔═╡ 2f0ff236-0dff-4cd8-bbb4-f1e81ecef308
#=
Setup the physical model
These values are just guesstimates
=#
begin
#Getting loss parameters together.
loss_param = 2e-3;
#cross-constellation collision matrix: zero diagonal (no self-collision term)
loss_weights = loss_param*(ones(N_constellations,N_constellations) - LinearAlgebra.I);
#orbital decay rate
decay_param = 0.01;
#debris generation parameters
autocatalysis_param = 0.001;
satellite_loss_debris_rate = 5.0;
launch_debris_rate = 0.05;
#Todo, wrap physical model as a struct with the parameters
#(argument order must match the BasicModel field order)
bm = BasicModel(
loss_param
,loss_weights
,decay_param
,autocatalysis_param
,satellite_loss_debris_rate
,launch_debris_rate
);
end
# ╔═╡ df771ced-e541-4bb2-b6a3-bb562bbaa89c
md"""
### Evaluation loop
"""
# ╔═╡ 6d9a71ea-b9d1-4efd-aa32-9053b96a66e1
#=Actor Critic Loop
This iterates on the actor critic loop,
=#
#NOTE(review): work-in-progress cell. Several `optim.` accesses below lost
#their field names to an encoding error (compare the ActorCritic struct, which
#was garbled the same way), and the two bare `continue` statements make all
#of the trace/parameter updates after the first one unreachable.
with_terminal() do
for iit in 1:12
w = Flux.params(value)
θ = Flux.params(launch_policy)
#Get learning parameters
action = launch_policy((stock_state,debris_state))
#advance the state one period using the laws of motion
new_stock_state = G(stock_state,debris_state,action,bm)
new_debris_state = H(stock_state,debris_state,action,bm)
println(new_debris_state)
#get current value
current_value = value((stock_state,debris_state))
new_value = value((new_stock_state,new_debris_state))
#need to define
R = F(stock_state,debris_state,action)
#FIX: R is a vector, so are the values. This needs fixed.
#TD error: reward minus average-reward estimate plus the value change
δ = ((R .- optim.) .+ (new_value .- current_value))[1] #fix
#store values
optim. = optim. + δ*optim.αᴿ
#check for exit conditions.
#probably use grad=0
continue #issue in calculating gradients
#update the learning traces (unreachable — see NOTE above)
optim. = optim.λʷ .* optim. .+ Flux.gradient(value,w)
optim.zᶿ = optim.λᶿ .* optim.zᶿ .+ Flux.gradient(policy,θ)
continue
#update the policies (unreachable — see NOTE above)
w = w .+ δ* optim.αʷ .* optim.;
θ = θ .+ δ* optim.αᶿ .* optim.zᶿ;
println("its working, $iit")
end
end
# ╔═╡ 9941e984-e51b-4e64-8d16-13c8071363b0
#TODO:issue is here, taking gradient. I get to figure this out next.
# FIX(review): the original parenthesisation passed `w` to `sum` as its
# second (dims) argument and called `value` with two positional arguments,
# while every other cell calls `value` with a single state tuple. The params
# collection belongs to `Zygote.gradient` as its second argument, and is
# rebuilt here because the `w` in the training loop above is local to that
# `do` block.
Zygote.gradient(() -> sum(value((stock_state,debris_state))), Flux.params(value))
# ╔═╡ dd9f5f04-31e4-441e-9f5f-3019a30692d6
# Minimal sanity check of implicit-params gradients in Flux: build a tiny
# classifier, differentiate a crossentropy loss w.r.t. its parameters, and
# print each parameter's gradient.
with_terminal() do
    sample = rand(Float32, 10)
    model = Chain(Dense(10, 5, relu), Dense(5, 2), softmax)
    loss_fn(input) = Flux.Losses.crossentropy(model(input), [0.5, 0.5])
    ps = params(model)
    gs = gradient(() -> loss_fn(sample), ps)
    for p in ps
        println(gs[p])
    end
end
# ╔═╡ 00000000-0000-0000-0000-000000000001
# Machine-generated Pluto environment block: pins this notebook's direct
# dependencies and compat bounds. Managed by Pluto — do not edit by hand.
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
[compat]
Flux = "~0.12.8"
PlutoUI = "~0.7.17"
Zygote = "~0.6.29"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "485ee0867925449198280d4af84bdb46a2a404d0"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.0.1"
[[AbstractTrees]]
git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.3.4"
[[Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.1"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[ArrayInterface]]
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
git-tree-sha1 = "d9352737cef8525944bf9ef34392d756321cbd54"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "3.1.38"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "a598ecb0d717092b5539dbbe890c98bac842b072"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.2.0"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[CEnum]]
git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.1"
[[CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"]
git-tree-sha1 = "2c8329f16addffd09e6ca84c556e2185a4933c64"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "3.5.0"
[[ChainRules]]
deps = ["ChainRulesCore", "Compat", "LinearAlgebra", "Random", "RealDot", "Statistics"]
git-tree-sha1 = "035ef8a5382a614b2d8e3091b6fdbb1c2b050e11"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.12.1"
[[ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "3533f5a691e60601fe60c90d8bc47a27aa2907ec"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.11.0"
[[CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.0"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "dce3e3fea680869eaa0b774b2e8343e9ff442313"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.40.0"
[[CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[DataAPI]]
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.9.0"
[[DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "7d9d316f04214f7efdbb6398d545446e246eff02"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.10"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.3"
[[DiffRules]]
deps = ["NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "7220bc21c33e990c14f4a9a319b1d242ebc5b269"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.3.1"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[ExprTools]]
git-tree-sha1 = "b7e3d17636b348f005f11040025ae8c6f645fe92"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.6"
[[FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "8756f9935b7ccc9064c6eef0bff0ad643df733a3"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.12.7"
[[FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[Flux]]
deps = ["AbstractTrees", "Adapt", "ArrayInterface", "CUDA", "CodecZlib", "Colors", "DelimitedFiles", "Functors", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "NNlibCUDA", "Pkg", "Printf", "Random", "Reexport", "SHA", "SparseArrays", "Statistics", "StatsBase", "Test", "ZipFile", "Zygote"]
git-tree-sha1 = "e8b37bb43c01eed0418821d1f9d20eca5ba6ab21"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.12.8"
[[ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "63777916efbcb0ab6173d09a658fb7f2783de485"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.21"
[[Functors]]
git-tree-sha1 = "e4768c3b7f597d5a352afa09874d16e3c3f6ead2"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.2.7"
[[GPUArrays]]
deps = ["Adapt", "LinearAlgebra", "Printf", "Random", "Serialization", "Statistics"]
git-tree-sha1 = "7772508f17f1d482fe0df72cabc5b55bec06bbe0"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.1.2"
[[GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "77d915a0af27d474f0aaf12fcd46c400a552e84c"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.13.7"
[[Hyperscript]]
deps = ["Test"]
git-tree-sha1 = "8d511d5b81240fc8e6802386302675bdf47737b9"
uuid = "47d2ed2b-36de-50cf-bf87-49c2cf4b8b91"
version = "0.0.4"
[[HypertextLiteral]]
git-tree-sha1 = "5efcf53d798efede8fee5b2c8b09284be359bf24"
uuid = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
version = "0.9.2"
[[IOCapture]]
deps = ["Logging", "Random"]
git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a"
uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
version = "0.2.2"
[[IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "95215cd0076a150ef46ff7928892bc341864c73c"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.3"
[[IfElse]]
git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1"
uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173"
version = "0.1.1"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "f0c6489b12d28fb4c2103073ec7452f3423bd308"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.1"
[[IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "8076680b162ada2a031f707ac7b4953e30667a37"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.2"
[[Juno]]
deps = ["Base64", "Logging", "Media", "Profile"]
git-tree-sha1 = "07cb43290a840908a771552911a6274bc6c072c7"
uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d"
version = "0.8.4"
[[LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "46092047ca4edc10720ecab437c42283cd7c44f3"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "4.6.0"
[[LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6a2af408fe809c4f1a54d2b3f188fdd3698549d6"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.11+0"
[[LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[LogExpFunctions]]
deps = ["ChainRulesCore", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "6193c3815f13ba1b78a51ce391db8be016ae9214"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.4"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Media]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58"
uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27"
version = "0.5.0"
[[Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NNlib]]
deps = ["Adapt", "ChainRulesCore", "Compat", "LinearAlgebra", "Pkg", "Requires", "Statistics"]
git-tree-sha1 = "5203a4532ad28c44f82c76634ad621d7c90abcbd"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.7.29"
[[NNlibCUDA]]
deps = ["CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics"]
git-tree-sha1 = "04490d5e7570c038b1cb0f5c3627597181cc15a9"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.1.9"
[[NaNMath]]
git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.5"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
[[OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[Parsers]]
deps = ["Dates"]
git-tree-sha1 = "d911b6a12ba974dabe2291c6d450094a7226b372"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.1.1"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[PlutoUI]]
deps = ["Base64", "Dates", "Hyperscript", "HypertextLiteral", "IOCapture", "InteractiveUtils", "JSON", "Logging", "Markdown", "Random", "Reexport", "UUIDs"]
git-tree-sha1 = "615f3a1eff94add4bca9476ded096de60b46443b"
uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
version = "0.7.17"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Random123]]
deps = ["Libdl", "Random", "RandomNumbers"]
git-tree-sha1 = "0e8b146557ad1c6deb1367655e052276690e71a3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.4.2"
[[RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.1.3"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[SpecialFunctions]]
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "f0bccf98e16759818ffc5d97ac3ebf87eb950150"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "1.8.1"
[[Static]]
deps = ["IfElse"]
git-tree-sha1 = "e7bc80dc93f50857a5d1e3c8121495852f407e6a"
uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
version = "0.4.0"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "3c76dde64d03699e074ac02eb2e8ba8254d428da"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.2.13"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[StatsAPI]]
git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.0.0"
[[StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "eb35dcc66558b2dda84079b9a1be17557d32091a"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.12"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "7cb456f358e8f9d102a8b25e8dfedf58fa5689bc"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.13"
[[TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[ZipFile]]
deps = ["Libdl", "Printf", "Zlib_jll"]
git-tree-sha1 = "3593e69e469d2111389a9bd06bac1f3d730ac6de"
uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
version = "0.9.4"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "0fc9959bcabc4668c403810b4e851f6b8962eac9"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.29"
[[ZygoteRules]]
deps = ["MacroTools"]
git-tree-sha1 = "8c1a8e4dfacb1fd631745552c8db35d0deb09ea0"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.2"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╟─7d6afccb-c45e-4371-827a-43d588d4945f
# ╠═cc16838c-3b25-11ec-2489-11c3d35f26f4
# ╟─a810a371-d2e4-4d4f-9cf6-1b98582c4b0f
# ╠═2e59f530-aeb0-4ce1-9312-e81a66520181
# ╠═c57e9967-75e8-47d2-836e-f28159dc6592
# ╟─4153bfd6-2ec6-4025-93ba-8e6e061809e8
# ╟─bbc7e4b0-5edb-4e5d-9512-1aaeae1bb4ec
# ╠═533250c6-fd76-47ef-a561-d567da04ae61
# ╠═3f14b399-cae8-433a-8729-fc91c9b0bee9
# ╠═2c9346eb-048d-4667-b22b-583b814ae1a3
# ╠═89dabcc8-7f4e-4df4-8445-eae9f50ebfea
# ╠═6f3649c1-2ade-437d-81cb-d9f46306d1e0
# ╠═7dfc61a9-569a-4e97-a292-4c2b15a78e3a
# ╟─a305c33f-3388-4120-b036-3093c3ed6aa3
# ╠═458bb826-4eaf-42ca-b889-4d1c50a2ecae
# ╠═0d375c34-3226-40b6-adaf-2f531c00d3ac
# ╠═15d5ce62-135d-493e-abad-3ec1f99bfd3b
# ╠═e79e4ecf-caec-4c1a-9626-0c1a8b5006ce
# ╟─0ac4ac61-0ca2-4ed6-8855-85c9d1939afa
# ╠═7102c12c-581b-4204-9986-741889cb03f6
# ╠═283bdd91-d742-4f4b-a19b-2756ac01ce2c
# ╠═8a3f9518-ae0a-46f3-b37d-1017ce99c70d
# ╠═e4c8abfa-e96c-44b7-9858-af46adc1164a
# ╠═42be8352-7ecf-4da4-bff0-61e328144ed1
# ╟─e2135298-e280-40ac-a549-6a15f13b84ee
# ╠═95b9943b-8c13-4717-8239-2bae9f93ee63
# ╠═5a2dab62-985c-4689-ab48-da04981d15a0
# ╠═43704f08-6f1a-47bb-87e3-483a529b1654
# ╠═2f0ff236-0dff-4cd8-bbb4-f1e81ecef308
# ╟─df771ced-e541-4bb2-b6a3-bb562bbaa89c
# ╠═6d9a71ea-b9d1-4efd-aa32-9053b96a66e1
# ╠═9941e984-e51b-4e64-8d16-13c8071363b0
# ╠═dd9f5f04-31e4-441e-9f5f-3019a30692d6
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -0,0 +1,262 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "41dcca64-963f-488e-b92e-f1dc5109359a",
"metadata": {},
"outputs": [],
"source": [
"using Enzyme"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "ca966ab8-469e-4f8c-af54-579c55f54bd4",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"()"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"\n",
"function mymul!(R, A, B)\n",
" @assert axes(A,2) == axes(B,1)\n",
" @inbounds @simd for i in eachindex(R)\n",
" R[i] = 0\n",
" end\n",
" @inbounds for j in axes(B, 2), i in axes(A, 1)\n",
" @inbounds @simd for k in axes(A,2)\n",
" R[i,j] += A[i,k] * B[k,j]\n",
" end\n",
" end\n",
" nothing\n",
"end\n",
"\n",
"\n",
"A = rand(5, 3)\n",
"B = rand(3, 7)\n",
"\n",
"R = zeros(size(A,1), size(B,2))\n",
"∂z_∂R = rand(size(R)...) # Some gradient/tangent passed to us\n",
"\n",
"∂z_∂A = zero(A)\n",
"∂z_∂B = zero(B)\n",
"\n",
"Enzyme.autodiff(mymul!, Const, Duplicated(R, ∂z_∂R), Duplicated(A, ∂z_∂A), Duplicated(B, ∂z_∂B))"
]
},
{
"cell_type": "code",
"execution_count": 3,
"id": "7442bfb5-3146-493d-9abc-9afeb56c0471",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"true"
]
},
"execution_count": 3,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"R ≈ A * B &&\n",
"∂z_∂A ≈ ∂z_∂R * B' && # equivalent to Zygote.pullback(*, A, B)[2](∂z_∂R)[1]\n",
"∂z_∂B ≈ A' * ∂z_∂R # equivalent to Zygote.pullback(*, A, B)[2](∂z_∂R)[2]"
]
},
{
"cell_type": "code",
"execution_count": 4,
"id": "36a4cd0f-c5e2-4a6f-b434-2d347686a08b",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"3×7 Matrix{Float64}:\n",
" 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n",
" 0.0 0.0 0.0 0.0 0.0 0.0 0.0\n",
" 0.0 0.0 0.0 0.0 0.0 0.0 0.0"
]
},
"execution_count": 4,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#reset\n",
"R = zeros(size(A,1), size(B,2))\n",
"∂z_∂R = rand(size(R)...) # Some gradient/tangent passed to us\n",
"\n",
"∂z_∂A = zero(A)\n",
"∂z_∂B = zero(B)"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "49cbf4e1-1ef0-4428-90af-02ac2b46c2a8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"revenue! (generic function with 1 method)"
]
},
"execution_count": 5,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"function revenue!(R, A, B)\n",
" @assert axes(A,2) == axes(B,1)\n",
" @inbounds @simd for i in eachindex(R)\n",
" R[i] = 0\n",
" end\n",
" @inbounds for j in axes(B, 2), i in axes(A, 1)\n",
" @inbounds @simd for k in axes(A,2)\n",
" R[i,j] += A[i,k] * B[k,j]\n",
" end\n",
" end\n",
" nothing\n",
"end\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "5fea97a5-39ac-4ade-9cb4-4aba66a80825",
"metadata": {},
"outputs": [],
"source": [
"batch_size = 5;\n",
"constellations = 2;\n",
"payoff_mat = zeros(batch_size,1);\n",
"\n",
"stocks = rand(batch_size, constellations);\n",
"\n",
"payoffs = rand(constellations,1);"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "d06ec650-621c-4a97-8bbd-0bfcfd4ab8d5",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×1 Matrix{Float64}:\n",
" 0.05943992677268309\n",
" 0.16746133343364858\n",
" 0.22311130107900645\n",
" 0.1326381498910713\n",
" 0.23997313509634804"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"revenue!(payoff_mat, stocks,payoffs)\n",
"\n",
"payoff_mat"
]
},
{
"cell_type": "code",
"execution_count": 12,
"id": "86425c6b-baa3-494d-8c2c-35eaf08cefaf",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×1 Matrix{Float64}:\n",
" 1.0\n",
" 1.0\n",
" 1.0\n",
" 1.0\n",
" 1.0"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"payoff_mat = zero(payoff_mat)\n",
"∂payoff_mat = ones(size(payoff_mat)...)\n",
"\n",
"∂stocks = zero(stocks)\n",
"∂payoff_mat"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "fbb5fa3f-b3c6-48bf-9df2-657d5d7aae18",
"metadata": {},
"outputs": [],
"source": [
"autodiff(revenue!\n",
"    ,Duplicated(payoff_mat, ∂payoff_mat)\n",
"    ,Duplicated(stocks,∂stocks)\n",
"    ,Const(payoffs)\n",
")"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "0872e372-f3d1-4fca-b89f-f134a3dc563d",
"metadata": {},
"outputs": [],
"source": []
},
{
"cell_type": "code",
"execution_count": null,
"id": "984b1bd7-e53a-41c3-bafd-f56aacdae4b7",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Julia 1.6.2",
"language": "julia",
"name": "julia-1.6"
},
"language_info": {
"file_extension": ".jl",
"mimetype": "application/julia",
"name": "julia",
"version": "1.6.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -0,0 +1,788 @@
### A Pluto.jl notebook ###
# v0.17.1
using Markdown
using InteractiveUtils
# ╔═╡ be0ff5da-4326-11ec-0541-ff3acda2c9fd
using Flux, PlutoUI
# ╔═╡ 96456db4-efef-4d1b-b563-0699dcecbd3d
md"""
Goals:
- Flux parallel/chain that takes an input and combines the result
- Double-check that iterative training across branches works
- Create a Flux parallel/chain combo that accepts a single input tuple and splits it across multiple branches
- Create a generator function that does this for a specified number of branches
"""
# ╔═╡ f659eb0c-6588-4469-a478-46255222677e
#flux parallel
# Two 1→2 Dense branches whose outputs are vcat-ed into a length-4 vector.
a = Flux.Parallel(vcat
,Dense(1,2,Flux.σ)
,Dense(1,2)
)
# ╔═╡ 4c890a60-0078-41f8-ab32-24602d7f0189
# Single input — appears to be fed to both branches (matches a(([1],[1])) below).
a([1])
# ╔═╡ b5a52f81-870e-4190-83b5-ef84a2ad30f1
# Tuple input: [1] to the first branch, [2] to the second.
a(([1],[2]))
# ╔═╡ fe755a26-d561-4d23-b94e-b2e3bcc62227
a(([1],[1]))
# ╔═╡ 9c4ee5ec-8248-415b-b444-2ad40ebebe74
# Two positional arguments: one per branch, like the tuple form.
a([1],[1])
# ╔═╡ 2d53cf21-ee2b-48f2-9a89-69eba76598e0
md"""
Compare the various results, note how the first and 3rd match, but the second doesn't
"""
# ╔═╡ 20293c9a-11ca-4495-833a-faddec4a7b67
# Same two branches as `a`, but the connection returns the branch outputs
# as a tuple instead of vcat-ing them.
b = Flux.Parallel((x,y) -> (x,y)
,Dense(1,2,Flux.σ)
,Dense(1,2)
)
# ╔═╡ d69b78f2-2431-4648-9b08-8d82774fbfbb
b([1])
# ╔═╡ db584226-d3f5-42f3-b9dc-7ddc8624bfda
b(([1],[2]))
# ╔═╡ 41c653bb-b6e0-43d2-a411-0ebd04b6c3e2
b(([1],[1]))
# ╔═╡ 7fb1fd3c-3ccd-45db-8757-bfbde6090522
b([1],[2])
# ╔═╡ c1420fc5-de42-4b32-9e13-e859e111996b
md"""
Again, very similar to previous work, but without the vcat
"""
# ╔═╡ 46c9f81c-3a8c-463a-952e-32287d11f1db
# Three identically-shaped branches: each is a Chain of a two-Dense Parallel
# (vcat → length 4) followed by Dense(4,1). The outer Parallel vcats the
# three branch outputs.
c =
Flux.Parallel(vcat
#add chain here
,Flux.Chain(
Flux.Parallel(vcat
,Dense(1,2,Flux.σ)
,Dense(1,2)
)
,Dense(4,1)
)
,Flux.Chain(
Flux.Parallel(vcat
,Dense(1,2,Flux.σ)
,Dense(1,2)
)
,Dense(4,1)
)
,Flux.Chain(
Flux.Parallel(vcat
,Dense(1,2,Flux.σ)
,Dense(1,2)
)
,Dense(4,1)
)
)
# ╔═╡ 8c651a48-a919-4e13-95ed-e0bd929e8b64
# One 2-tuple per outer branch; each inner Parallel splits its own tuple.
c(([1],[2]),([3],[4]),([1],[2]))
# ╔═╡ 10938bcf-dbc5-4109-b8a2-ee21c482f610
c([1],[2],[3],[4],[1],[2]) #ignores the last 3
# ╔═╡ e318c3f0-3828-41fc-9fd0-de2ae3d19e2f
# One plain argument per outer branch.
c([1],[2],[3])
# ╔═╡ 0da65b18-e511-4203-a5af-cdb0096acbbf
c(([1],[2])) #don't run the 3rd branch
# ╔═╡ ec1bd1c4-fc35-482b-b186-7f95cb332463
md"""
In the first case each sub-parallel is getting passed its own tuple.
In the second case, each sub-branch gets its own details.
The big test will be whether training on each parallel branch will affect anything other than that entry
"""
# ╔═╡ 0d85a68f-3a5d-4aab-ab0a-b9830e50c383
begin #move to module
#= TupleDuplicator
This is used to create a tuple of size n with deepcopies of any object x
=#
struct TupleDuplicator
# number of deep copies to produce per call
n::Int
end
# Callable: duplicating functor. deepcopy ensures the copies share no
# parameters with each other or the original.
(f::TupleDuplicator)(x) = tuple([deepcopy(x) for i=1:f.n]...)
#=
This generates a policy function full of branches with the properly scaled sides
=#
struct BranchGenerator
n::UInt8 #limit to 2^8 operators
end
# Build a Chain that (1) duplicates the input into an n-tuple and
# (2) feeds each element to its own deep copy of `branch`, combining the
# branch outputs with `join_fn`.
# NOTE(review): `f(branch)` hands Parallel a single tuple of layers rather
# than splatting them (`f(branch)...`) — verify Flux.Parallel accepts a
# tuple positional argument in the pinned Flux version.
function (b::BranchGenerator)(branch::Flux.Chain,join_fn::Function)
# used to deepcopy the branches and duplicate the inputs in the returned chain
f = TupleDuplicator(b.n)
return Flux.Chain(
f
,Flux.Parallel(join_fn
,f(branch)
)
)
end
end
# ╔═╡ 2f6b7042-5cf5-4312-a617-dbeb08e05175
# Generator configured for three duplicated branches.
bg3 = BranchGenerator(3)
# ╔═╡ b9fe5e74-a524-4c74-8c88-b26204ffa57b
begin
#Setup branch to duplicate
# Template branch: two-input Parallel (vcat → 4) feeding Dense(4,1).
d = Flux.Chain(
Flux.Parallel(vcat
,Dense(1,2,Flux.σ)
,Dense(1,2)
)
,Dense(4,1)
)
#build branch
# e duplicates its input tuple 3× and runs an independent copy of d on each.
e = bg3(d, vcat)
end
# ╔═╡ 5a03a348-93bf-4298-b483-57f1256a09fb
# Forward pass; the ([2],[1]) pair is duplicated to all three copies of d.
e(([2],[1]))
# ╔═╡ 28ff091c-fa33-485a-b425-2197a7915419
loss(x) = sum(abs2,e(x)) #force to zero
# ╔═╡ 3f0c5427-6a45-44cb-a83e-8f3829c5f3cf
loss(([2],[1]))
# ╔═╡ 03e2712c-0edb-4fd0-ac1f-cd34be4bcc03
opt = Flux.Optimise.ADAGrad()
# ╔═╡ 64644280-577a-4d31-9863-6c9914bba94c
# Only the parameters of the first duplicated branch — presumably e[2] is the
# Parallel and [1] its first layer (confirm indexing) — so we can test whether
# training touches the other copies.
params = Flux.params(e[2][1])
# ╔═╡ 19a3a52f-3ecf-46ff-ac4d-5d44ef3a7821
# Train on a single batch consisting of the one tuple input ([2],[1]).
# FIX: the original passed (([2],[1])), which is just the tuple ([2],[1]) —
# train! then iterated over its elements and called loss([2]) and loss([1])
# separately instead of loss(([2],[1])). Wrapping the sample as a one-element
# dataset whose single batch is the 1-tuple (([2],[1]),) trains on the
# intended input (train! splats each batch into loss).
Flux.train!(loss, params, [(([2],[1]),)], opt)
# ╔═╡ ad690dd7-6fad-4194-9cf2-ab33b1b23a11
# Re-evaluate after training to see which branch outputs moved.
e(([2],[1]))
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
[compat]
Flux = "~0.12.8"
PlutoUI = "~0.7.19"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "485ee0867925449198280d4af84bdb46a2a404d0"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.0.1"
[[AbstractPlutoDingetjes]]
deps = ["Pkg"]
git-tree-sha1 = "0bc60e3006ad95b4bb7497698dd7c6d649b9bc06"
uuid = "6e696c72-6542-2067-7265-42206c756150"
version = "1.1.1"
[[AbstractTrees]]
git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.3.4"
[[Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.1"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[ArrayInterface]]
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
git-tree-sha1 = "e527b258413e0c6d4f66ade574744c94edef81f8"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "3.1.40"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "a598ecb0d717092b5539dbbe890c98bac842b072"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.2.0"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[CEnum]]
git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.1"
[[CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"]
git-tree-sha1 = "2c8329f16addffd09e6ca84c556e2185a4933c64"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "3.5.0"
[[ChainRules]]
deps = ["ChainRulesCore", "Compat", "LinearAlgebra", "Random", "RealDot", "Statistics"]
git-tree-sha1 = "035ef8a5382a614b2d8e3091b6fdbb1c2b050e11"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.12.1"
[[ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "f885e7e7c124f8c92650d61b9477b9ac2ee607dd"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.11.1"
[[ChangesOfVariables]]
deps = ["LinearAlgebra", "Test"]
git-tree-sha1 = "9a1d594397670492219635b35a3d830b04730d62"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.1"
[[CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.0"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "dce3e3fea680869eaa0b774b2e8343e9ff442313"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.40.0"
[[CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[DataAPI]]
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.9.0"
[[DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "7d9d316f04214f7efdbb6398d545446e246eff02"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.10"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.3"
[[DiffRules]]
deps = ["LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "3287dacf67c3652d3fed09f4c12c187ae4dbb89a"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.4.0"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[ExprTools]]
git-tree-sha1 = "b7e3d17636b348f005f11040025ae8c6f645fe92"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.6"
[[FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "8756f9935b7ccc9064c6eef0bff0ad643df733a3"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.12.7"
[[FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[Flux]]
deps = ["AbstractTrees", "Adapt", "ArrayInterface", "CUDA", "CodecZlib", "Colors", "DelimitedFiles", "Functors", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "NNlibCUDA", "Pkg", "Printf", "Random", "Reexport", "SHA", "SparseArrays", "Statistics", "StatsBase", "Test", "ZipFile", "Zygote"]
git-tree-sha1 = "e8b37bb43c01eed0418821d1f9d20eca5ba6ab21"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.12.8"
[[ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "6406b5112809c08b1baa5703ad274e1dded0652f"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.23"
[[Functors]]
git-tree-sha1 = "e4768c3b7f597d5a352afa09874d16e3c3f6ead2"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.2.7"
[[GPUArrays]]
deps = ["Adapt", "LinearAlgebra", "Printf", "Random", "Serialization", "Statistics"]
git-tree-sha1 = "7772508f17f1d482fe0df72cabc5b55bec06bbe0"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.1.2"
[[GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "77d915a0af27d474f0aaf12fcd46c400a552e84c"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.13.7"
[[Hyperscript]]
deps = ["Test"]
git-tree-sha1 = "8d511d5b81240fc8e6802386302675bdf47737b9"
uuid = "47d2ed2b-36de-50cf-bf87-49c2cf4b8b91"
version = "0.0.4"
[[HypertextLiteral]]
git-tree-sha1 = "2b078b5a615c6c0396c77810d92ee8c6f470d238"
uuid = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
version = "0.9.3"
[[IOCapture]]
deps = ["Logging", "Random"]
git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a"
uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
version = "0.2.2"
[[IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "95215cd0076a150ef46ff7928892bc341864c73c"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.3"
[[IfElse]]
git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1"
uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173"
version = "0.1.1"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "a7254c0acd8e62f1ac75ad24d5db43f5f19f3c65"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.2"
[[IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "8076680b162ada2a031f707ac7b4953e30667a37"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.2"
[[Juno]]
deps = ["Base64", "Logging", "Media", "Profile"]
git-tree-sha1 = "07cb43290a840908a771552911a6274bc6c072c7"
uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d"
version = "0.8.4"
[[LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "46092047ca4edc10720ecab437c42283cd7c44f3"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "4.6.0"
[[LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6a2af408fe809c4f1a54d2b3f188fdd3698549d6"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.11+0"
[[LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[LogExpFunctions]]
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "be9eef9f9d78cecb6f262f3c10da151a6c5ab827"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.5"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Media]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58"
uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27"
version = "0.5.0"
[[Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NNlib]]
deps = ["Adapt", "ChainRulesCore", "Compat", "LinearAlgebra", "Pkg", "Requires", "Statistics"]
git-tree-sha1 = "e9ee896802054f832a646f607d26ea9fa1181c90"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.7.30"
[[NNlibCUDA]]
deps = ["CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics"]
git-tree-sha1 = "04490d5e7570c038b1cb0f5c3627597181cc15a9"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.1.9"
[[NaNMath]]
git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.5"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
[[OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[Parsers]]
deps = ["Dates"]
git-tree-sha1 = "ae4bbcadb2906ccc085cf52ac286dc1377dceccc"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.1.2"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[PlutoUI]]
deps = ["AbstractPlutoDingetjes", "Base64", "Dates", "Hyperscript", "HypertextLiteral", "IOCapture", "InteractiveUtils", "JSON", "Logging", "Markdown", "Random", "Reexport", "UUIDs"]
git-tree-sha1 = "e071adf21e165ea0d904b595544a8e514c8bb42c"
uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
version = "0.7.19"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Random123]]
deps = ["Libdl", "Random", "RandomNumbers"]
git-tree-sha1 = "0e8b146557ad1c6deb1367655e052276690e71a3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.4.2"
[[RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.1.3"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[SpecialFunctions]]
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "f0bccf98e16759818ffc5d97ac3ebf87eb950150"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "1.8.1"
[[Static]]
deps = ["IfElse"]
git-tree-sha1 = "e7bc80dc93f50857a5d1e3c8121495852f407e6a"
uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
version = "0.4.0"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "3c76dde64d03699e074ac02eb2e8ba8254d428da"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.2.13"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[StatsAPI]]
git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.0.0"
[[StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "eb35dcc66558b2dda84079b9a1be17557d32091a"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.12"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "7cb456f358e8f9d102a8b25e8dfedf58fa5689bc"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.13"
[[TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[ZipFile]]
deps = ["Libdl", "Printf", "Zlib_jll"]
git-tree-sha1 = "3593e69e469d2111389a9bd06bac1f3d730ac6de"
uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
version = "0.9.4"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "2c30f2df0ba43c17e88c8b55b5b22c401f7cde4e"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.30"
[[ZygoteRules]]
deps = ["MacroTools"]
git-tree-sha1 = "8c1a8e4dfacb1fd631745552c8db35d0deb09ea0"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.2"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╠═be0ff5da-4326-11ec-0541-ff3acda2c9fd
# ╠═96456db4-efef-4d1b-b563-0699dcecbd3d
# ╠═f659eb0c-6588-4469-a478-46255222677e
# ╠═4c890a60-0078-41f8-ab32-24602d7f0189
# ╠═b5a52f81-870e-4190-83b5-ef84a2ad30f1
# ╠═fe755a26-d561-4d23-b94e-b2e3bcc62227
# ╠═9c4ee5ec-8248-415b-b444-2ad40ebebe74
# ╠═2d53cf21-ee2b-48f2-9a89-69eba76598e0
# ╠═20293c9a-11ca-4495-833a-faddec4a7b67
# ╠═d69b78f2-2431-4648-9b08-8d82774fbfbb
# ╠═db584226-d3f5-42f3-b9dc-7ddc8624bfda
# ╠═41c653bb-b6e0-43d2-a411-0ebd04b6c3e2
# ╠═7fb1fd3c-3ccd-45db-8757-bfbde6090522
# ╠═c1420fc5-de42-4b32-9e13-e859e111996b
# ╠═46c9f81c-3a8c-463a-952e-32287d11f1db
# ╠═8c651a48-a919-4e13-95ed-e0bd929e8b64
# ╠═10938bcf-dbc5-4109-b8a2-ee21c482f610
# ╠═e318c3f0-3828-41fc-9fd0-de2ae3d19e2f
# ╠═0da65b18-e511-4203-a5af-cdb0096acbbf
# ╠═ec1bd1c4-fc35-482b-b186-7f95cb332463
# ╠═0d85a68f-3a5d-4aab-ab0a-b9830e50c383
# ╠═2f6b7042-5cf5-4312-a617-dbeb08e05175
# ╠═b9fe5e74-a524-4c74-8c88-b26204ffa57b
# ╠═5a03a348-93bf-4298-b483-57f1256a09fb
# ╠═28ff091c-fa33-485a-b425-2197a7915419
# ╠═3f0c5427-6a45-44cb-a83e-8f3829c5f3cf
# ╠═03e2712c-0edb-4fd0-ac1f-cd34be4bcc03
# ╠═64644280-577a-4d31-9863-6c9914bba94c
# ╠═19a3a52f-3ecf-46ff-ac4d-5d44ef3a7821
# ╠═ad690dd7-6fad-4194-9cf2-ab33b1b23a11
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -0,0 +1,916 @@
### A Pluto.jl notebook ###
# v0.17.0
using Markdown
using InteractiveUtils
# ╔═╡ 20b324a4-3b6c-11ec-3673-d98ec8af9009
import Zygote, LinearAlgebra,Flux,BenchmarkTools,PlutoUI
# ╔═╡ 64f31ef7-e0ba-4353-8e51-e8356a894656
# Root of the physical-model hierarchy; concrete models supply the rate
# fields consumed by survival/G/H below.
abstract type AbstractPhysicalModel end
# ╔═╡ 11a53b0c-465b-4f16-88d2-e0163e471fd6
#setup physical model
# NOTE(review): abstract field types (Real) make instances type-unstable;
# consider concrete Float64 (or a type parameter) if performance matters.
struct BasicModel <: AbstractPhysicalModel
#rate at which debris hits satellites
debris_collision_rate::Real
#rate at which satellites of different constellations collide
satellite_collision_rates::Matrix{Float64}
#rate at which debris exits orbits
decay_rate::Real
#per-period self-amplification rate of the debris population (see H's
#natural_debris_dynamics; the original comment here was truncated)
autocatalysis_rate::Real
#ratio at which a collision between satellites produces debris
satellite_collision_debris_ratio::Real
#Ratio at which launches produce debris
launch_debris_ratio::Real
end
# ╔═╡ 1d4955f9-1d1c-4572-bc7b-9f002ea54042
begin
# Problem dimensions: three satellite constellations plus one debris stock.
const N_constellations = 3;
const N_debris = 1;
const N_states = N_constellations + N_debris;
end
# ╔═╡ 6175d40b-0fa6-4cb4-8ef3-9765953ce97e
#=
Setup the physical model.
These values are just guesstimates.
=#
begin
#Getting loss parameters together.
loss_param = 2e-3;
# zero diagonal: satellites only collide with OTHER constellations
loss_weights = loss_param*(ones(N_constellations,N_constellations) - LinearAlgebra.I);
#orbital decay rate
decay_param = 0.01;
#debris generation parameters
autocatalysis_param = 0.001;
satellite_loss_debris_rate = 5.0;
launch_debris_rate = 0.05;
#Todo, wrap physical model as a struct with the parameters
bm = BasicModel(
loss_param
,loss_weights
,decay_param
,autocatalysis_param
,satellite_loss_debris_rate
,launch_debris_rate
);
end
# ╔═╡ 28c0d31f-e90a-415c-b35e-312bdf771ddf
md"""
# Setup Model Functions
- Debris Transitions
- Derivatives of debris transitions
"""
# ╔═╡ 7462740e-23da-4151-81b5-cc2e6cbcf2c8
# Per-constellation survival fraction for one period: exp(-hazard), where the
# hazard combines satellite–satellite collision rates (weighted by current
# stocks) with the debris collision rate (weighted by the debris level).
function survival(stocks, debris, physical_model::AbstractPhysicalModel)
    # total collision hazard faced by each constellation this period
    hazard = physical_model.satellite_collision_rates * stocks .+
             physical_model.debris_collision_rate * debris
    return exp.(-hazard)
end
# ╔═╡ 1eee99c9-5fef-498d-8a57-9c18e2f1cf49
#= Stock levels evolution
Next-period satellite stocks: each constellation's stock is scaled by its net
per-period retention factor (survival fraction minus orbital decay) and new
launches are added.
=#
function G(
stocks::Vector
,debris::Vector
,launches::Vector
, physical_model::AbstractPhysicalModel
)
# Elementwise scale: numerically equivalent to diagm(retention) * stocks,
# but without materialising a dense N×N matrix on every call (the original
# allocated one via LinearAlgebra.diagm for what is a pointwise product).
retention = survival(stocks,debris,physical_model) .- physical_model.decay_rate
return retention .* stocks .+ launches
end
# ╔═╡ bc3492c6-5c3b-4b08-86d4-dd4026e25655
#= Debris evolution
Next-period debris level: persistence/self-amplification of existing debris,
plus debris from destroyed satellites, plus debris shed by launches.
=#
function H(stocks,debris,launches,physical_model)
#get changes in debris from natural dynamics
natural_debris_dynamics = (1-physical_model.decay_rate+physical_model.autocatalysis_rate) * debris
#get changes in debris from satellite loss:
#(1 .- survival) is each constellation's loss fraction; the inner product
#with stocks counts destroyed satellites, scaled by the debris ratio
satellite_loss_debris = physical_model.satellite_collision_debris_ratio * (1 .- survival(stocks,debris,physical_model))'*stocks
#Possible Issue: Broadcasts? ^^^ (NOTE(review): flagged by the author, unresolved)
#get changes in debris from launches
launch_debris = physical_model.launch_debris_ratio*sum(launches)
#Possible Issue: Sum? ^^^ (NOTE(review): flagged by the author, unresolved)
#return total debris level
return natural_debris_dynamics .+ satellite_loss_debris .+ launch_debris
end
# ╔═╡ 56314025-fb0a-4e98-8628-091bda52708c
begin
number_params=10
# ∂value approximates the gradient of the value function: it maps the state
# (stocks, debris) to an N_states vector of partials (N_constellations with
# respect to stocks, N_debris with respect to debris). Commented-out layers
# are deliberately retained experiments.
∂value = Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris
,Flux.Chain(Flux.Dense(N_constellations, N_states*2,Flux.relu)
#,Flux.Dense(N_states*2, N_states*2,Flux.σ)
)
,Flux.Chain(Flux.Dense(N_debris, N_states,Flux.relu)
#,Flux.Dense(N_states, N_states)
)
)
#Apply some transformations
,Flux.Dense(N_states*3,number_params,Flux.σ)
#,Flux.Dense(number_params,number_params,Flux.σ)
#Split out into partials related to stocks and debris
,Flux.Parallel(vcat
,Flux.Chain(
#Flux.Dense(number_params, number_params,Flux.relu)
#,
Flux.Dense(number_params, N_constellations,Flux.σ)
)
,Flux.Chain(
#Flux.Dense(number_params, number_params,Flux.relu)
#,
Flux.Dense(number_params, N_debris)
)
)
)
end
# ╔═╡ f61682fe-da30-459f-b807-4fc3e8f36f32
# ╔═╡ 31237f62-428a-4df3-9caf-1f5c5ade6ade
md"""
### Payout function
"""
# ╔═╡ 55fd682d-2808-4786-ad52-647ac6892860
begin
# Payoff weights between constellations: identity ⇒ each constellation is
# paid only for its own stock (same Float64 matrix as zeros(N,N) + I).
pay_matrix = Matrix{Float64}(LinearAlgebra.I, N_constellations, N_constellations)
end
# ╔═╡ bf869218-034e-42f9-96c5-4267a8c8fcfb
#= Per-period payoff.
Revenue: n*(pay_matrix*s), the stock-based payout weighted toward the
constellation(s) selected by `n`. Cost: launches `a` priced at `launch_cost`
per unit. `launch_cost` generalises the previously hard-coded 5.0 as a
backward-compatible keyword (existing positional calls are unaffected).
=#
function F1(
s #stock levels
,d #debris levels (not yet used in the payoff)
,a #actions
#,econ::Int #TODO: a struct with various bits of info
#,taxes::Int #TODO: a struct with info on taxes
#,debris_interactions::Matrix #Ignoring for now
,n::Matrix #constellation of interest
;launch_cost::Real=5.0 #per-unit launch cost; default preserves old behavior
)
return n*(pay_matrix*s) - n*launch_cost*a
#-taxes and econ should be structs with parameters later
end
# ╔═╡ 4817d4fe-b86c-43e2-94d2-fa6c424fa001
begin
# 1×3 selector/weight row for the constellation of interest (see F1's `n`)
n1 = [1.0 2 3]
# 1×2 row — presumably scratch values; not the 1-element debris vector used
# elsewhere. TODO confirm intended use.
d1 = [-0.002 0]
end
# ╔═╡ 48a0d42f-bd00-4298-8b28-56acf2dbc8a7
# ╔═╡ 5d85cdb6-a627-4dd5-b76c-79ed2bc019c6
# ╔═╡ 5a5ca97f-28a9-4196-a1a6-40f3ae4bbb9c
# ╔═╡ 932b7a31-d5f4-44a3-a6c3-f608003a0a6f
md"""
## Building a function for the transition residuals (transversality conditions)
"""
# ╔═╡ 29a8f5c6-d5b8-4ec6-95a4-89025245787d
# Sum-of-squares residual loss (same value as sum(x.^2) on the real-valued
# residual vectors used here, without allocating the intermediate array).
loss(x) = sum(abs2, x)
# ╔═╡ ba7ca719-9687-4f1a-9a36-89b795a1bc13
md"""
### Testing training the value partials
"""
# ╔═╡ fda49870-c355-4b46-8d04-47268cb0372d
md"""
## Building a function for Optimality Residuals (Optimality Condition)
"""
# ╔═╡ 287ff2f9-3469-4e29-91a3-91ed54109f5a
md"""
## Parameters for testing
"""
# ╔═╡ d173ec63-b7c3-4042-862c-2626039d6d94
# Discount factor applied to the next-period value partials.
β = 0.95
# ╔═╡ 8eca54ad-f95b-450c-8567-480f0ab7ea19
#= Residuals of the optimality/transversality condition on the value partials.
Builds the Jacobian of the state transition (G for stocks, H for debris),
propagates the next-period value partials through it, adds the payoff
partials, and subtracts the network's current estimate ∂value((s,d)).
FIX: the original declared duplicate argument names (`s,s` and `d,d`), which
Julia rejects ("function argument name not unique"), so this function could
never be defined. The t+1 arguments are now s_next/d_next; callers already
pass them positionally, and the body (which only uses the time-t values so
far) is unchanged.
=#
function transition_residuals(G,H,F
,s,s_next #stocks at t and t+1 (s_next accepted but not yet used)
,d,d_next #debris at t and t+1 (d_next accepted but not yet used)
,p #policy
,n #the constellation we are dealing with
,physics #Parameters of the physical model of the world
;econ=0.0,taxes=0.0 #economic parameters (to be incorporated later) and tax policy
)
#calculate partials of transition functions
∂G_∂s = Zygote.jacobian(stocks->G(stocks,d,p,physics),s)[1]
∂G_∂d = Zygote.jacobian(debris->G(s,debris,p,physics),d)[1]
∂H_∂s = Zygote.jacobian(stocks->H(stocks,d,p,physics),s)[1]
∂H_∂d = Zygote.jacobian(debris->H(s,debris,p,physics),d)[1]
#concatenate to create iterated vector
∂Vₜ₊₁_∂θ = vcat(hcat(∂G_∂s ,∂G_∂d),hcat(∂H_∂s ,∂H_∂d)) * ∂value((s,d))
#get partials of benefit function
# NOTE(review): these use the notebook-global n1, NOT the `n` argument.
# Switching to `n` would change result shapes for the existing call site
# (which passes n=1), so it is left as-is pending confirmation of intent.
∂F_∂d = Zygote.jacobian(debris->F(s,debris,p,n1),d)[1]
∂F_∂s = Zygote.jacobian(stocks->F(stocks,d,p,n1),s)[1] #this probably should not be transposed
#concatenate
∂F_∂θ = hcat(∂F_∂s , ∂F_∂d )'
#calculate the optimality residuals
∂F_∂θ + β*∂Vₜ₊₁_∂θ - ∂value((s,d))
end
# ╔═╡ 274e3a3f-fe23-4d6f-893c-622db0413af0
begin
# test state: stocks for the 3 constellations, one debris level, and a launch policy
s = [1.0,2,3]
d = [1.0]
p = [1.0,2,3]
end
# ╔═╡ 5e754d71-f4c3-476c-9656-61f07687a534
# next-period stocks under a fixed launch vector
G(s,d,[1,1,3],bm)
# ╔═╡ 749e6b65-fd8e-45ed-9a2b-f97274549933
survival(s,d,bm)
# ╔═╡ d38be057-952a-450e-ba23-bd2f40e79de3
F1(s,d,p,n1)
# ╔═╡ affe6dd2-05e2-4852-9ef0-9605bdb5e34b
# Jacobian of the payoff with respect to stocks
Zygote.jacobian(stocks -> F1(stocks,d,p,n1),s)
# ╔═╡ 1c8f2c25-b8da-4e31-adac-ad575203f9cf
# Cell-level, unrolled version of `transition_residuals`, kept inline so each
# intermediate stays inspectable in the notebook. The bindings created here
# (∂G_∂s … a) are notebook globals; `a` is consumed by the cells below.
# Relies on globals G, H, F1, s, d, p, bm, n1, β and the ∂value network.
begin
#calculate partials of transition functions
∂G_∂s = Zygote.jacobian(stocks->G(stocks,d,p,bm),s)[1]
∂G_∂d = Zygote.jacobian(debris->G(s,debris,p,bm),d)[1]
∂H_∂s = Zygote.jacobian(stocks->H(stocks,d,p,bm),s)[1]
∂H_∂d = Zygote.jacobian(debris->H(s,debris,p,bm),d)[1]
#concatenate to create iterated vector
∂Vₜ₊₁_∂θ = vcat(hcat(∂G_∂s ,∂G_∂d),hcat(∂H_∂s ,∂H_∂d)) * ∂value((s,d))
#get partials of benefit function
∂F_∂d = Zygote.jacobian(debris->F1(s,debris,p,n1),d)[1]
∂F_∂s = Zygote.jacobian(stocks->F1(stocks,d,p,n1),s)[1] #this probably should not be transposed
#concatenate
∂F_∂θ = hcat(∂F_∂s , ∂F_∂d )'
#calculate the optimality residuals
a = ∂F_∂θ + β * ∂Vₜ₊₁_∂θ - ∂value((s,d))
end
# ╔═╡ 48b09bfb-87fe-4974-a4f3-7a6ece521da6
# Scalar residual norm: sum of squared optimality residuals from the cell above.
sum(a.^2)
# ╔═╡ e1d0aeb8-8fb8-48a8-9842-7888d9fae1ad
# Same quantity via the `loss` helper (defined elsewhere in the notebook).
loss(a)
# ╔═╡ 1e0cf592-fd96-4a01-84d1-078301ac7f99
# Gradient of the loss with respect to the ∂value network's parameters
# (Flux implicit-parameters style).
x = Flux.gradient(() -> loss(a), Flux.params(∂value))
# ╔═╡ 9b59255c-77b4-4df6-a70b-e1fdaab7619b
# End-to-end: recompute the residuals via transition_residuals and differentiate
# the loss w.r.t. ∂value's parameters. Note n = 1 is passed positionally.
grads_value1 = Flux.gradient(() -> loss(transition_residuals(G,H,F1,s,s,d,d,p,1,bm)),Flux.params(∂value))
# ╔═╡ 00000000-0000-0000-0000-000000000001
# Pluto's embedded package environment (machine-managed; do not edit by hand).
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
PlutoUI = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
[compat]
BenchmarkTools = "~1.2.0"
Flux = "~0.12.8"
PlutoUI = "~0.7.17"
Zygote = "~0.6.29"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "485ee0867925449198280d4af84bdb46a2a404d0"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.0.1"
[[AbstractTrees]]
git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.3.4"
[[Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.1"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[ArrayInterface]]
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
git-tree-sha1 = "d9352737cef8525944bf9ef34392d756321cbd54"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "3.1.38"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "a598ecb0d717092b5539dbbe890c98bac842b072"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.2.0"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[BenchmarkTools]]
deps = ["JSON", "Logging", "Printf", "Profile", "Statistics", "UUIDs"]
git-tree-sha1 = "61adeb0823084487000600ef8b1c00cc2474cd47"
uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
version = "1.2.0"
[[CEnum]]
git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.1"
[[CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"]
git-tree-sha1 = "2c8329f16addffd09e6ca84c556e2185a4933c64"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "3.5.0"
[[ChainRules]]
deps = ["ChainRulesCore", "Compat", "LinearAlgebra", "Random", "RealDot", "Statistics"]
git-tree-sha1 = "035ef8a5382a614b2d8e3091b6fdbb1c2b050e11"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.12.1"
[[ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "3533f5a691e60601fe60c90d8bc47a27aa2907ec"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.11.0"
[[CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.0"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "dce3e3fea680869eaa0b774b2e8343e9ff442313"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.40.0"
[[CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[DataAPI]]
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.9.0"
[[DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "7d9d316f04214f7efdbb6398d545446e246eff02"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.10"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.3"
[[DiffRules]]
deps = ["NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "7220bc21c33e990c14f4a9a319b1d242ebc5b269"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.3.1"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[ExprTools]]
git-tree-sha1 = "b7e3d17636b348f005f11040025ae8c6f645fe92"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.6"
[[FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "8756f9935b7ccc9064c6eef0bff0ad643df733a3"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.12.7"
[[FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[Flux]]
deps = ["AbstractTrees", "Adapt", "ArrayInterface", "CUDA", "CodecZlib", "Colors", "DelimitedFiles", "Functors", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "NNlibCUDA", "Pkg", "Printf", "Random", "Reexport", "SHA", "SparseArrays", "Statistics", "StatsBase", "Test", "ZipFile", "Zygote"]
git-tree-sha1 = "e8b37bb43c01eed0418821d1f9d20eca5ba6ab21"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.12.8"
[[ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "63777916efbcb0ab6173d09a658fb7f2783de485"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.21"
[[Functors]]
git-tree-sha1 = "e4768c3b7f597d5a352afa09874d16e3c3f6ead2"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.2.7"
[[GPUArrays]]
deps = ["Adapt", "LinearAlgebra", "Printf", "Random", "Serialization", "Statistics"]
git-tree-sha1 = "7772508f17f1d482fe0df72cabc5b55bec06bbe0"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.1.2"
[[GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "77d915a0af27d474f0aaf12fcd46c400a552e84c"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.13.7"
[[Hyperscript]]
deps = ["Test"]
git-tree-sha1 = "8d511d5b81240fc8e6802386302675bdf47737b9"
uuid = "47d2ed2b-36de-50cf-bf87-49c2cf4b8b91"
version = "0.0.4"
[[HypertextLiteral]]
git-tree-sha1 = "5efcf53d798efede8fee5b2c8b09284be359bf24"
uuid = "ac1192a8-f4b3-4bfe-ba22-af5b92cd3ab2"
version = "0.9.2"
[[IOCapture]]
deps = ["Logging", "Random"]
git-tree-sha1 = "f7be53659ab06ddc986428d3a9dcc95f6fa6705a"
uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89"
version = "0.2.2"
[[IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "95215cd0076a150ef46ff7928892bc341864c73c"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.3"
[[IfElse]]
git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1"
uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173"
version = "0.1.1"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "f0c6489b12d28fb4c2103073ec7452f3423bd308"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.1"
[[IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[JSON]]
deps = ["Dates", "Mmap", "Parsers", "Unicode"]
git-tree-sha1 = "8076680b162ada2a031f707ac7b4953e30667a37"
uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6"
version = "0.21.2"
[[Juno]]
deps = ["Base64", "Logging", "Media", "Profile"]
git-tree-sha1 = "07cb43290a840908a771552911a6274bc6c072c7"
uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d"
version = "0.8.4"
[[LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "46092047ca4edc10720ecab437c42283cd7c44f3"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "4.6.0"
[[LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "6a2af408fe809c4f1a54d2b3f188fdd3698549d6"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.11+0"
[[LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[LogExpFunctions]]
deps = ["ChainRulesCore", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "6193c3815f13ba1b78a51ce391db8be016ae9214"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.4"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Media]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58"
uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27"
version = "0.5.0"
[[Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NNlib]]
deps = ["Adapt", "ChainRulesCore", "Compat", "LinearAlgebra", "Pkg", "Requires", "Statistics"]
git-tree-sha1 = "5203a4532ad28c44f82c76634ad621d7c90abcbd"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.7.29"
[[NNlibCUDA]]
deps = ["CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics"]
git-tree-sha1 = "04490d5e7570c038b1cb0f5c3627597181cc15a9"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.1.9"
[[NaNMath]]
git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.5"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
[[OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[Parsers]]
deps = ["Dates"]
git-tree-sha1 = "d911b6a12ba974dabe2291c6d450094a7226b372"
uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0"
version = "2.1.1"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[PlutoUI]]
deps = ["Base64", "Dates", "Hyperscript", "HypertextLiteral", "IOCapture", "InteractiveUtils", "JSON", "Logging", "Markdown", "Random", "Reexport", "UUIDs"]
git-tree-sha1 = "615f3a1eff94add4bca9476ded096de60b46443b"
uuid = "7f904dfe-b85e-4ff6-b463-dae2292396a8"
version = "0.7.17"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Random123]]
deps = ["Libdl", "Random", "RandomNumbers"]
git-tree-sha1 = "0e8b146557ad1c6deb1367655e052276690e71a3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.4.2"
[[RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.1.3"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[SpecialFunctions]]
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "f0bccf98e16759818ffc5d97ac3ebf87eb950150"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "1.8.1"
[[Static]]
deps = ["IfElse"]
git-tree-sha1 = "e7bc80dc93f50857a5d1e3c8121495852f407e6a"
uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
version = "0.4.0"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "3c76dde64d03699e074ac02eb2e8ba8254d428da"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.2.13"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[StatsAPI]]
git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.0.0"
[[StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "eb35dcc66558b2dda84079b9a1be17557d32091a"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.12"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "7cb456f358e8f9d102a8b25e8dfedf58fa5689bc"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.13"
[[TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[ZipFile]]
deps = ["Libdl", "Printf", "Zlib_jll"]
git-tree-sha1 = "3593e69e469d2111389a9bd06bac1f3d730ac6de"
uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
version = "0.9.4"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "0fc9959bcabc4668c403810b4e851f6b8962eac9"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.29"
[[ZygoteRules]]
deps = ["MacroTools"]
git-tree-sha1 = "8c1a8e4dfacb1fd631745552c8db35d0deb09ea0"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.2"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╠═20b324a4-3b6c-11ec-3673-d98ec8af9009
# ╠═64f31ef7-e0ba-4353-8e51-e8356a894656
# ╠═11a53b0c-465b-4f16-88d2-e0163e471fd6
# ╠═1d4955f9-1d1c-4572-bc7b-9f002ea54042
# ╠═6175d40b-0fa6-4cb4-8ef3-9765953ce97e
# ╠═28c0d31f-e90a-415c-b35e-312bdf771ddf
# ╠═7462740e-23da-4151-81b5-cc2e6cbcf2c8
# ╠═1eee99c9-5fef-498d-8a57-9c18e2f1cf49
# ╠═bc3492c6-5c3b-4b08-86d4-dd4026e25655
# ╠═5e754d71-f4c3-476c-9656-61f07687a534
# ╠═749e6b65-fd8e-45ed-9a2b-f97274549933
# ╠═56314025-fb0a-4e98-8628-091bda52708c
# ╠═f61682fe-da30-459f-b807-4fc3e8f36f32
# ╟─31237f62-428a-4df3-9caf-1f5c5ade6ade
# ╠═55fd682d-2808-4786-ad52-647ac6892860
# ╠═bf869218-034e-42f9-96c5-4267a8c8fcfb
# ╠═4817d4fe-b86c-43e2-94d2-fa6c424fa001
# ╠═d38be057-952a-450e-ba23-bd2f40e79de3
# ╠═affe6dd2-05e2-4852-9ef0-9605bdb5e34b
# ╠═48a0d42f-bd00-4298-8b28-56acf2dbc8a7
# ╠═5d85cdb6-a627-4dd5-b76c-79ed2bc019c6
# ╠═5a5ca97f-28a9-4196-a1a6-40f3ae4bbb9c
# ╠═932b7a31-d5f4-44a3-a6c3-f608003a0a6f
# ╠═8eca54ad-f95b-450c-8567-480f0ab7ea19
# ╠═1c8f2c25-b8da-4e31-adac-ad575203f9cf
# ╠═48b09bfb-87fe-4974-a4f3-7a6ece521da6
# ╠═29a8f5c6-d5b8-4ec6-95a4-89025245787d
# ╠═e1d0aeb8-8fb8-48a8-9842-7888d9fae1ad
# ╠═1e0cf592-fd96-4a01-84d1-078301ac7f99
# ╟─ba7ca719-9687-4f1a-9a36-89b795a1bc13
# ╠═9b59255c-77b4-4df6-a70b-e1fdaab7619b
# ╠═fda49870-c355-4b46-8d04-47268cb0372d
# ╠═287ff2f9-3469-4e29-91a3-91ed54109f5a
# ╠═d173ec63-b7c3-4042-862c-2626039d6d94
# ╠═274e3a3f-fe23-4d6f-893c-622db0413af0
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002

@ -0,0 +1,523 @@
# This file is machine-generated - editing it directly is not advised
[[AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "485ee0867925449198280d4af84bdb46a2a404d0"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.0.1"
[[AbstractTrees]]
git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.3.4"
[[Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.1"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[ArrayInterface]]
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
git-tree-sha1 = "e527b258413e0c6d4f66ade574744c94edef81f8"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "3.1.40"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "a598ecb0d717092b5539dbbe890c98bac842b072"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.2.0"
[[BSON]]
git-tree-sha1 = "ebcd6e22d69f21249b7b8668351ebf42d6dc87a1"
uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
version = "0.3.4"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[CEnum]]
git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.1"
[[CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"]
git-tree-sha1 = "2c8329f16addffd09e6ca84c556e2185a4933c64"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "3.5.0"
[[ChainRules]]
deps = ["ChainRulesCore", "Compat", "LinearAlgebra", "Random", "RealDot", "Statistics"]
git-tree-sha1 = "035ef8a5382a614b2d8e3091b6fdbb1c2b050e11"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.12.1"
[[ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "f885e7e7c124f8c92650d61b9477b9ac2ee607dd"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.11.1"
[[ChangesOfVariables]]
deps = ["LinearAlgebra", "Test"]
git-tree-sha1 = "9a1d594397670492219635b35a3d830b04730d62"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.1"
[[CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.0"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "dce3e3fea680869eaa0b774b2e8343e9ff442313"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.40.0"
[[CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[DataAPI]]
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.9.0"
[[DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "7d9d316f04214f7efdbb6398d545446e246eff02"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.10"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.3"
[[DiffRules]]
deps = ["LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "3287dacf67c3652d3fed09f4c12c187ae4dbb89a"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.4.0"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[ExprTools]]
git-tree-sha1 = "b7e3d17636b348f005f11040025ae8c6f645fe92"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.6"
[[FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "8756f9935b7ccc9064c6eef0bff0ad643df733a3"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.12.7"
[[FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[Flux]]
deps = ["AbstractTrees", "Adapt", "ArrayInterface", "CUDA", "CodecZlib", "Colors", "DelimitedFiles", "Functors", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "NNlibCUDA", "Pkg", "Printf", "Random", "Reexport", "SHA", "SparseArrays", "Statistics", "StatsBase", "Test", "ZipFile", "Zygote"]
git-tree-sha1 = "e8b37bb43c01eed0418821d1f9d20eca5ba6ab21"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.12.8"
[[ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "6406b5112809c08b1baa5703ad274e1dded0652f"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.23"
[[Functors]]
git-tree-sha1 = "e4768c3b7f597d5a352afa09874d16e3c3f6ead2"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.2.7"
[[GPUArrays]]
deps = ["Adapt", "LinearAlgebra", "Printf", "Random", "Serialization", "Statistics"]
git-tree-sha1 = "7772508f17f1d482fe0df72cabc5b55bec06bbe0"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.1.2"
[[GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "77d915a0af27d474f0aaf12fcd46c400a552e84c"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.13.7"
[[IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "95215cd0076a150ef46ff7928892bc341864c73c"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.3"
[[IfElse]]
git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1"
uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173"
version = "0.1.1"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "a7254c0acd8e62f1ac75ad24d5db43f5f19f3c65"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.2"
[[IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[Juno]]
deps = ["Base64", "Logging", "Media", "Profile"]
git-tree-sha1 = "07cb43290a840908a771552911a6274bc6c072c7"
uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d"
version = "0.8.4"
[[LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "46092047ca4edc10720ecab437c42283cd7c44f3"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "4.6.0"
[[LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "9436f02a0c9f726d914cc6539f87850701be18fc"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.12+0"
[[LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[LogExpFunctions]]
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "be9eef9f9d78cecb6f262f3c10da151a6c5ab827"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.5"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Media]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58"
uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27"
version = "0.5.0"
[[Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NNlib]]
deps = ["Adapt", "ChainRulesCore", "Compat", "LinearAlgebra", "Pkg", "Requires", "Statistics"]
git-tree-sha1 = "2eb305b13eaed91d7da14269bf17ce6664bfee3d"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.7.31"
[[NNlibCUDA]]
deps = ["CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics"]
git-tree-sha1 = "38358632d9c277f7bf8d202c127f601e8467aa4d"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.1.10"
[[NaNMath]]
git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.5"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
[[OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Random123]]
deps = ["Libdl", "Random", "RandomNumbers"]
git-tree-sha1 = "0e8b146557ad1c6deb1367655e052276690e71a3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.4.2"
[[RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.1.3"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[SpecialFunctions]]
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "f0bccf98e16759818ffc5d97ac3ebf87eb950150"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "1.8.1"
[[Static]]
deps = ["IfElse"]
git-tree-sha1 = "e7bc80dc93f50857a5d1e3c8121495852f407e6a"
uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
version = "0.4.0"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "3c76dde64d03699e074ac02eb2e8ba8254d428da"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.2.13"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[StatsAPI]]
git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.0.0"
[[StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "2bb0cb32026a66037360606510fca5984ccc6b75"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.13"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "7cb456f358e8f9d102a8b25e8dfedf58fa5689bc"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.13"
[[TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[ZipFile]]
deps = ["Libdl", "Printf", "Zlib_jll"]
git-tree-sha1 = "3593e69e469d2111389a9bd06bac1f3d730ac6de"
uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
version = "0.9.4"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "2c30f2df0ba43c17e88c8b55b5b22c401f7cde4e"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.30"
[[ZygoteRules]]
deps = ["MacroTools"]
git-tree-sha1 = "8c1a8e4dfacb1fd631745552c8db35d0deb09ea0"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.2"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"

@ -0,0 +1,5 @@
[deps]
BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"

@ -0,0 +1 @@
Orbital-debris economics model: Julia/Flux neural-network value and policy functions for satellite constellation operators and a central planner, built on a simple physical model of satellite stocks and debris evolution.

@ -0,0 +1,81 @@
include("../src/Orbits.jl")
using .Orbits
using LinearAlgebra
# Training setup script: wires together the economic models, the physical
# (stocks/debris) model, the shared policy/value networks, the per-operator
# loss objects and the planner loss. All types come from src/Orbits.jl.
#Set key dimensions
const N_constellations = 4
const N_debris = 1
const N_states = N_constellations + N_debris
#Setup Economic Models
# Presumably LinearModel(β, payoff_array, policy_costs): each operator earns 1
# per own satellite, loses 0.02 per neighboring constellation's satellite, and
# pays 5 per own launch — TODO confirm against LinearModel in src/Orbits.jl.
em2_a = LinearModel(0.95, [1 -0.02 -0.02 0], [5.0 0 0 0])
em2_b = LinearModel(0.95, [-0.02 1 -0.02 0], [0.0 5 0 0])
em2_c = LinearModel(0.95, [0 -0.02 1 -0.02], [0.0 0 5 0])
em2_d = LinearModel(0.95, [0 -0.02 -0.02 1], [0.0 0 0 5])
#Setup Physics
# Positional args appear to mirror the notebook's BasicModel field order:
# (debris_collision_rate, satellite_collision_rates, decay_rate,
#  autocatalysis_rate, satellite_collision_debris_ratio, launch_debris_ratio)
# — TODO confirm against the BasicPhysics definition in src/Orbits.jl.
basic_model = BasicPhysics(
0.002
,0.002*(ones(N_constellations,N_constellations) - LinearAlgebra.I)
,0.01
,0.001
,5.0
,0.05
)
# Setup NN
bg = BranchGenerator(N_constellations)
operators_policy = bg(operator_policy_function_generator(N_constellations,N_debris),vcat)
# NOTE(review): planners_policy is built from operator_policy_function_generator;
# confirm planner_policy_function_generator was not intended here.
planners_policy = bg(operator_policy_function_generator(N_constellations,N_debris),vcat)
planners_value = value_function_generator(64)
# Setup Operators
# Each operator shares the collected policy chain but trains only the
# parameters of its own branch (operators_policy[2][i]).
const operator_array = [
#first operator
OperatorLoss(
em2_a
,value_function_generator()
,operators_policy #this is held by all operators
,params(operators_policy[2][1]) #first operator gets first branch of params
,basic_model
)
,OperatorLoss(
em2_b
,value_function_generator()
,operators_policy #this is held by all operators
,params(operators_policy[2][2]) #second operator gets second branch of params
,basic_model
)
,OperatorLoss(
em2_c
,value_function_generator()
,operators_policy #this is held by all operators
,params(operators_policy[2][3]) #third operator gets third branch of params
,basic_model
)
,OperatorLoss(
em2_d
,value_function_generator()
,operators_policy #this is held by all operators
,params(operators_policy[2][4]) #fourth operator gets fourth branch of params
,basic_model
)
]
#sanity check time
@assert length(operator_array) == N_constellations "Mismatch in predetermined number of constellations and the number of operators initialized"
# Setup Planner
pl = PlannerLoss(
0.95
,operator_array
,planners_policy
,params(planners_policy)
,planners_value
,params(planners_value)
,basic_model
)
# Export Planner (TODO: export/serialization not implemented yet)

@ -0,0 +1,846 @@
### A Pluto.jl notebook ###
# v0.17.1
using Markdown
using InteractiveUtils
# ╔═╡ b194c2fc-340b-480e-b1ec-53db2fe4e7ec
module PhysicsModule
# Physical model of satellite stocks and orbital debris: a State container,
# a BasicModel parameter struct, and a one-step state transition.
#Add exports here
using Flux, LinearAlgebra
using Zygote #included for saving and loading models
#==#
# Model state: per-constellation satellite stocks and the debris level(s).
struct State
stocks::Array{Float32}
debris::Array{Float32}
end
### Physics
abstract type PhysicalModel end
struct BasicModel <: PhysicalModel
#rate at which debris hits satellites
debris_collision_rate::Real
#rate at which satellites of different constellations collide
satellite_collision_rates::Matrix{Float64}
#rate at which debris exits orbits
decay_rate::Real
#rate at which debris self-propagates (autocatalysis; applied to the debris stock below)
autocatalysis_rate::Real
#ratio at which a collision between satellites produced debris
satellite_collision_debris_ratio::Real
#Ratio at which launches produce debris
launch_debris_ratio::Real
end
# One-step transition: given the physics parameters, the current state and the
# launch vector, returns the next State.
function state_transition(
physics::BasicModel
,state::State
,launches::Vector{Float32}
)
#=
Physical Transitions
=#
# per-constellation fraction of satellites surviving this step
survival_rate = survival(state,physics)
# Debris
# get changes in debris from natural dynamics
natural_debris_dynamics = (1 - physics.decay_rate + physics.autocatalysis_rate) * state.debris
# get changes in debris from satellite loss
satellite_loss_debris = physics.satellite_collision_debris_ratio * (1 .- survival_rate)'*state.stocks
# get changes in debris from launches
launch_debris = physics.launch_debris_ratio * sum(launches)
# total debris level
debris = natural_debris_dynamics .+ satellite_loss_debris .+ launch_debris
#stocks: surviving fraction of each stock, minus decay, plus new launches.
# NOTE(review): `.-` subtracts decay_rate from EVERY entry of the diagonal
# matrix (off-diagonals become -decay_rate) — confirm that is intended.
stocks = (LinearAlgebra.diagm(survival_rate) .- physics.decay_rate)*state.stocks + launches
return State(stocks,debris)
end
# Per-constellation survival fraction: exp of minus the collision/decay
# exposure. Note the unary minus binds to the whole (rates .+ decay) matrix
# before the matrix-vector product.
function survival(
state::State
,physical_model::BasicModel
)
return exp.(
-(physical_model.satellite_collision_rates .+ physical_model.decay_rate) * state.stocks
.- (physical_model.debris_collision_rate * state.debris)
)
end
end #end PhysicsModule
# ╔═╡ 85897da8-bc22-40bd-8ec7-853f86ac4941
module NNTools
# Helpers for building branched policy networks and value networks.
#= TupleDuplicator
This is used to create a tuple of size n with deepcopies of any object x
=#
using Flux,Zygote,BSON
struct TupleDuplicator
n::Int
end
(f::TupleDuplicator)(x) = tuple([deepcopy(x) for i=1:f.n]...)
#=
This generates a policy function full of branches with the properly scaled sides
=#
struct BranchGenerator
n::UInt8
#limit to 2^8 operators
end
function (b::BranchGenerator)(branch::Flux.Chain,join_fn::Function)
# used to deepcopy the branches and duplicate the inputs in the returned chain
f = TupleDuplicator(b.n)
# NOTE(review): f(branch) is a tuple of n chains passed to Flux.Parallel as a
# SINGLE argument; Parallel normally takes its layers splatted
# (Flux.Parallel(join_fn, f(branch)...)) — confirm this really builds n branches.
return Flux.Chain(f,Flux.Parallel(join_fn,f(branch)))
end
## Couple of helpful policy generators, just for help.
# NOTE(review): the three generators below reference N_constellations and
# N_debris, which are NOT defined inside this module — calling them raises
# UndefVarError unless those constants exist globally. Elsewhere in the repo
# these dimensions are passed as arguments; confirm the intended source.
function operator_policy_function_generator(number_params=32)
return Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris
,Flux.Chain(
Flux.Dense(N_constellations, number_params*2,Flux.relu)
,Flux.Dense(number_params*2, number_params*2,Flux.tanh)
)
,Flux.Chain(
Flux.Dense(N_debris, number_params,Flux.relu)
,Flux.Dense(number_params, number_params)
)
)
#Apply some transformations
,Flux.Dense(number_params*3,number_params,Flux.tanh)
,Flux.Dense(number_params,number_params,Flux.tanh)
,Flux.Dense(number_params,1,x -> Flux.relu.(sinh.(x)))
)
end
# Planner variant: outputs one action per constellation rather than a scalar.
function planner_policy_function_generator(number_params=32)
return Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris
,Flux.Chain(
Flux.Dense(N_constellations, number_params*2,Flux.relu)
,Flux.Dense(number_params*2, number_params*2,Flux.σ)
)
,Flux.Chain(
Flux.Dense(N_debris, number_params,Flux.relu)
,Flux.Dense(number_params, number_params)
)
)
#Apply some transformations
,Flux.Dense(number_params*3,number_params,Flux.σ)
,Flux.Dense(number_params,number_params,Flux.σ)
,Flux.Dense(number_params,N_constellations,Flux.relu)
)
end
# Value network: maps (stocks, debris) to a single scalar value estimate.
function value_function_generator(number_params=32)
return Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris, after a little bit of preprocessing
,Flux.Chain(
Flux.Dense(N_constellations, number_params*2,Flux.relu)
,Flux.Dense(number_params*2, number_params*2,Flux.σ)
)
,Flux.Chain(
Flux.Dense(N_debris, number_params,Flux.relu)
,Flux.Dense(number_params, number_params,Flux.σ)
)
)
#Apply some transformations to the preprocessed data.
,Flux.Dense(number_params*3,number_params,Flux.σ)
,Flux.Dense(number_params,number_params,Flux.σ)
,Flux.Dense(number_params,1)
)
end
end
# ╔═╡ 4e66c6cc-ad36-47e3-b2c0-2e9ca594ad04
using LinearAlgebra, BSON
# ╔═╡ 99aabead-81d0-4f66-994a-bc5485c08af5
### Economics
module EconomicModule
# Per-period payoff ("economic") models for constellation operators.
# Each concrete model is a callable struct: model(s, d, a), where
#   s — satellite stocks per constellation
#   d — debris level(s)
#   a — actions (launches) per constellation
abstract type EconomicModel end
#basic linear model: payoff linear in stocks, minus linear launch costs
struct LinearModel <: EconomicModel
β::Float32 # discount factor
payoff_array::Array{Float32} # row weights applied to the stocks s
policy_costs::Array{Float32} # row weights applied to the actions a
end
function (em::LinearModel)(
s::Vector{Float32}
,d::Vector{Float32}
,a::Vector{Float32}
)
# debris d does not enter the linear payoff
return em.payoff_array*s - em.policy_costs*a
end
#basic CES model
struct CES <: EconomicModel
β::Float32 # discount factor
r::Float32 #elasticity of subsititution
payoff_array::Array{Float32}
policy_costs::Array{Float32}
debris_costs::Array{Float32}
end
function (em::CES)(
s::Vector{Float32}
,d::Vector{Float32}
,a::Vector{Float32}
)
# NOTE(review): these elementwise products broadcast a row array against a
# column vector and so produce matrices, not vectors — confirm the intended
# shapes of payoff_array/policy_costs/debris_costs before relying on CES.
r1 = em.payoff_array .* (s.^em.r)
r2 = - em.debris_costs .* (d.^em.r)
r3 = - em.policy_costs .* (a.^em.r)
return (r1 + r2 + r3) .^ (1/em.r)
end
#basic CRRA
struct CRRA <: EconomicModel
β::Float32 # discount factor
σ::Float32 # relative risk aversion parameter
payoff_array::Array{Float32}
policy_costs::Array{Float32}
debris_costs::Array{Float32}
end
function (em::CRRA)(
s::Vector{Float32}
,d::Vector{Float32}
,a::Vector{Float32}
)
# Net flow: payoff from stocks minus debris costs minus action costs.
# FIX: `em.policy_costs` was subtracted without `*a` (the action argument was
# unused and the shapes could not match), and `(core-1)` subtracted a scalar
# from a vector without broadcasting; both corrected below.
core = (em.payoff_array*s - em.debris_costs*d - em.policy_costs*a).^(1 - em.σ)
return (core .- 1) ./ (1 - em.σ)
end
end #end module
# ╔═╡ 3be917ed-7107-46a3-bd8e-969e0e8e0fea
md"""
# testing
Define the basic dimensions of the model
- Number of Constellations: $(const N_constellations = 4)
- Number of Debris Trackers: $(const N_debris = 1)
- Number of Overall States: $(const N_states = N_constellations + N_debris)
"""
# ╔═╡ f4d11666-dd44-4340-8fe7-b5cf735dea67
md"""
## Physics
"""
# ╔═╡ 30622c11-32f3-4b68-b391-e526c3b09570
begin #Setup for State tests
# FIX: this cell referenced `OD.State` / `OD.BasicModel`, but no module named
# `OD` exists in this notebook — the physics lives in PhysicsModule (the later
# revision of this notebook already uses PhysicsModule here).
st = PhysicsModule.State([1,2,3,4],[3])
launches = zeros(Float32,4)
loss_param = 2e-3;
# symmetric cross-constellation collision rates, zero on the diagonal
loss_weights = loss_param*(ones(N_constellations,N_constellations) - LinearAlgebra.I);
#Todo, wrap physical model as a struct with the parameters
# Positional args follow BasicModel's field order:
# (debris_collision_rate, satellite_collision_rates, decay_rate,
#  autocatalysis_rate, satellite_collision_debris_ratio, launch_debris_ratio)
bm = PhysicsModule.BasicModel(
loss_param
,loss_weights
,0.01
,0.001
,5.0
,0.05
)
end
# ╔═╡ 5497cf84-0fc1-4d47-8a1d-58f862000828
# FIX: was `OD.state_transition` — `OD` is undefined in this notebook; the
# transition function is defined in PhysicsModule.
PhysicsModule.state_transition(bm,st,launches)
# ╔═╡ 7ad063fa-70e1-4ffd-ad1a-8c65306bb2cc
md"""
## Neural Networks
"""
# ╔═╡ f7febfdd-81f0-4f67-8986-fb52e6e7d9fe
md"""
## Operator Models
"""
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
[compat]
BSON = "~0.3.4"
Flux = "~0.12.8"
Zygote = "~0.6.30"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "485ee0867925449198280d4af84bdb46a2a404d0"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.0.1"
[[AbstractTrees]]
git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.3.4"
[[Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.1"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[ArrayInterface]]
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
git-tree-sha1 = "e527b258413e0c6d4f66ade574744c94edef81f8"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "3.1.40"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "a598ecb0d717092b5539dbbe890c98bac842b072"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.2.0"
[[BSON]]
git-tree-sha1 = "ebcd6e22d69f21249b7b8668351ebf42d6dc87a1"
uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
version = "0.3.4"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[CEnum]]
git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.1"
[[CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"]
git-tree-sha1 = "2c8329f16addffd09e6ca84c556e2185a4933c64"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "3.5.0"
[[ChainRules]]
deps = ["ChainRulesCore", "Compat", "LinearAlgebra", "Random", "RealDot", "Statistics"]
git-tree-sha1 = "035ef8a5382a614b2d8e3091b6fdbb1c2b050e11"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.12.1"
[[ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "f885e7e7c124f8c92650d61b9477b9ac2ee607dd"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.11.1"
[[ChangesOfVariables]]
deps = ["LinearAlgebra", "Test"]
git-tree-sha1 = "9a1d594397670492219635b35a3d830b04730d62"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.1"
[[CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.0"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "dce3e3fea680869eaa0b774b2e8343e9ff442313"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.40.0"
[[CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[DataAPI]]
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.9.0"
[[DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "7d9d316f04214f7efdbb6398d545446e246eff02"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.10"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.3"
[[DiffRules]]
deps = ["LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "3287dacf67c3652d3fed09f4c12c187ae4dbb89a"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.4.0"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[ExprTools]]
git-tree-sha1 = "b7e3d17636b348f005f11040025ae8c6f645fe92"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.6"
[[FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "8756f9935b7ccc9064c6eef0bff0ad643df733a3"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.12.7"
[[FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[Flux]]
deps = ["AbstractTrees", "Adapt", "ArrayInterface", "CUDA", "CodecZlib", "Colors", "DelimitedFiles", "Functors", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "NNlibCUDA", "Pkg", "Printf", "Random", "Reexport", "SHA", "SparseArrays", "Statistics", "StatsBase", "Test", "ZipFile", "Zygote"]
git-tree-sha1 = "e8b37bb43c01eed0418821d1f9d20eca5ba6ab21"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.12.8"
[[ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "6406b5112809c08b1baa5703ad274e1dded0652f"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.23"
[[Functors]]
git-tree-sha1 = "e4768c3b7f597d5a352afa09874d16e3c3f6ead2"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.2.7"
[[GPUArrays]]
deps = ["Adapt", "LinearAlgebra", "Printf", "Random", "Serialization", "Statistics"]
git-tree-sha1 = "7772508f17f1d482fe0df72cabc5b55bec06bbe0"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.1.2"
[[GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "77d915a0af27d474f0aaf12fcd46c400a552e84c"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.13.7"
[[IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "95215cd0076a150ef46ff7928892bc341864c73c"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.3"
[[IfElse]]
git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1"
uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173"
version = "0.1.1"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "a7254c0acd8e62f1ac75ad24d5db43f5f19f3c65"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.2"
[[IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[Juno]]
deps = ["Base64", "Logging", "Media", "Profile"]
git-tree-sha1 = "07cb43290a840908a771552911a6274bc6c072c7"
uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d"
version = "0.8.4"
[[LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "46092047ca4edc10720ecab437c42283cd7c44f3"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "4.6.0"
[[LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "9436f02a0c9f726d914cc6539f87850701be18fc"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.12+0"
[[LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[LogExpFunctions]]
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "be9eef9f9d78cecb6f262f3c10da151a6c5ab827"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.5"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Media]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58"
uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27"
version = "0.5.0"
[[Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NNlib]]
deps = ["Adapt", "ChainRulesCore", "Compat", "LinearAlgebra", "Pkg", "Requires", "Statistics"]
git-tree-sha1 = "2eb305b13eaed91d7da14269bf17ce6664bfee3d"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.7.31"
[[NNlibCUDA]]
deps = ["CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics"]
git-tree-sha1 = "38358632d9c277f7bf8d202c127f601e8467aa4d"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.1.10"
[[NaNMath]]
git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.5"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
[[OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Random123]]
deps = ["Libdl", "Random", "RandomNumbers"]
git-tree-sha1 = "0e8b146557ad1c6deb1367655e052276690e71a3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.4.2"
[[RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.1.3"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[SpecialFunctions]]
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "f0bccf98e16759818ffc5d97ac3ebf87eb950150"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "1.8.1"
[[Static]]
deps = ["IfElse"]
git-tree-sha1 = "e7bc80dc93f50857a5d1e3c8121495852f407e6a"
uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
version = "0.4.0"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "3c76dde64d03699e074ac02eb2e8ba8254d428da"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.2.13"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[StatsAPI]]
git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.0.0"
[[StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "2bb0cb32026a66037360606510fca5984ccc6b75"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.13"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "7cb456f358e8f9d102a8b25e8dfedf58fa5689bc"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.13"
[[TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[ZipFile]]
deps = ["Libdl", "Printf", "Zlib_jll"]
git-tree-sha1 = "3593e69e469d2111389a9bd06bac1f3d730ac6de"
uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
version = "0.9.4"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "2c30f2df0ba43c17e88c8b55b5b22c401f7cde4e"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.30"
[[ZygoteRules]]
deps = ["MacroTools"]
git-tree-sha1 = "8c1a8e4dfacb1fd631745552c8db35d0deb09ea0"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.2"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╠═b194c2fc-340b-480e-b1ec-53db2fe4e7ec
# ╠═99aabead-81d0-4f66-994a-bc5485c08af5
# ╠═85897da8-bc22-40bd-8ec7-853f86ac4941
# ╟─3be917ed-7107-46a3-bd8e-969e0e8e0fea
# ╠═f4d11666-dd44-4340-8fe7-b5cf735dea67
# ╠═4e66c6cc-ad36-47e3-b2c0-2e9ca594ad04
# ╠═30622c11-32f3-4b68-b391-e526c3b09570
# ╠═5497cf84-0fc1-4d47-8a1d-58f862000828
# ╠═7ad063fa-70e1-4ffd-ad1a-8c65306bb2cc
# ╠═f7febfdd-81f0-4f67-8986-fb52e6e7d9fe
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002

@ -0,0 +1,962 @@
### A Pluto.jl notebook ###
# v0.17.1
using Markdown
using InteractiveUtils
# ╔═╡ b194c2fc-340b-480e-b1ec-53db2fe4e7ec
module PhysicsModule
# Physical model of satellite stocks and orbital debris: a State container,
# a BasicModel parameter struct, and a one-step state transition.
#Add exports here
using Flux, LinearAlgebra
using Zygote #included for saving and loading models
#==#
# Model state: per-constellation satellite stocks and the debris level(s).
struct State
stocks::Array{Float32}
debris::Array{Float32}
end
### Physics
abstract type PhysicalModel end
struct BasicModel <: PhysicalModel
#rate at which debris hits satellites
debris_collision_rate::Real
#rate at which satellites of different constellations collide
satellite_collision_rates::Matrix{Float64}
#rate at which debris exits orbits
decay_rate::Real
#rate at which debris self-propagates (autocatalysis; applied to the debris stock below)
autocatalysis_rate::Real
#ratio at which a collision between satellites produced debris
satellite_collision_debris_ratio::Real
#Ratio at which launches produce debris
launch_debris_ratio::Real
end
# One-step transition: given the physics parameters, the current state and the
# launch vector, returns the next State.
function state_transition(
physics::BasicModel
,state::State
,launches::Vector{Float32}
)
#=
Physical Transitions
=#
# per-constellation fraction of satellites surviving this step
survival_rate = survival(state,physics)
# Debris
# get changes in debris from natural dynamics
natural_debris_dynamics = (1 - physics.decay_rate + physics.autocatalysis_rate) * state.debris
# get changes in debris from satellite loss
satellite_loss_debris = physics.satellite_collision_debris_ratio * (1 .- survival_rate)'*state.stocks
# get changes in debris from launches
launch_debris = physics.launch_debris_ratio * sum(launches)
# total debris level
debris = natural_debris_dynamics .+ satellite_loss_debris .+ launch_debris
#stocks: surviving fraction of each stock, minus decay, plus new launches.
# NOTE(review): `.-` subtracts decay_rate from EVERY entry of the diagonal
# matrix (off-diagonals become -decay_rate) — confirm that is intended.
stocks = (LinearAlgebra.diagm(survival_rate) .- physics.decay_rate)*state.stocks + launches
return State(stocks,debris)
end
# Per-constellation survival fraction: exp of minus the collision/decay
# exposure. Note the unary minus binds to the whole (rates .+ decay) matrix
# before the matrix-vector product.
function survival(
state::State
,physical_model::BasicModel
)
return exp.(
-(physical_model.satellite_collision_rates .+ physical_model.decay_rate) * state.stocks
.- (physical_model.debris_collision_rate * state.debris)
)
end
end #end PhysicsModule
# ╔═╡ 4e66c6cc-ad36-47e3-b2c0-2e9ca594ad04
using LinearAlgebra, BSON
# ╔═╡ 85897da8-bc22-40bd-8ec7-853f86ac4941
module NNTools
# Helpers for building branched policy networks and value networks.
#= TupleDuplicator
This is used to create a tuple of size n with deepcopies of any object x
=#
using Flux,Zygote,BSON
struct TupleDuplicator
n::Int
end
(f::TupleDuplicator)(x) = tuple([deepcopy(x) for i=1:f.n]...)
#=
This generates a policy function full of branches with the properly scaled sides
=#
struct BranchGenerator
n::UInt8
#limit to 2^8 operators
end
function (b::BranchGenerator)(branch::Flux.Chain,join_fn::Function)
# used to deepcopy the branches and duplicate the inputs in the returned chain
f = TupleDuplicator(b.n)
# NOTE(review): f(branch) is a tuple of n chains passed to Flux.Parallel as a
# SINGLE argument; Parallel normally takes its layers splatted
# (Flux.Parallel(join_fn, f(branch)...)) — confirm this really builds n branches.
return Flux.Chain(f,Flux.Parallel(join_fn,f(branch)))
end
## Couple of helpful policy generators, just for help.
# NOTE(review): the three generators below reference N_constellations and
# N_debris, which are NOT defined inside this module — calling them raises
# UndefVarError unless those constants exist globally. Elsewhere in the repo
# these dimensions are passed as arguments; confirm the intended source.
function operator_policy_function_generator(number_params=32)
return Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris
,Flux.Chain(
Flux.Dense(N_constellations, number_params*2,Flux.relu)
,Flux.Dense(number_params*2, number_params*2,Flux.tanh)
)
,Flux.Chain(
Flux.Dense(N_debris, number_params,Flux.relu)
,Flux.Dense(number_params, number_params)
)
)
#Apply some transformations
,Flux.Dense(number_params*3,number_params,Flux.tanh)
,Flux.Dense(number_params,number_params,Flux.tanh)
,Flux.Dense(number_params,1,x -> Flux.relu.(sinh.(x)))
)
end
# Planner variant: outputs one action per constellation rather than a scalar.
function planner_policy_function_generator(number_params=32)
return Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris
,Flux.Chain(
Flux.Dense(N_constellations, number_params*2,Flux.relu)
,Flux.Dense(number_params*2, number_params*2,Flux.σ)
)
,Flux.Chain(
Flux.Dense(N_debris, number_params,Flux.relu)
,Flux.Dense(number_params, number_params)
)
)
#Apply some transformations
,Flux.Dense(number_params*3,number_params,Flux.σ)
,Flux.Dense(number_params,number_params,Flux.σ)
,Flux.Dense(number_params,N_constellations,Flux.relu)
)
end
# Value network: maps (stocks, debris) to a single scalar value estimate.
function value_function_generator(number_params=32)
return Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris, after a little bit of preprocessing
,Flux.Chain(
Flux.Dense(N_constellations, number_params*2,Flux.relu)
,Flux.Dense(number_params*2, number_params*2,Flux.σ)
)
,Flux.Chain(
Flux.Dense(N_debris, number_params,Flux.relu)
,Flux.Dense(number_params, number_params,Flux.σ)
)
)
#Apply some transformations to the preprocessed data.
,Flux.Dense(number_params*3,number_params,Flux.σ)
,Flux.Dense(number_params,number_params,Flux.σ)
,Flux.Dense(number_params,1)
)
end
end
# ╔═╡ 3be917ed-7107-46a3-bd8e-969e0e8e0fea
md"""
# testing
Define the basic dimensions of the model
- Number of Constellations: $(const N_constellations = 4)
- Number of Debris Trackers: $(const N_debris = 1)
- Number of Overall States: $(const N_states = N_constellations + N_debris)
"""
# ╔═╡ f4d11666-dd44-4340-8fe7-b5cf735dea67
md"""
## Physics
"""
# ╔═╡ 30622c11-32f3-4b68-b391-e526c3b09570
begin #Setup for State tests
# Small test state: 4 constellations, one debris bin.
st = PhysicsModule.State([1,2,3,4],[3])
launches = zeros(Float32,4)
loss_param = 2e-3;
# symmetric cross-constellation collision rates, zero on the diagonal
loss_weights = loss_param*(ones(N_constellations,N_constellations) - LinearAlgebra.I);
#Todo, wrap physical model as a struct with the parameters
# Positional args follow BasicModel's field order:
# (debris_collision_rate, satellite_collision_rates, decay_rate,
#  autocatalysis_rate, satellite_collision_debris_ratio, launch_debris_ratio)
bm = PhysicsModule.BasicModel(
loss_param
,loss_weights
,0.01
,0.001
,5.0
,0.05
)
end
# ╔═╡ 5497cf84-0fc1-4d47-8a1d-58f862000828
PhysicsModule.state_transition(bm,st,launches)
# ╔═╡ 7ad063fa-70e1-4ffd-ad1a-8c65306bb2cc
md"""
## Neural Networks
"""
# ╔═╡ 2879d65a-4a2e-4486-ac26-a0490a80f23e
# NOTE(review): N_constellations / N_debris are not defined inside NNTools, so
# this call will raise UndefVarError — confirm where those are meant to come from.
NNTools.value_function_generator()
# ╔═╡ f7febfdd-81f0-4f67-8986-fb52e6e7d9fe
md"""
## Operator Models
"""
# ╔═╡ 99aabead-81d0-4f66-994a-bc5485c08af5
### Economics
module BenefitFunctions
# Per-period payoff ("economic") models for constellation operators.
# Each concrete model is a callable struct: model(s, d, a), where
#   s — satellite stocks per constellation
#   d — debris level(s)
#   a — actions (launches) per constellation
abstract type EconomicModel end
#basic linear model: payoff linear in stocks, minus linear launch costs
struct LinearModel <: EconomicModel
β::Float32 # discount factor
payoff_array::Array{Float32} # row weights applied to the stocks s
policy_costs::Array{Float32} # row weights applied to the actions a
end
function (em::LinearModel)(
s::Vector{Float32}
,d::Vector{Float32}
,a::Vector{Float32}
)
# debris d does not enter the linear payoff
return em.payoff_array*s - em.policy_costs*a
end
#basic CES model
struct CES <: EconomicModel
β::Float32 # discount factor
r::Float32 #elasticity of subsititution
payoff_array::Array{Float32}
policy_costs::Array{Float32}
debris_costs::Array{Float32}
end
function (em::CES)(
s::Vector{Float32}
,d::Vector{Float32}
,a::Vector{Float32}
)
# NOTE(review): these elementwise products broadcast a row array against a
# column vector and so produce matrices, not vectors — confirm the intended
# shapes of payoff_array/policy_costs/debris_costs before relying on CES.
r1 = em.payoff_array .* (s.^em.r)
r2 = - em.debris_costs .* (d.^em.r)
r3 = - em.policy_costs .* (a.^em.r)
return (r1 + r2 + r3) .^ (1/em.r)
end
#basic CRRA
struct CRRA <: EconomicModel
β::Float32 # discount factor
σ::Float32 # relative risk aversion parameter
payoff_array::Array{Float32}
policy_costs::Array{Float32}
debris_costs::Array{Float32}
end
function (em::CRRA)(
s::Vector{Float32}
,d::Vector{Float32}
,a::Vector{Float32}
)
# Net flow: payoff from stocks minus debris costs minus action costs.
# FIX: `em.policy_costs` was subtracted without `*a` (the action argument was
# unused and the shapes could not match), and `(core-1)` subtracted a scalar
# from a vector without broadcasting; both corrected below.
core = (em.payoff_array*s - em.debris_costs*d - em.policy_costs*a).^(1 - em.σ)
return (core .- 1) ./ (1 - em.σ)
end
end #end module
# ╔═╡ 473c1c21-c3b8-4ca8-8e76-cc42b3ea0da4
begin
#=
This struct organizes information about a given constellation operator
Used to provide an iterable loss function for training
NOTE(review): `EconomicModel` is declared inside module BenefitFunctions, and
`BasicDebrisEvolution`/`BasicStockEvolution` are not visible in this chunk —
presumably brought into scope by other notebook cells; confirm.
=#
struct ConstellationOperatorLoss
#econ model describing operator
econ_model::EconomicModel
#Operator's value and policy functions, as well as which parameters the operator can train.
operator_value_fn::Flux.Chain
collected_policies::Flux.Chain #this is held by all operators
operator_policy_params::Flux.Params
#Transition functions
debris_transition::BasicDebrisEvolution
stocks_transition::BasicStockEvolution
end
# overriding function to calculate operator loss
# (uses the operator's own collected policy chain)
function (operator::ConstellationOperatorLoss)(
s::Vector{Float32}
,d::Vector{Float32}
)
#get actions
a = operator.collected_policies((s,d))
#get updated stocks and debris
s = operator.stocks_transition(s ,d ,a)
d = operator.debris_transition(s ,d ,a)
# NOTE(review): both value-function evaluations below use the
# post-transition (s,d); a Bellman residual is usually
# V(state) - reward - β*V(next state) — confirm this is intended.
bellman_residuals = operator.operator_value_fn((s,d)) - operator.econ_model(s,d,a) - operator.econ_model.β * operator.operator_value_fn((s,d))
maximization_condition = - operator.econ_model(s ,d ,a) - operator.econ_model.β * operator.operator_value_fn((s,d))
# NOTE(review): mae of squared residuals against the maximization
# condition is an unusual objective — confirm intended loss form.
return Flux.mae(bellman_residuals.^2 ,maximization_condition)
end
# same loss, but with an externally supplied policy chain
function (operator::ConstellationOperatorLoss)(
s::Vector{Float32}
,d::Vector{Float32}
,policy::Flux.Chain #allow for another policy to be subsituted
)
#get actions
a = policy((s,d))
#get updated stocks and debris
s = operator.stocks_transition(s ,d ,a)
d = operator.debris_transition(s ,d ,a)
bellman_residuals = operator.operator_value_fn((s,d)) - operator.econ_model(s,d,a) - operator.econ_model.β * operator.operator_value_fn((s,d))
maximization_condition = - operator.econ_model(s ,d ,a) - operator.econ_model.β * operator.operator_value_fn((s,d))
return Flux.mae(bellman_residuals.^2 ,maximization_condition)
end
end
# ╔═╡ c5d4b5af-ec04-4941-b84f-c70e9665810b
begin
#do the same thing with the planner's problem
struct PlannerLoss
#=
Ideally, with just a well formed PlannerLoss and the training functions below, we should be able to train the approximation.
There is an issue with appropriately training the value functions.
In this case, it is not happening...
=#
β::Float32 # planner's discount factor
operators::Array{ConstellationOperatorLoss}
policy::Flux.Chain
policy_params::Flux.Params
value::Flux.Chain
value_params::Flux.Params
debris_transition::BasicDebrisEvolution
stocks_transition::BasicStockEvolution
end
# planner's loss: sum of operator benefits in place of a single payoff
function (planner::PlannerLoss)(
s::Vector{Float32}
,d::Vector{Float32}
)
a = planner.policy((s ,d))
#get updated stocks and debris
s = planner.stocks_transition(s ,d ,a)
d = planner.debris_transition(s ,d ,a)
#calculate the total benefit from each of the models
benefit = sum([ co.econ_model(s ,d ,a) for co in planner.operators])
#issue here with mutating. Maybe generators/list comprehensions?
# NOTE(review): both value evaluations use the post-transition (s,d);
# confirm the residual is not meant to contrast old vs new state.
bellman_residuals = planner.value((s,d)) - benefit - planner.β .* planner.value((s,d))
maximization_condition = - benefit - planner.β .* planner.value((s,d))
return Flux.mae(bellman_residuals.^2 ,maximization_condition)
end
end
# ╔═╡ 00000000-0000-0000-0000-000000000001
PLUTO_PROJECT_TOML_CONTENTS = """
[deps]
BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
[compat]
BSON = "~0.3.4"
Flux = "~0.12.8"
Zygote = "~0.6.30"
"""
# ╔═╡ 00000000-0000-0000-0000-000000000002
PLUTO_MANIFEST_TOML_CONTENTS = """
# This file is machine-generated - editing it directly is not advised
[[AbstractFFTs]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "485ee0867925449198280d4af84bdb46a2a404d0"
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
version = "1.0.1"
[[AbstractTrees]]
git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5"
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
version = "0.3.4"
[[Adapt]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7"
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
version = "3.3.1"
[[ArgTools]]
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
[[ArrayInterface]]
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
git-tree-sha1 = "e527b258413e0c6d4f66ade574744c94edef81f8"
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
version = "3.1.40"
[[Artifacts]]
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
[[BFloat16s]]
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
git-tree-sha1 = "a598ecb0d717092b5539dbbe890c98bac842b072"
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
version = "0.2.0"
[[BSON]]
git-tree-sha1 = "ebcd6e22d69f21249b7b8668351ebf42d6dc87a1"
uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
version = "0.3.4"
[[Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
[[CEnum]]
git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9"
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
version = "0.4.1"
[[CUDA]]
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"]
git-tree-sha1 = "2c8329f16addffd09e6ca84c556e2185a4933c64"
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
version = "3.5.0"
[[ChainRules]]
deps = ["ChainRulesCore", "Compat", "LinearAlgebra", "Random", "RealDot", "Statistics"]
git-tree-sha1 = "035ef8a5382a614b2d8e3091b6fdbb1c2b050e11"
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
version = "1.12.1"
[[ChainRulesCore]]
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
git-tree-sha1 = "f885e7e7c124f8c92650d61b9477b9ac2ee607dd"
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
version = "1.11.1"
[[ChangesOfVariables]]
deps = ["LinearAlgebra", "Test"]
git-tree-sha1 = "9a1d594397670492219635b35a3d830b04730d62"
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
version = "0.1.1"
[[CodecZlib]]
deps = ["TranscodingStreams", "Zlib_jll"]
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
version = "0.7.0"
[[ColorTypes]]
deps = ["FixedPointNumbers", "Random"]
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
version = "0.11.0"
[[Colors]]
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
version = "0.12.8"
[[CommonSubexpressions]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
version = "0.3.0"
[[Compat]]
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
git-tree-sha1 = "dce3e3fea680869eaa0b774b2e8343e9ff442313"
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
version = "3.40.0"
[[CompilerSupportLibraries_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
[[DataAPI]]
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
version = "1.9.0"
[[DataStructures]]
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
git-tree-sha1 = "7d9d316f04214f7efdbb6398d545446e246eff02"
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
version = "0.18.10"
[[Dates]]
deps = ["Printf"]
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
[[DelimitedFiles]]
deps = ["Mmap"]
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
[[DiffResults]]
deps = ["StaticArrays"]
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
version = "1.0.3"
[[DiffRules]]
deps = ["LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
git-tree-sha1 = "3287dacf67c3652d3fed09f4c12c187ae4dbb89a"
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
version = "1.4.0"
[[Distributed]]
deps = ["Random", "Serialization", "Sockets"]
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
[[DocStringExtensions]]
deps = ["LibGit2"]
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
version = "0.8.6"
[[Downloads]]
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
[[ExprTools]]
git-tree-sha1 = "b7e3d17636b348f005f11040025ae8c6f645fe92"
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
version = "0.1.6"
[[FillArrays]]
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
git-tree-sha1 = "8756f9935b7ccc9064c6eef0bff0ad643df733a3"
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
version = "0.12.7"
[[FixedPointNumbers]]
deps = ["Statistics"]
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
version = "0.8.4"
[[Flux]]
deps = ["AbstractTrees", "Adapt", "ArrayInterface", "CUDA", "CodecZlib", "Colors", "DelimitedFiles", "Functors", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "NNlibCUDA", "Pkg", "Printf", "Random", "Reexport", "SHA", "SparseArrays", "Statistics", "StatsBase", "Test", "ZipFile", "Zygote"]
git-tree-sha1 = "e8b37bb43c01eed0418821d1f9d20eca5ba6ab21"
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
version = "0.12.8"
[[ForwardDiff]]
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
git-tree-sha1 = "6406b5112809c08b1baa5703ad274e1dded0652f"
uuid = "f6369f11-7733-5829-9624-2563aa707210"
version = "0.10.23"
[[Functors]]
git-tree-sha1 = "e4768c3b7f597d5a352afa09874d16e3c3f6ead2"
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
version = "0.2.7"
[[GPUArrays]]
deps = ["Adapt", "LinearAlgebra", "Printf", "Random", "Serialization", "Statistics"]
git-tree-sha1 = "7772508f17f1d482fe0df72cabc5b55bec06bbe0"
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
version = "8.1.2"
[[GPUCompiler]]
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"]
git-tree-sha1 = "77d915a0af27d474f0aaf12fcd46c400a552e84c"
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
version = "0.13.7"
[[IRTools]]
deps = ["InteractiveUtils", "MacroTools", "Test"]
git-tree-sha1 = "95215cd0076a150ef46ff7928892bc341864c73c"
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
version = "0.4.3"
[[IfElse]]
git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1"
uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173"
version = "0.1.1"
[[InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
[[InverseFunctions]]
deps = ["Test"]
git-tree-sha1 = "a7254c0acd8e62f1ac75ad24d5db43f5f19f3c65"
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
version = "0.1.2"
[[IrrationalConstants]]
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.1.1"
[[JLLWrappers]]
deps = ["Preferences"]
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
version = "1.3.0"
[[Juno]]
deps = ["Base64", "Logging", "Media", "Profile"]
git-tree-sha1 = "07cb43290a840908a771552911a6274bc6c072c7"
uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d"
version = "0.8.4"
[[LLVM]]
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
git-tree-sha1 = "46092047ca4edc10720ecab437c42283cd7c44f3"
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
version = "4.6.0"
[[LLVMExtra_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "9436f02a0c9f726d914cc6539f87850701be18fc"
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
version = "0.0.12+0"
[[LazyArtifacts]]
deps = ["Artifacts", "Pkg"]
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
[[LibCURL]]
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
[[LibCURL_jll]]
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
[[LibGit2]]
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
[[LibSSH2_jll]]
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
[[Libdl]]
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
[[LinearAlgebra]]
deps = ["Libdl"]
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
[[LogExpFunctions]]
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "be9eef9f9d78cecb6f262f3c10da151a6c5ab827"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.5"
[[Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
[[MacroTools]]
deps = ["Markdown", "Random"]
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
version = "0.5.9"
[[Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
[[MbedTLS_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
[[Media]]
deps = ["MacroTools", "Test"]
git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58"
uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27"
version = "0.5.0"
[[Missings]]
deps = ["DataAPI"]
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
version = "1.0.2"
[[Mmap]]
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
[[MozillaCACerts_jll]]
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
[[NNlib]]
deps = ["Adapt", "ChainRulesCore", "Compat", "LinearAlgebra", "Pkg", "Requires", "Statistics"]
git-tree-sha1 = "2eb305b13eaed91d7da14269bf17ce6664bfee3d"
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
version = "0.7.31"
[[NNlibCUDA]]
deps = ["CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics"]
git-tree-sha1 = "38358632d9c277f7bf8d202c127f601e8467aa4d"
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
version = "0.1.10"
[[NaNMath]]
git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb"
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
version = "0.3.5"
[[NetworkOptions]]
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
[[OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
[[OpenSpecFun_jll]]
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
[[OrderedCollections]]
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
version = "1.4.1"
[[Pkg]]
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
[[Preferences]]
deps = ["TOML"]
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
uuid = "21216c6a-2e73-6563-6e65-726566657250"
version = "1.2.2"
[[Printf]]
deps = ["Unicode"]
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
[[Profile]]
deps = ["Printf"]
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
[[REPL]]
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
[[Random]]
deps = ["Serialization"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
[[Random123]]
deps = ["Libdl", "Random", "RandomNumbers"]
git-tree-sha1 = "0e8b146557ad1c6deb1367655e052276690e71a3"
uuid = "74087812-796a-5b5d-8853-05524746bad3"
version = "1.4.2"
[[RandomNumbers]]
deps = ["Random", "Requires"]
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
version = "1.5.3"
[[RealDot]]
deps = ["LinearAlgebra"]
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
version = "0.1.0"
[[Reexport]]
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
version = "1.2.2"
[[Requires]]
deps = ["UUIDs"]
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
version = "1.1.3"
[[SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
[[Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
[[SharedArrays]]
deps = ["Distributed", "Mmap", "Random", "Serialization"]
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
[[Sockets]]
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
[[SortingAlgorithms]]
deps = ["DataStructures"]
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
version = "1.0.1"
[[SparseArrays]]
deps = ["LinearAlgebra", "Random"]
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
[[SpecialFunctions]]
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "f0bccf98e16759818ffc5d97ac3ebf87eb950150"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "1.8.1"
[[Static]]
deps = ["IfElse"]
git-tree-sha1 = "e7bc80dc93f50857a5d1e3c8121495852f407e6a"
uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
version = "0.4.0"
[[StaticArrays]]
deps = ["LinearAlgebra", "Random", "Statistics"]
git-tree-sha1 = "3c76dde64d03699e074ac02eb2e8ba8254d428da"
uuid = "90137ffa-7385-5640-81b9-e52037218182"
version = "1.2.13"
[[Statistics]]
deps = ["LinearAlgebra", "SparseArrays"]
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
[[StatsAPI]]
git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510"
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.0.0"
[[StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "2bb0cb32026a66037360606510fca5984ccc6b75"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.33.13"
[[TOML]]
deps = ["Dates"]
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
[[Tar]]
deps = ["ArgTools", "SHA"]
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
[[Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
[[TimerOutputs]]
deps = ["ExprTools", "Printf"]
git-tree-sha1 = "7cb456f358e8f9d102a8b25e8dfedf58fa5689bc"
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
version = "0.5.13"
[[TranscodingStreams]]
deps = ["Random", "Test"]
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
version = "0.9.6"
[[UUIDs]]
deps = ["Random", "SHA"]
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[[Unicode]]
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
[[ZipFile]]
deps = ["Libdl", "Printf", "Zlib_jll"]
git-tree-sha1 = "3593e69e469d2111389a9bd06bac1f3d730ac6de"
uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
version = "0.9.4"
[[Zlib_jll]]
deps = ["Libdl"]
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
[[Zygote]]
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"]
git-tree-sha1 = "2c30f2df0ba43c17e88c8b55b5b22c401f7cde4e"
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
version = "0.6.30"
[[ZygoteRules]]
deps = ["MacroTools"]
git-tree-sha1 = "8c1a8e4dfacb1fd631745552c8db35d0deb09ea0"
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
version = "0.2.2"
[[nghttp2_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
[[p7zip_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
"""
# ╔═╡ Cell order:
# ╟─b194c2fc-340b-480e-b1ec-53db2fe4e7ec
# ╟─3be917ed-7107-46a3-bd8e-969e0e8e0fea
# ╠═f4d11666-dd44-4340-8fe7-b5cf735dea67
# ╠═4e66c6cc-ad36-47e3-b2c0-2e9ca594ad04
# ╠═30622c11-32f3-4b68-b391-e526c3b09570
# ╠═5497cf84-0fc1-4d47-8a1d-58f862000828
# ╠═7ad063fa-70e1-4ffd-ad1a-8c65306bb2cc
# ╟─85897da8-bc22-40bd-8ec7-853f86ac4941
# ╠═2879d65a-4a2e-4486-ac26-a0490a80f23e
# ╠═f7febfdd-81f0-4f67-8986-fb52e6e7d9fe
# ╟─99aabead-81d0-4f66-994a-bc5485c08af5
# ╠═473c1c21-c3b8-4ca8-8e76-cc42b3ea0da4
# ╠═c5d4b5af-ec04-4941-b84f-c70e9665810b
# ╟─00000000-0000-0000-0000-000000000001
# ╟─00000000-0000-0000-0000-000000000002

@ -0,0 +1,401 @@
module Orbits
#Add exports here
export State ,state_to_tuple ,state_transition ,PhysicalModel ,BasicPhysics
export BranchGenerator, value_function_generator,
cross_linked_planner_policy_function_generator,
operator_policy_function_generator
# Exports
export GeneralizedLoss ,OperatorLoss ,PlannerLoss ,UniformDataConstructor,
LinearModel ,BasicPhysics,
operator_policy_function_generator ,value_function_generator ,BranchGenerator,
TupleDuplicator, BranchGenerator
using Flux, LinearAlgebra
using BSON, Zygote
export LinearModel, BenefitFunction, EconomicModel
#==#
# Container for the system state: per-constellation satellite stocks and
# the tracked debris levels.
struct State
stocks::Array{Float32}
debris::Array{Float32}
end
# Destructure a State into a (stocks, debris) tuple — the input form the
# Flux.Parallel networks below expect.
function state_to_tuple(s::State)
return (s.stocks ,s.debris)
end
abstract type PhysicalModel end
# Parameters of the orbital environment used by state_transition/survival.
struct BasicPhysics <: PhysicalModel
#rate at which debris hits satellites
debris_collision_rate::Real
#rate at which satellites of different constellations collide
satellite_collision_rates::Matrix{Float64}
#rate at which debris exits orbits
decay_rate::Real
#rate at which existing debris begets more debris (collisional cascading,
#presumably — the original comment was left unfinished; confirm)
autocatalysis_rate::Real
#ratio at which a collision between satellites produced debris
satellite_collision_debris_ratio::Real
#Ratio at which launches produce debris
launch_debris_ratio::Real
end
function state_transition(
physics::BasicPhysics
,state::State
,launches::Vector{Float32}
)
#=
Physical Transitions
Advance (stocks, debris) one period under `physics` given `launches`.
Returns a new State; does not mutate its arguments.
=#
survival_rate = survival(state,physics)
# Debris transitions
# get changes in debris from natural dynamics
natural_debris_dynamics = (1 - physics.decay_rate + physics.autocatalysis_rate) * state.debris
# get changes in debris from satellite loss
satellite_loss_debris = physics.satellite_collision_debris_ratio * (1 .- survival_rate)' * state.stocks
# get changes in debris from launches
launch_debris = physics.launch_debris_ratio * sum(launches)
# total debris level
debris = natural_debris_dynamics .+ satellite_loss_debris .+ launch_debris
# Stocks Transitions
# NOTE(review): decay_rate is subtracted from EVERY entry of the survival
# matrix (via .-), not just the diagonal — confirm this is intended.
stocks = (LinearAlgebra.diagm(survival_rate) .- physics.decay_rate)*state.stocks + launches
return State(stocks,debris)
end
function survival(
    state::State
    ,physical_model::BasicPhysics
)
    # Per-constellation survival probability: exp(-total hazard), where the
    # hazard has an inter-satellite part and a debris part.
    satellite_hazard = (physical_model.satellite_collision_rates .+ physical_model.decay_rate) * state.stocks
    debris_hazard = physical_model.debris_collision_rate * state.debris
    return exp.(-satellite_hazard .- debris_hazard)
end
#=
Benefit Functions:
These represent the benefits that different operators may receive
=#
abstract type EconomicModel end
#basic linear model: payoff linear in stocks, cost linear in actions
struct LinearModel <: EconomicModel
β::Float32 # discount factor (read by the loss structs, not used in the payoff)
payoff_array::Array{Float32}
policy_costs::Array{Float32}
end
# per-period payoff given the current state and chosen actions
function (em::LinearModel)(
state::State
,actions::Vector{Float32}
)
return em.payoff_array*state.stocks - em.policy_costs*actions
end
#basic CES model
struct CES <: EconomicModel
β::Float32 # discount factor
r::Float32 #elasticity of substitution
payoff_array::Array{Float32}
policy_costs::Array{Float32}
debris_costs::Array{Float32}
end
# CES aggregate of stock benefits minus debris and action costs
function (em::CES)(
state::State
,actions::Vector{Float32}
)
#issue here with multiplication
# each term is elementwise (.*, .^); the outer exponent 1/r closes the CES
r1 = em.payoff_array .* (state.stocks.^em.r)
r2 = - em.debris_costs .* (state.debris.^em.r)
r3 = - em.policy_costs .* (actions.^em.r)
return (r1 + r2 + r3) .^ (1/em.r)
end
#basic CRRA
struct CRRA <: EconomicModel
    β::Float32 # discount factor
    σ::Float32 # coefficient of relative risk aversion
    payoff_array::Array{Float32}
    policy_costs::Array{Float32}
    debris_costs::Array{Float32}
end
# CRRA utility (x^(1-σ) - 1)/(1-σ) applied to the net linear payoff
function (em::CRRA)(
    state::State
    ,actions::Vector{Float32}
)
    core = (em.payoff_array*state.stocks - em.debris_costs*state.debris - em.policy_costs*actions).^(1 - em.σ)
    # fix: `core` is an array, so subtracting the scalar 1 must broadcast —
    # `core - 1` raises a MethodError on Julia >= 1.0.
    return (core .- 1)/(1-em.σ)
end
#= TupleDuplicator
This is used to create a tuple of size n with deepcopies of any object x
=#
struct TupleDuplicator
n::Int # number of copies in the produced tuple
end
#make tuples consisting of copies of whatever was provided
function (f::TupleDuplicator)(s::State)
# destructure the State first, then duplicate via the generic method below
st = state_to_tuple(s)
return f(st)
#BROKEN: Fails in test, but works in REPL.
end
# generic fallback: n deepcopies of x, splatted into a tuple
(f::TupleDuplicator)(x) = tuple([deepcopy(x) for i=1:f.n]...)
# Builds a chain that fans one State out to n deep-copied parallel branches.
struct BranchGenerator
n::UInt # number of branch copies
end
function (b::BranchGenerator)(branch::Flux.Parallel,join_fn::Function)
# used to deepcopy the branches and duplicate the inputs in the returned chain
f = TupleDuplicator(b.n)
return Flux.Chain(state_to_tuple,f,Flux.Parallel(join_fn,f(branch)...))
#note that it destructures the state to a tuple, duplicates,
# and then passes to the parallelized functions.
end
# Neural Network Generators
# Value-function approximator: (stocks, debris) tuple -> scalar value.
# NOTE(review): relies on notebook/global N_constellations and N_debris
# (unlike operator_policy_function_generator, which takes them as arguments).
function value_function_generator(number_params=32)
return Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris, after a little bit of preprocessing
,Flux.Chain(
Flux.Dense(N_constellations, number_params*2,Flux.relu)
,Flux.Dense(number_params*2, number_params*2,Flux.σ)
)
,Flux.Chain(
Flux.Dense(N_debris, number_params,Flux.relu)
,Flux.Dense(number_params, number_params,Flux.σ)
)
)
#Apply some transformations to the preprocessed data.
,Flux.Dense(number_params*3,number_params,Flux.σ)
,Flux.Dense(number_params,number_params,Flux.σ)
,Flux.Dense(number_params,1)
)
end
function cross_linked_planner_policy_function_generator(number_params=32)
# Planner policy network: (stocks, debris) tuple -> one launch decision per
# constellation; the final relu keeps launches nonnegative.
# NOTE(review): relies on notebook/global N_constellations and N_debris.
return Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris
,Flux.Chain(
Flux.Dense(N_constellations, number_params*2,Flux.relu)
,Flux.Dense(number_params*2, number_params*2,Flux.σ)
)
,Flux.Chain(
Flux.Dense(N_debris, number_params,Flux.relu)
,Flux.Dense(number_params, number_params)
)
)
#Apply some transformations
,Flux.Dense(number_params*3,number_params,Flux.σ)
,Flux.Dense(number_params,number_params,Flux.σ)
,Flux.Dense(number_params,N_constellations,Flux.relu)
)
end
function operator_policy_function_generator(
N_constellations::Int
,N_debris
,number_params=32
)
# Single-operator policy network: (stocks, debris) tuple -> one launch
# decision. The output activation relu(sinh(x)) is smooth near 0 and
# clamps negatives to zero, keeping launches nonnegative.
return Flux.Chain(
Flux.Parallel(vcat
#parallel joins together stocks and debris
,Flux.Chain(
Flux.Dense(N_constellations, number_params*2,Flux.relu)
,Flux.Dense(number_params*2, number_params*2,Flux.tanh)
)
,Flux.Chain(
Flux.Dense(N_debris, number_params,Flux.relu)
,Flux.Dense(number_params, number_params)
)
)
#Apply some transformations
,Flux.Dense(number_params*3,number_params,Flux.tanh)
,Flux.Dense(number_params,number_params,Flux.tanh)
,Flux.Dense(number_params,1,x -> Flux.relu.(sinh.(x)))
)
end
abstract type GeneralizedLoss end
#=
This struct organizes information about a given constellation operator
Used to provide an iterable loss function for training
=#
struct OperatorLoss <: GeneralizedLoss
#econ model describing operator
econ_model::EconomicModel
#Operator's value and policy functions, as well as which parameters the operator can train.
operator_value_fn::Flux.Chain
collected_policies::Flux.Chain #this is held by all operators
operator_policy_params::Flux.Params
#physical environment used to advance the state inside the loss
physics::PhysicalModel
end
# overriding function to calculate operator loss
function (operator::OperatorLoss)(
    state::State
)
    #=
    Loss using the operator's own (shared) policy chain.
    Fix: the original body referenced undefined `s` and `d`; delegate to the
    two-argument method below, supplying the operator's collected policy.
    =#
    return operator(state ,operator.collected_policies)
end #function
function (operator::OperatorLoss)(
    state::State
    ,policy::Flux.Chain #allow for another policy to be subsituted
)
    #get actions from the supplied policy
    a = policy(state)
    #get updated stocks and debris
    # fix: `stake_transition(state,a)` was a typo and omitted the physics
    # argument; use this module's state_transition with the operator's physics.
    state = state_transition(operator.physics ,state ,a)
    # NOTE(review): both value evaluations use the post-transition state;
    # confirm the Bellman residual is not meant to contrast old vs new state.
    bellman_residuals = operator.operator_value_fn(state) - operator.econ_model(state,a) - operator.econ_model.β * operator.operator_value_fn(state)
    maximization_condition = - operator.econ_model(state,a) - operator.econ_model.β * operator.operator_value_fn(state)
    return Flux.mae(bellman_residuals.^2 ,maximization_condition)
end #function
# Train the operator's policy parameters, then its value-function
# parameters, on `data` with optimizer `opt`.
# NOTE(review): Flux.train! iterates over `data`; passing a single State
# assumes State is iterable as a batch — confirm against the caller.
function train_operators!(op::OperatorLoss, data::State, opt)
Flux.train!(op, op.operator_policy_params, data, opt)
Flux.train!(op, Flux.params(op.operator_value_fn), data, opt)
end
#=
Describe the Planners loss function
=#
struct PlannerLoss <: GeneralizedLoss
#=
Ideally, with just a well formed PlannerLoss and the training functions below, we should be able to train the approximation.
There is an issue with appropriately training the value functions.
In this case, it is not happening...
=#
β::Float32 # planner's discount factor
operators::Array{GeneralizedLoss} # the operators whose benefits the planner sums
policy::Flux.Chain
policy_params::Flux.Params
value::Flux.Chain
value_params::Flux.Params
physical_model::PhysicalModel
end
function (planner::PlannerLoss)(
    state::State
)
    #=
    Planner's loss: like the operator loss, but the per-period payoff is the
    sum of all operators' benefits.
    Fixes: the original referenced undefined `s`/`d` and called
    state_transition without the physical model / with the wrong arity.
    =#
    a = planner.policy(state)
    #get updated stocks and debris
    state = state_transition(planner.physical_model ,state ,a)
    #calculate the total benefit from each of the models
    benefit = sum([ co.econ_model(state ,a) for co in planner.operators])
    #issue here with mutating. Maybe generators/list comprehensions?
    # NOTE(review): assumes planner.value accepts a State directly (as the
    # OperatorLoss value function does) — confirm how the chain is built.
    bellman_residuals = planner.value(state) - benefit - planner.β .* planner.value(state)
    maximization_condition = - benefit - planner.β .* planner.value(state)
    return Flux.mae(bellman_residuals.^2 ,maximization_condition)
end
function train_planner!(pl::PlannerLoss, N_epoch::Int, opt)
    #=
    Train the planner's policy parameters, then its value parameters, for
    N_epoch epochs, recording a scaled loss after each epoch.
    NOTE(review): `data_gen()` is not defined in this module — presumably a
    notebook-level data constructor; confirm.
    =#
    errors = []
    for i = 1:N_epoch
        data = data_gen()
        Flux.train!(pl, pl.policy_params, data, opt)
        Flux.train!(pl, pl.value_params, data, opt)
        # fix: the original called Base.error(pl, data1) with undefined
        # `data1`, which throws; record the planner loss on this epoch's data
        # instead (keeping the original /200 scaling).
        append!(errors, pl(data) / 200)
    end
    return errors
end
# Train every operator's networks for N_epoch epochs on freshly drawn data.
# NOTE(review): `errors` is returned but never appended to, and `data_gen`
# is not defined in this module — presumably notebook-level; confirm both.
function train_operators!(pl::PlannerLoss, N_epoch::Int, opt)
errors = []
for i = 1:N_epoch
data = data_gen()
for op in pl.operators
train_operators!(op,data,opt)
end
end
return errors
end
#Construct and manage data
abstract type DataConstructor end
# Draws a random State with satellite/debris values inside the given bounds.
struct UniformDataConstructor <: DataConstructor
N::UInt64 # intended sample count (currently unused below)
satellites_bottom::Float32
satellites_top::Float32
debris_bottom::Float32
debris_top::Float32
end
function (dc::UniformDataConstructor)()
#currently ignores the quantity of data it should construct.
# NOTE(review): rand over a Float32 range a:b samples the unit-step grid
# a, a+1, ... — not the continuous interval; confirm intended.
# Also relies on notebook/global N_constellations and N_debris.
State(
rand(dc.satellites_bottom:dc.satellites_top, N_constellations)
, rand(dc.debris_bottom:dc.debris_top, N_debris)
)
end
end # end module

@ -0,0 +1,174 @@
module Orbits
using Flux,LinearAlgebra,Zygote,BSON
include("physical_model.jl")
export State, state_to_tuple, state_transition, BasicPhysics, PhysicalModel
include("benefit_functions.jl")
export LinearModel, EconomicModel
include("flux_helpers.jl")
export operator_policy_function_generator,
value_function_generator,
BranchGenerator,
cross_linked_planner_policy_function_generator
# Exports from below
export GeneralizedLoss ,OperatorLoss ,PlannerLoss ,UniformDataConstructor ,
train_planner!, train_operators! ,
BasicPhysics, survival_rates_1
# Code
#Construct and manage data
abstract type DataConstructor end
# Draws dc.N random States with satellite/debris values in the given bounds.
struct UniformDataConstructor <: DataConstructor
N::UInt64 # number of samples to draw
satellites_bottom::Float32
satellites_top::Float32
debris_bottom::Float32
debris_top::Float32
end # struct
function (dc::UniformDataConstructor)(N_constellations,N_debris)
# NOTE(review): rand over a Float32 range a:b samples the unit-step grid,
# not the continuous interval, and the result is a (N, 1, k) 3-D array
# stored in State's Array{Float32} fields — confirm both are intended.
return State(
rand(dc.satellites_bottom:dc.satellites_top, dc.N, 1, N_constellations)
, rand(dc.debris_bottom:dc.debris_top, dc.N, 1, N_debris)
)
end # function
abstract type GeneralizedLoss end
struct OperatorLoss <: GeneralizedLoss
#=
This struct organizes information about a given constellation operator
It is used to provide an iterable loss function for training.
The fields each identify one aspect of the operator's decision problem:
- The economic model describing payoffs and discounting.
- The estimated NN value function held by the operator.
- The estimated NN policy function that describes each operator's actions.
- Each operator holds a reference to the parameters they can update.
- The physical world that describes how satellites progress.
There is an overriding function that uses these details to calculate the residual function based on
the bellman residuals and a maximization condition.
There are two versions of this function.
- return an actual loss calculation (MAE) using the owned policy function.
- return the loss calculation using a provided policy function.
=#
#econ model describing operator
econ_model::EconomicModel
#Operator's value and policy functions, as well as which parameters the operator can train.
operator_value_fn::Flux.Chain
collected_policies::Flux.Chain #this is held by all operators
operator_policy_params::Flux.Params #but only some of it is available for training
physics::PhysicalModel #It would be nice to move this to somewhere else in the model.
end # struct
# overriding function to calculate operator loss
# Evaluates the operator's loss under an arbitrary policy `policy` (allows
# the planner's or another operator's policy to be substituted in).
function (operator::OperatorLoss)(
    state::State
    ,policy::Flux.Chain #allow for another policy to be substituted
)
    #get actions (launch decisions) from the supplied policy
    a = policy(state)
    #get updated stocks and debris
    # FIX: was `stake_transition`, a typo for the exported `state_transition`
    # (UndefVarError at run time).
    # NOTE(review): the exported `state_transition` in physical_model.jl takes
    # (physics, state, launches, survival_rate) — confirm a 2-argument method
    # exists or thread `operator.physics` through here.
    state = state_transition(state,a)
    # Bellman residual V - r - βV' and the maximization condition -r - βV'.
    # NOTE(review): both value evaluations below use the post-transition
    # state; presumably the first term should use the pre-transition state —
    # confirm before relying on this loss.
    bellman_residuals = operator.operator_value_fn(state) - operator.econ_model(state,a) - operator.econ_model.β * operator.operator_value_fn(state)
    maximization_condition = - operator.econ_model(state,a) - operator.econ_model.β * operator.operator_value_fn(state)
    return Flux.mae(bellman_residuals.^2 ,maximization_condition)
end #function
# Convenience method: evaluate the operator loss under the operator's own
# (shared) policy chain.
(operator::OperatorLoss)(state::State) = operator(state, operator.collected_policies)
function train_operator!(op::OperatorLoss, data::State, opt)
    # One training pass for a single operator: first the policy parameters
    # this operator owns, then its value-network parameters, each with one
    # Flux.train! pass over the same data.
    for trainable in (op.operator_policy_params, Flux.params(op.operator_value_fn))
        Flux.train!(op, trainable, data, opt)
    end
end
#=
Describe the Planner's loss function
=#
struct PlannerLoss <: GeneralizedLoss
    #=
    Ideally, with just a well formed PlannerLoss and the training functions
    below, we should be able to train the approximation.
    There is an issue with appropriately training the value functions.
    In this case, it is not happening...
    =#
    #planner discount level (As it may disagree with operators)
    β::Float32
    # Per-operator losses the planner aggregates benefits over.
    operators::Array{GeneralizedLoss}
    # Planner's policy network and the parameter set trained for it.
    policy::Flux.Chain
    policy_params::Flux.Params
    # Planner's value network and the parameter set trained for it.
    value::Flux.Chain
    value_params::Flux.Params
    # Physical transition model.
    physical_model::PhysicalModel
end
# Planner loss functor: Bellman residual + maximization condition, summing
# benefits across all operators.
# NOTE(review): this body is a pre-refactor leftover — `s` and `d` are
# undefined (the state now arrives as one `State` object), so calling this
# raises UndefVarError until the marked TODOs are resolved.
function (planner::PlannerLoss)(
    state::State
)
    #TODO! Rewrite to use a new states setup.
    a = planner.policy((s ,d)) #TODO: States
    #get updated stocks and debris
    state = state_transition(s ,d ,a) #TODO: States
    #calculate the total benefit from each of the models
    benefit = sum([ co.econ_model(s ,d ,a) for co in planner.operators]) #TODO: States
    #issue here with mutating. Maybe generators/list comprehensions?
    bellman_residuals = planner.value((s,d)) - benefit - ( planner.β .* planner.value((s,d)) )#TODO: States
    maximization_condition = - benefit - planner.β .* planner.value((s,d)) #TODO: States
    return Flux.mae(bellman_residuals.^2 ,maximization_condition)
end # function
function train_planner!(pl::PlannerLoss, N_epoch::Int, opt, data_gen::DataConstructor)
    # Train the planner for N_epoch epochs: one policy-parameter pass and one
    # value-parameter pass per epoch on freshly generated data.
    # Returns the list of recorded (scaled) losses, one entry per epoch.
    # NOTE(review): `UniformDataConstructor`'s functor in this file requires
    # (N_constellations, N_debris) arguments — confirm a zero-argument method
    # exists for the constructor passed here.
    errors = []
    for i = 1:N_epoch
        data = data_gen()
        Flux.train!(pl, pl.policy_params, data, opt)
        Flux.train!(pl, pl.value_params, data, opt)
        # FIX: was `error(pl, data1)` — `data1` is undefined (UndefVarError)
        # and `error` is Base.error, which throws; record the planner loss on
        # this epoch's data instead. The /200 scale is kept from the original
        # — presumably a normalization constant; confirm.
        append!(errors, pl(data) / 200)
    end
    return errors
end # function
function train_operators!(pl::PlannerLoss, N_epoch::Int, opt, data_gen::DataConstructor)
    # Train every operator's networks for N_epoch epochs, all operators
    # sharing the same freshly generated data each epoch.
    errors = []
    for i = 1:N_epoch
        # FIX: removed a duplicated `data = data_gen()` line — the first draw
        # was discarded immediately (wasted work, and it advanced the RNG).
        data = data_gen()
        for op in pl.operators
            train_operator!(op,data,opt)
        end
    end
    # NOTE(review): `errors` is never populated; callers always receive [].
    return errors
end # function
end # end module

@ -0,0 +1,62 @@
#don't write as a module
#=
Benefit Functions:
These represent the benefits that different operators may receive
=#
# Interface for payoff/benefit models; concrete models are callable as
# (model)(state, actions) and carry a discount factor β.
abstract type EconomicModel end
#basic linear model: payoff linear in stocks, cost linear in actions
struct LinearModel <: EconomicModel
    β::Float32                    # discount factor
    payoff_array::Array{Float32}  # weights on satellite stocks
    policy_costs::Array{Float32}  # weights on (launch) actions
end
function (em::LinearModel)(
    state::State
    ,actions::Vector{Float32}
)
    # Linear payoff: revenue from satellite stocks minus the cost of actions.
    revenue = em.payoff_array * state.stocks
    action_cost = em.policy_costs * actions
    return revenue - action_cost
end
#basic CES model
struct CES <: EconomicModel
    β::Float32  # discount factor
    # NOTE(review): labeled "elasticity of substitution" by the author, but it
    # is used directly as the CES exponent in the functor below — confirm the
    # intended parameterization.
    r::Float32 #elasticity of subsitution
    payoff_array::Array{Float32}  # weights on satellite stocks
    policy_costs::Array{Float32}  # weights on (launch) actions
    debris_costs::Array{Float32}  # weights on debris
end
function (em::CES)(
    state::State
    ,actions::Vector{Float32}
)
    # CES aggregate of stock payoffs net of debris costs and action costs.
    #issue here with multiplication   (original author's note, kept)
    ces_term(weights, x) = weights .* (x .^ em.r)
    inner = ces_term(em.payoff_array, state.stocks) -
            ces_term(em.debris_costs, state.debris) -
            ces_term(em.policy_costs, actions)
    return inner .^ (1/em.r)
end
#basic CRRA
struct CRRA <: EconomicModel
    β::Float32  # discount factor
    # NOTE(review): labeled "elasticity of substitution" by the author; in the
    # functor below it appears as the curvature exponent (1 - σ) — confirm the
    # intended interpretation.
    σ::Float32 #elasticity of subsitution
    payoff_array::Array{Float32}  # weights on satellite stocks
    policy_costs::Array{Float32}  # weights on (launch) actions
    debris_costs::Array{Float32}  # weights on debris
end
function (em::CRRA)(
    state::State
    ,actions::Vector{Float32}
)
    # CRRA-style utility of the net linear payoff: (c^(1-σ) - 1) / (1 - σ).
    #issue here with multiplication   (original author's note, kept)
    # NOTE(review): σ == 1 divides by zero (the log-utility limit) — confirm
    # callers never pass σ = 1.
    core = (em.payoff_array*state.stocks - em.debris_costs*state.debris - em.policy_costs*actions).^(1 - em.σ)
    # FIX: `core - 1` was `Array - Number`, a MethodError in Julia 1.x;
    # broadcast the subtraction (and the division, for consistency).
    return (core .- 1) ./ (1 - em.σ)
end

@ -0,0 +1,101 @@
#= TupleDuplicator
This is used to create a tuple of size n with deepcopies of any object x.
Call an instance with any value to receive an n-tuple of independent copies.
=#
struct TupleDuplicator
    n::Int  # number of copies in the produced tuple
end
#make tuples consisting of copies of whatever was provided
# Specialization for State: destructure to a tuple first, then duplicate via
# the generic method below.
function (f::TupleDuplicator)(s::State)
    # NOTE(review): `state_to_tuple` is commented out in physical_model.jl yet
    # still exported — that unresolved name is the likely cause of the
    # "fails in test" note below; confirm where it is actually defined.
    st = state_to_tuple(s)
    return f(st)
    #BROKEN: Fails in test, but works in REPL.
end
# Generic method: an n-tuple of independent deep copies of `x`.
(f::TupleDuplicator)(x) = ntuple(_ -> deepcopy(x), f.n)
# Callable that wraps a Flux.Parallel branch so the same (duplicated) input
# is fed to n deep-copied replicas of the branch.
struct BranchGenerator
    n::UInt  # number of branch replicas
end
function (b::BranchGenerator)(branch::Flux.Parallel,join_fn::Function)
    # used to deepcopy the branches and duplicate the inputs in the returned chain
    f = TupleDuplicator(b.n)
    return Flux.Chain(state_to_tuple,f,Flux.Parallel(join_fn,f(branch)...))
    #note that it destructures the state to a tuple, duplicates,
    # and then passes to the parallelized functions.
    # NOTE(review): relies on `state_to_tuple`, which is commented out in
    # physical_model.jl — confirm it is defined before this is called (the
    # test suite marks this path broken).
end
# Neural Network Generators
# Build the value-function network V(stocks, debris) -> scalar: two input
# branches (stocks, debris) merged by vcat, then a dense head to one output.
# Generalized: input widths can now be passed as keywords; the defaults keep
# the original reliance on global N_constellations / N_debris (evaluated at
# call time), so existing calls behave identically.
function value_function_generator(number_params=32;
        n_constellations=N_constellations, n_debris=N_debris)
    return Flux.Chain(
        Flux.Parallel(vcat
            #parallel joins together stocks and debris, after a little bit of preprocessing
            ,Flux.Chain(
                Flux.Dense(n_constellations, number_params*2,Flux.relu)
                ,Flux.Dense(number_params*2, number_params*2,Flux.σ)
            )
            ,Flux.Chain(
                Flux.Dense(n_debris, number_params,Flux.relu)
                ,Flux.Dense(number_params, number_params,Flux.σ)
            )
        )
        #Apply some transformations to the preprocessed data.
        ,Flux.Dense(number_params*3,number_params,Flux.σ)
        ,Flux.Dense(number_params,number_params,Flux.σ)
        ,Flux.Dense(number_params,1)
    )
end
# Planner policy network: maps (stocks, debris) to one launch decision per
# constellation; the final relu keeps launches non-negative.
# NOTE(review): like value_function_generator, this relies on global
# N_constellations / N_debris being defined at call time — confirm.
function cross_linked_planner_policy_function_generator(number_params=32)
    return Flux.Chain(
        Flux.Parallel(vcat
            #parallel joins together stocks and debris
            ,Flux.Chain(
                Flux.Dense(N_constellations, number_params*2,Flux.relu)
                ,Flux.Dense(number_params*2, number_params*2,Flux.σ)
            )
            ,Flux.Chain(
                Flux.Dense(N_debris, number_params,Flux.relu)
                # NOTE(review): no activation here, unlike the σ used in the
                # value network's debris branch — confirm this is intentional.
                ,Flux.Dense(number_params, number_params)
            )
        )
        #Apply some transformations
        ,Flux.Dense(number_params*3,number_params,Flux.σ)
        ,Flux.Dense(number_params,number_params,Flux.σ)
        ,Flux.Dense(number_params,N_constellations,Flux.relu)
    )
end
function operator_policy_function_generator(
    N_constellations::Int
    ,N_debris
    ,number_params=32
)
    # Single-operator policy network: two input branches (stocks, debris)
    # merged by vcat, then a dense head mapping to one launch decision.
    # The final relu∘sinh activation keeps the output non-negative while
    # allowing it to grow faster than linearly.
    stocks_branch = Flux.Chain(
        Flux.Dense(N_constellations, number_params*2, Flux.relu),
        Flux.Dense(number_params*2, number_params*2, Flux.tanh),
    )
    debris_branch = Flux.Chain(
        Flux.Dense(N_debris, number_params, Flux.relu),
        Flux.Dense(number_params, number_params),
    )
    return Flux.Chain(
        Flux.Parallel(vcat, stocks_branch, debris_branch),
        Flux.Dense(number_params*3, number_params, Flux.tanh),
        Flux.Dense(number_params, number_params, Flux.tanh),
        Flux.Dense(number_params, 1, x -> Flux.relu.(sinh.(x))),
    )
end

@ -0,0 +1,81 @@
#=Satellite State=#
# Abstract supertype for model states (satellite stocks + orbital debris).
abstract type State end
# One state: a vector of per-constellation stocks and a vector of debris levels.
struct SingleStates <: State
    stocks::Vector{Float32}
    debris::Vector{Float32}
end
# A batch of states. NOTE(review): the layout (rows index constellations /
# debris shells, columns index samples) is implied by the test suite — confirm.
struct MultiStates <: State
    stocks::Array{Float32}
    debris::Array{Float32}
end
#function state_to_tuple(s::State)
#    return (s.stocks ,s.debris)
#end
# NOTE(review): `state_to_tuple` above is commented out but is still exported
# from Orbits.jl and used by flux_helpers.jl and the tests — restore it or
# relocate its definition.
#=Physical Model
This contains parameters describing the physical model.
=#
abstract type PhysicalModel end
#=Basic implementation of a physical model=#
struct BasicPhysics <: PhysicalModel
    #rate at which debris hits satellites
    debris_collision_rate::Real
    #rate at which satellites of different constellations collide
    satellite_collision_rates::Matrix{Float64}
    #rate at which debris exits orbits
    decay_rate::Real
    #self-generation rate entering the debris law of motion as
    #(1 - decay_rate + autocatalysis_rate) * debris
    #(original comment was truncated: "rate at which satellites" — confirm
    # the intended description)
    autocatalysis_rate::Real
    #ratio at which a collision between satellites produces debris
    satellite_collision_debris_ratio::Real
    #Ratio at which launches produce debris
    launch_debris_ratio::Real
end
function state_transition(
    physics::BasicPhysics
    ,state::State
    ,launches::Vector{Float32}
    ,survival_rate::Function
)
    #=
    Advance satellite stocks and orbital debris one period.
    - physics: model parameters (collision, decay, debris-generation rates)
    - state: current stocks and debris
    - launches: new satellites launched per constellation
    - survival_rate: function (state, physics) -> per-constellation survival
      probabilities
    Returns the next-period state.
    =#
    survival_rates = survival_rate(state,physics)
    # Debris transitions
    # get changes in debris from natural dynamics
    natural_debris_dynamics = (1 - physics.decay_rate + physics.autocatalysis_rate) * state.debris
    # get changes in debris from satellite loss
    satellite_loss_debris = physics.satellite_collision_debris_ratio * (1 .- survival_rates)' * state.stocks
    # get changes in debris from launches
    launch_debris = physics.launch_debris_ratio * sum(launches)
    # total debris level
    debris = natural_debris_dynamics .+ satellite_loss_debris .+ launch_debris
    # Stocks Transitions
    # FIX: decay must reduce each constellation's own survival probability
    # (the diagonal) only; the previous `(diagm(survival_rates) .- decay)`
    # also wrote -decay into every off-diagonal entry.  The notebook reference
    # implementation uses diagm(survival .- decay).
    # NOTE(review): diagm requires a vector — for batched MultiStates
    # survival_rates may be a matrix; confirm, cf. the TODO in
    # survival_rates_1.
    stocks = LinearAlgebra.diagm(survival_rates .- physics.decay_rate)*state.stocks + launches
    # FIX: `State` is abstract and has no constructor; build the concrete
    # MultiStates (both fields are Array{Float32}, so vectors also fit).
    return MultiStates(stocks,debris)
end
function survival_rates_1(
    #This function describes the rate at which satellites survive each period.
    state::State
    ,physical_model::BasicPhysics
)
    # Per-constellation survival probability:
    #   exp(-(collision_rates + decay_rate) * stocks - debris_rate * debris)
    #TODO! get this to broadcast correctly.
    # NOTE(review): for batched MultiStates the matrix product and the debris
    # term can have incompatible shapes — presumably the broadcasting TODO
    # above; confirm before using with batches.
    return exp.(-(physical_model.satellite_collision_rates .+ physical_model.decay_rate) * state.stocks .- (physical_model.debris_collision_rate * state.debris))
end

@ -0,0 +1,54 @@
using Test,Flux
include("../src/Orbits.jl")
using .Orbits
#=
Structure:
This is broken into three parts
    - The test_interfaces module contains various tools used in testing
    - The test_routines includes functions that setup, run, and teardown tests
    - Everything else is a set of tests using the standard testing tools in Julia
=#
@testset "Overall" verbose=true begin
    @testset "TupleDuplicator" verbose=true begin
        #Check if tuple duplicator duplicates something
        td2 = Orbits.TupleDuplicator(2)
        @test typeof(td2) <: Orbits.TupleDuplicator
        @test td2(([1,2],[3])) == (([1,2],[3]),([1,2],[3]))
        # NOTE(review): `State` is declared abstract in physical_model.jl, so
        # `State([1.0,2],[3])` has no constructor — these lines presumably
        # predate the SingleStates/MultiStates split; confirm and update.
        st = State([1.0,2],[3])
        @test td2((st.stocks,st.debris)) == (([1.0,2],[3]),([1.0,2],[3]))
        # NOTE(review): `state_to_tuple` is commented out in physical_model.jl
        # though still exported — restore it before running this test.
        @test td2(state_to_tuple(st)) == (([1.0,2],[3]),([1.0,2],[3]))
        @test td2(st) == ((st.stocks,st.debris),(st.stocks,st.debris))
    end
    @testset "BranchGenerator" verbose=true begin
        st = State([1.0,2],[3])
        tp = state_to_tuple(st)
        bg = BranchGenerator(2)
        branch = Flux.Parallel(vcat, Dense(2,1),Dense(1,1))
        branched = bg(branch,vcat)
        #type tests
        @test typeof(branch(tp)) <: Array{Float32}
        @test_broken typeof(branched[2](tp)) <: Array{Float32}
        #ISSUE: what am I really looking for here?
        #Check behaviors of the
        @test branched[1](st) == state_to_tuple(st) #Evaluation is of the wrong approach
    end #branch generator
end #overall testset

@ -0,0 +1,62 @@
using Test, Flux, LinearAlgebra
include("../src/Orbits.jl")
using .Orbits
#=
The purpose of this document is to organize tests of the state structs and state transition functions
=#
@testset "States and Physical models testing" verbose=true begin
    n_const = 2
    n_debr = 3
    n_data = 5
    #built structs
    u = UniformDataConstructor(n_data,0,5,2,3)
    s = u(n_const,n_debr)
    b = BasicPhysics(
        0.05
        # FIX: `ones` lives in Base, not LinearAlgebra; the qualified name is
        # unnecessary and fragile — call it unqualified.
        ,0.02*ones(n_const,n_const)
        ,0.1
        ,0.002
        ,0.002
        ,0.2
    )
    a2 = ones(n_const,n_data)
    #test that dimensions match etc
    @test size(b.satellite_collision_rates)[1] == size(s.stocks)[1]
    @test size(b.satellite_collision_rates)[2] == size(s.stocks)[1]
    @test n_data == size(s.stocks)[2]
    @test n_data == size(s.debris)[2]
    @test size(s.stocks) == size(a2)
    @testset "DataConstructor and states" begin
        @test u.N == 5
        @test length(s.debris) != 3
        @test length(s.stocks) != 2
        @test length(s.stocks) == 10
        @test length(s.debris) == 15
        @test size(s.stocks) == (2,5)
        @test size(s.debris) == (3,5)
    end
    @testset "BasicPhysics" begin
        @testset "Survival Functions" verbose = true begin
            # FIX: `<:` applies only to types; `survival_rates_1(s,b)` is a
            # value, so the original `value <: AbstractArray` raised a
            # TypeError instead of testing anything. Use `isa`.
            @test survival_rates_1(s,b) isa AbstractArray
        end
        @testset "Transitions" begin
            #TODO: placeholder until state_transition tests are written.
            @test true
        end
    end
end #States and physical models testing

@ -0,0 +1,8 @@
{
"folders": [
{
"path": ".."
}
],
"settings": {}
}

@ -0,0 +1,617 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 124,
"id": "3d049ef3-2630-4064-a2bd-ee28c7a89e9e",
"metadata": {},
"outputs": [],
"source": [
"using LinearAlgebra, Zygote, Tullio"
]
},
{
"cell_type": "markdown",
"id": "cfa5d466-5354-4ef4-a4ab-2bf2ed69626e",
"metadata": {},
"source": [
"### notes\n",
"\n",
"Notice how this approach estimates policy and the value function in a single go.\n",
"While you could eliminate the value function partials, it would be much more complex.\n",
"Note that in RL, both must be iterated on."
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "2bc6f9bd-8308-40a0-bee4-acbb1be4672d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"6"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#Model dimensions\n",
"N_constellations = 5\n",
"N_debris = 1\n",
"N_states = N_constellations + N_debris"
]
},
{
"cell_type": "markdown",
"id": "15670d3b-9651-4fc5-b98a-29ba60b4b38e",
"metadata": {},
"source": [
"## Build Payoff functions"
]
},
{
"cell_type": "code",
"execution_count": 156,
"id": "1e99275a-9ab5-4c9e-921b-9587145b1fb5",
"metadata": {},
"outputs": [],
"source": [
"stocks = rand(1:N_constellations,N_constellations);\n",
"debris = rand(1:3);\n",
"payoff = 3*I + ones(5,5) .+ [1,0,0,0,0]; #TODO: move this into a struct\n",
"β=0.95\n",
"launches = ones(N_constellations);"
]
},
{
"cell_type": "code",
"execution_count": 157,
"id": "a4e4bbaf-61de-4860-adc2-1e91f0626ead",
"metadata": {},
"outputs": [],
"source": [
"#Define the market profit function\n",
"F(stocks,debris,payoff,launches) = payoff*stocks + 3.0*launches .+ (debris*-0.2)\n",
"\n",
"#create derivative functions\n",
"∂f_∂stocks(st,debris,payoff,launches) = Zygote.jacobian(s -> F(s,debris,payoff,launches),st)[1];\n",
"∂f_∂debris(stocks,d,payoff,launches) = Zygote.jacobian(s -> F(stocks,s,payoff,launches),d)[1];\n",
"∂f_∂launches(stocks,debris,payoff,launches) = Zygote.jacobian(l -> F(stocks,debris,payoff,l),launches)[1];\n"
]
},
{
"cell_type": "code",
"execution_count": 158,
"id": "ad73e64a-2fa5-4bdf-a4e4-22357b53002c",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×5 Matrix{Float64}:\n",
" 5.0 2.0 2.0 2.0 2.0\n",
" 1.0 4.0 1.0 1.0 1.0\n",
" 1.0 1.0 4.0 1.0 1.0\n",
" 1.0 1.0 1.0 4.0 1.0\n",
" 1.0 1.0 1.0 1.0 4.0"
]
},
"execution_count": 158,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂f_∂stocks(stocks,debris,payoff,launches) \n",
"#Rows are constellations, columns are derivatives indexed by constellations"
]
},
{
"cell_type": "code",
"execution_count": 159,
"id": "a6159740-8ef2-40e0-b10f-c451cf78418d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5-element Vector{Float64}:\n",
" -0.2\n",
" -0.2\n",
" -0.2\n",
" -0.2\n",
" -0.2"
]
},
"execution_count": 159,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂f_∂debris(stocks,debris,payoff,launches)"
]
},
{
"cell_type": "code",
"execution_count": 160,
"id": "b46da3d9-1765-4427-b3de-bf0a9fdd6576",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×5 Matrix{Float64}:\n",
" 3.0 0.0 0.0 0.0 0.0\n",
" 0.0 3.0 0.0 0.0 0.0\n",
" 0.0 0.0 3.0 0.0 0.0\n",
" 0.0 0.0 0.0 3.0 0.0\n",
" 0.0 0.0 0.0 0.0 3.0"
]
},
"execution_count": 160,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂f_∂launches(stocks,debris,payoff,launches)"
]
},
{
"cell_type": "markdown",
"id": "7a3a09cc-e48d-4c27-9e4f-02e45a5fc5c1",
"metadata": {},
"source": [
"## Building Physical Model"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "638ed4ac-36b0-4efb-bcb4-13d345470dd7",
"metadata": {},
"outputs": [],
"source": [
"struct BasicModel\n",
" #rate at which debris hits satellites\n",
" debris_collision_rate\n",
" #rate at which satellites of different constellations collide\n",
" satellite_collision_rates\n",
" #rate at which debris exits orbits\n",
" decay_rate\n",
" #rate at which satellites\n",
" autocatalysis_rate\n",
" #ratio at which a collision between satellites produced debris\n",
" satellite_collision_debris_ratio\n",
" #Ratio at which launches produce debris\n",
" launch_debris_ratio\n",
"end\n",
"\n",
"#Getting loss parameters together.\n",
"loss_param = 2e-3;\n",
"loss_weights = loss_param*(ones(N_constellations,N_constellations) - I);\n",
"\n",
"#orbital decay rate\n",
"decay_param = 0.01;\n",
"\n",
"#debris generation parameters\n",
"autocatalysis_param = 0.001;\n",
"satellite_loss_debris_rate = 5.0;\n",
"launch_debris_rate = 0.05;\n",
"\n",
"#Todo, wrap physical model as a struct with the parameters\n",
"bm = BasicModel(\n",
" loss_param\n",
" ,loss_weights\n",
" ,decay_param\n",
" ,autocatalysis_param\n",
" ,satellite_loss_debris_rate\n",
" ,launch_debris_rate\n",
");"
]
},
{
"cell_type": "code",
"execution_count": 163,
"id": "ae04a4f2-7401-450e-ba98-bfec375b7646",
"metadata": {},
"outputs": [],
"source": [
"#percentage survival function\n",
"function survival(stocks,debris,physical_model) \n",
" exp.(-physical_model.satellite_collision_rates*stocks .- (physical_model.debris_collision_rate*debris));\n",
"end\n",
"\n",
"#stock update rules\n",
"function G(stocks,debris,launches, physical_model)\n",
" return diagm(survival(stocks,debris,physical_model) .- physical_model.decay_rate)*stocks + launches\n",
"end;\n",
"\n",
"#stock evolution wrt various things\n",
"∂G_∂launches(stocks,debris,launches,bm) = Zygote.jacobian(x -> G(stocks,debris,x,bm),launches)[1];\n",
"∂G_∂debris(stocks,debris,launches,bm) = Zygote.jacobian(x -> G(stocks,x,launches,bm),debris)[1];\n",
"∂G_∂stocks(stocks,debris,launches,bm) = Zygote.jacobian(x -> G(x,debris,launches,bm),stocks)[1];\n",
"\n",
"#debris evolution \n",
"function H(stocks,debris,launches,physical_model)\n",
" #get changes in debris from natural dynamics\n",
" natural_debris_dynamics = (1-physical_model.decay_rate+physical_model.autocatalysis_rate) * debris \n",
" \n",
" #get changes in debris from satellite loss\n",
" satellite_loss_debris = physical_model.satellite_collision_debris_ratio * (1 .- survival(stocks,debris,physical_model))'*stocks \n",
" \n",
" #get changes in debris from launches\n",
" launch_debris = physical_model.launch_debris_ratio*sum(launches)\n",
" \n",
" #return total debris level\n",
" return natural_debris_dynamics + satellite_loss_debris + launch_debris\n",
"end;\n",
"\n",
"#get jacobians of debris dynamics\n",
"∂H_∂launches(stocks,debris,launches,physical_model) = Zygote.jacobian(x -> H(stocks,debris,x,physical_model),launches)[1];\n",
"∂H_∂debris(stocks,debris,launches,physical_model) = Zygote.jacobian(x -> H(stocks,x,launches,physical_model),debris)[1];\n",
"∂H_∂stocks(stocks,debris,launches,physical_model) = Zygote.jacobian(x -> H(x,debris,launches,physical_model),stocks)[1];"
]
},
{
"cell_type": "code",
"execution_count": 166,
"id": "c6519b6e-f9ef-466e-a9ac-746b8f36b9e7",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×5 Matrix{Float64}:\n",
" 1.0 0.0 0.0 0.0 0.0\n",
" 0.0 1.0 0.0 0.0 0.0\n",
" 0.0 0.0 1.0 0.0 0.0\n",
" 0.0 0.0 0.0 1.0 0.0\n",
" 0.0 0.0 0.0 0.0 1.0"
]
},
"execution_count": 166,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂G_∂launches(stocks,debris,launches,bm)"
]
},
{
"cell_type": "code",
"execution_count": 167,
"id": "1d46c09d-2c91-4302-8e8d-1c0abeb010aa",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5-element Vector{Float64}:\n",
" -0.003843157756609293\n",
" -0.009665715046375067\n",
" -0.009665715046375067\n",
" -0.009665715046375067\n",
" -0.003843157756609293"
]
},
"execution_count": 167,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂G_∂debris(stocks,debris,launches,bm)"
]
},
{
"cell_type": "code",
"execution_count": 168,
"id": "1f791c8a-5457-4ec2-bf0b-68eb6bdd578f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×5 Matrix{Float64}:\n",
" 0.950789 -0.00384316 -0.00384316 -0.00384316 -0.00384316\n",
" -0.00966572 0.956572 -0.00966572 -0.00966572 -0.00966572\n",
" -0.00966572 -0.00966572 0.956572 -0.00966572 -0.00966572\n",
" -0.00966572 -0.00966572 -0.00966572 0.956572 -0.00966572\n",
" -0.00384316 -0.00384316 -0.00384316 -0.00384316 0.950789"
]
},
"execution_count": 168,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂G_∂stocks(stocks,debris,launches,bm)"
]
},
{
"cell_type": "code",
"execution_count": 169,
"id": "d97f9af0-d3a9-4739-b569-fa100a1590f3",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1×5 Matrix{Float64}:\n",
" 0.05 0.05 0.05 0.05 0.05"
]
},
"execution_count": 169,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂H_∂launches(stocks,debris,launches,bm)"
]
},
{
"cell_type": "code",
"execution_count": 170,
"id": "1a7c857f-7eee-4273-b954-3a7b84a3b3e0",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1-element Vector{Float64}:\n",
" 1.174417303261719"
]
},
"execution_count": 170,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂H_∂debris(stocks,debris,launches,bm) "
]
},
{
"cell_type": "code",
"execution_count": 171,
"id": "9ea44146-740c-47c0-9286-846585a4db39",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1×5 Matrix{Float64}:\n",
" 0.360254 0.302231 0.302231 0.302231 0.360254"
]
},
"execution_count": 171,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂H_∂stocks(stocks,debris,launches,bm)\n",
"#columns are derivatives"
]
},
{
"cell_type": "markdown",
"id": "17038bfe-c7c2-484b-9aaf-6b608bbbbfab",
"metadata": {},
"source": [
"## Build optimality conditions"
]
},
{
"cell_type": "markdown",
"id": "ef3c7625-e5ee-4220-8bed-b289d0baea1e",
"metadata": {},
"source": [
"## Build transition conditions\n",
"\n",
"I've built the transition conditions below."
]
},
{
"cell_type": "code",
"execution_count": 172,
"id": "c3ef6bcc-37ca-4f6c-b876-8ea2a068cbe1",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×5 Matrix{Float64}:\n",
" 2.0 1.0 1.0 1.0 1.0\n",
" 1.0 2.0 1.0 1.0 1.0\n",
" 1.0 1.0 2.0 1.0 1.0\n",
" 1.0 1.0 1.0 2.0 1.0\n",
" 1.0 1.0 1.0 1.0 2.0"
]
},
"execution_count": 172,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"W_∂stocks= ones(5,5) + I\n",
"# columns are stocks\n",
"# rows are derivatives"
]
},
{
"cell_type": "code",
"execution_count": 174,
"id": "5441641b-308b-4fec-a946-2568e79a1203",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×1 Matrix{Float64}:\n",
" 2.0\n",
" 2.0\n",
" 2.0\n",
" 2.0\n",
" 2.0"
]
},
"execution_count": 174,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
" #temporary value partials\n",
"W_∂debris = 2*ones(5,1) #temporary value partials\n",
"#columns are\n",
"#rows are"
]
},
{
"cell_type": "code",
"execution_count": 106,
"id": "819df0f9-e994-4461-9329-4c4607de8779",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×5 Matrix{Float64}:\n",
" 0.966286 -0.00195257 -0.00195257 -0.00195257 -0.00195257\n",
" -0.00785729 0.972161 -0.00785729 -0.00785729 -0.00785729\n",
" -0.00588119 -0.00588119 0.970199 -0.00588119 -0.00588119\n",
" -0.00195257 -0.00195257 -0.00195257 0.966286 -0.00195257\n",
" -0.00391296 -0.00391296 -0.00391296 -0.00391296 0.96824"
]
},
"execution_count": 106,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"∂G_∂stocks(stocks,debris,launches,bm)"
]
},
{
"cell_type": "code",
"execution_count": 108,
"id": "5cc19637-9727-479e-aa60-a30c71e9e8b8",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×5 Matrix{Float64}:\n",
" 1.91695 1.91695 1.91695 1.91695 1.91695\n",
" 1.88146 1.88146 1.88146 1.88146 1.88146\n",
" 1.89335 1.89335 1.89335 1.89335 1.89335\n",
" 1.91695 1.91695 1.91695 1.91695 1.91695\n",
" 1.90518 1.90518 1.90518 1.90518 1.90518"
]
},
"execution_count": 108,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"a = ∂f_∂stocks(stocks,debris,payoff,launches)\n",
"b = ∂G_∂stocks(stocks,debris,launches,bm) * W_∂stocks #This last bit should eventually get replaced with a NN\n",
"#Need to check dimensionality above. Not sure which direction is derivatives and which is functions."
]
},
{
"cell_type": "code",
"execution_count": 119,
"id": "65917e4c-bd25-4683-8553-17bc841e594a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"5×1 Matrix{Float64}:\n",
" -0.019525714195158184\n",
" -0.07857288258866406\n",
" -0.05881192039840531\n",
" -0.019525714195158184\n",
" -0.039129609402048404"
]
},
"execution_count": 119,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"c = (∂G_∂debris(stocks,debris,launches,bm) .* ones(1,5)) * W_∂debris"
]
},
{
"cell_type": "code",
"execution_count": 59,
"id": "dd5ca5b4-c910-449c-a322-b24b3ef4ecf2",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"49.45445380487922"
]
},
"execution_count": 59,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"loss(stocks,debris,payoff,launches)"
]
},
{
"cell_type": "code",
"execution_count": 60,
"id": "8ead6e86-3d03-48ff-bc3e-b3fa9808f981",
"metadata": {},
"outputs": [],
"source": [
"#TODO: create a launch model in flux and see if I can get it to do pullbacks"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "658ba7f0-dfbc-42da-bb20-7f8bccbb257d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Julia 1.6.2",
"language": "julia",
"name": "julia-1.6"
},
"language_info": {
"file_extension": ".jl",
"mimetype": "application/julia",
"name": "julia",
"version": "1.6.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

@ -0,0 +1,327 @@
{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"id": "0b5021da-575c-4db3-9e01-dc043a7c64b3",
"metadata": {},
"outputs": [],
"source": [
"using DiffEqFlux,Flux,Zygote, LinearAlgebra"
]
},
{
"cell_type": "code",
"execution_count": 2,
"id": "32ca6032-9d48-4bb2-b16e-4a66473464cd",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"2"
]
},
"execution_count": 2,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"const N_constellations = 1\n",
"const N_debris = 1\n",
"const N_states= N_constellations + N_debris"
]
},
{
"cell_type": "code",
"execution_count": 5,
"id": "77213ba3-1645-45b2-903f-b7f2817cbb47",
"metadata": {},
"outputs": [],
"source": [
"#setup physical model\n",
"struct BasicModel\n",
" #rate at which debris hits satellites\n",
" debris_collision_rate\n",
" #rate at which satellites of different constellations collide\n",
" satellite_collision_rates\n",
" #rate at which debris exits orbits\n",
" decay_rate\n",
" #rate at which satellites\n",
" autocatalysis_rate\n",
" #ratio at which a collision between satellites produced debris\n",
" satellite_collision_debris_ratio\n",
" #Ratio at which launches produce debris\n",
" launch_debris_ratio\n",
"end\n",
"\n",
"#Getting loss parameters together.\n",
"loss_param = 2e-3;\n",
"loss_weights = loss_param*(ones(N_constellations,N_constellations) - I);\n",
"\n",
"#orbital decay rate\n",
"decay_param = 0.01;\n",
"\n",
"#debris generation parameters\n",
"autocatalysis_param = 0.001;\n",
"satellite_loss_debris_rate = 5.0;\n",
"launch_debris_rate = 0.05;\n",
"\n",
"#Todo, wrap physical model as a struct with the parameters\n",
"bm = BasicModel(\n",
" loss_param\n",
" ,loss_weights\n",
" ,decay_param\n",
" ,autocatalysis_param\n",
" ,satellite_loss_debris_rate\n",
" ,launch_debris_rate\n",
");\n",
"\n",
"#implement transition function\n",
"#percentage survival function\n",
"function survival(stocks,debris,physical_model) \n",
" exp.(-physical_model.satellite_collision_rates*stocks .- (physical_model.debris_collision_rate*debris));\n",
"end\n",
"\n",
"#stock update rules\n",
"function G(stocks,debris,launches, physical_model)\n",
" return diagm(survival(stocks,debris,physical_model) .- physical_model.decay_rate)*stocks + launches\n",
"end;\n",
"\n",
"\n",
"#debris evolution \n",
"function H(stocks,debris,launches,physical_model)\n",
" #get changes in debris from natural dynamics\n",
" natural_debris_dynamics = (1-physical_model.decay_rate+physical_model.autocatalysis_rate) * debris \n",
" \n",
" #get changes in debris from satellite loss\n",
" satellite_loss_debris = physical_model.satellite_collision_debris_ratio * (1 .- survival(stocks,debris,physical_model))'*stocks \n",
" \n",
" #get changes in debris from launches\n",
" launch_debris = physical_model.launch_debris_ratio*sum(launches)\n",
" \n",
" #return total debris level\n",
" return natural_debris_dynamics + satellite_loss_debris + launch_debris\n",
"end;\n",
"\n",
"\n",
"#implement reward function\n",
"const payoff = 3*I - 0.02*ones(N_constellations,N_constellations)\n",
"\n",
"#Define the market profit function\n",
"F(stocks,debris,launches) = payoff*stocks + 3.0*launches .+ (debris*-0.2)\n",
"\n"
]
},
{
"cell_type": "code",
"execution_count": 18,
"id": "998a1ce8-a6ba-427d-a5d1-fece358146da",
"metadata": {},
"outputs": [],
"source": [
"# Launch function\n",
"launches = Chain(\n",
" Parallel(vcat\n",
" #parallel joins together stocks and debris, along with intermediate interpretation\n",
" ,Chain(Dense(N_constellations, N_states*2,relu)\n",
" ,Dense(N_states*2, N_states*2,relu)\n",
" )\n",
" ,Chain(Dense(N_debris, N_states,relu)\n",
" ,Dense(N_states, N_states,relu)\n",
" )\n",
" #chain gets applied to parallel\n",
" ,Dense(N_states*3,128,relu)\n",
" #,Dense(128,128,relu)\n",
" ,Dense(128,N_constellations,relu)\n",
" )\n",
");\n",
"\n",
"#Value functions\n",
"∂value = Chain(\n",
" Parallel(vcat\n",
" #parallel joins together stocks and debris, along with intermediate interpretation\n",
" ,Chain(Dense(N_constellations, N_states*2,relu)\n",
" ,Dense(N_states*2, N_states*2,relu)\n",
" )\n",
" ,Chain(Dense(N_debris, N_states,relu)\n",
" ,Dense(N_states, N_states,relu)\n",
" )\n",
" #chain gets applied to parallel\n",
" ,Dense(N_states*3,128,relu)\n",
" #,Dense(128,128,relu)\n",
" ,Dense(128,N_states,relu)\n",
" )\n",
");"
]
},
{
"cell_type": "code",
"execution_count": 6,
"id": "b4409af2-7f41-45bc-b7eb-4bda019e4092",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1-element Vector{Float64}:\n",
" 0.0"
]
},
"execution_count": 6,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"#Extract parameter sets\n",
"\n",
"#= initialize Algorithm Parameters\n",
"Chose these randomly\n",
"=#\n",
"λʷ = 0.5\n",
"αʷ = 5.0\n",
"λᶿ = 0.5\n",
"αᶿ = 5.0\n",
"αʳ = 10\n",
"\n",
"# initialize averaging returns\n",
"r = zeros(N_constellations)"
]
},
{
"cell_type": "code",
"execution_count": 7,
"id": "0529c209-55c0-49c7-815b-47578b029593",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"1-element Vector{Int64}:\n",
" 3"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# initial states\n",
"S₀ = rand(1:5,N_constellations)\n",
"D₀ = rand(1:3, N_debris)"
]
},
{
"cell_type": "code",
"execution_count": 16,
"id": "ae7d4152-77b0-42ff-92f6-9d5d83d6a39d",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Params([Float32[-0.110574484; 1.0583764; 0.039519094; 0.2908444], Float32[0.0, 0.0, 0.0, 0.0], Float32[0.4765299 0.5994208 -0.43710196 0.2269359; 0.5550531 0.5423604 -0.796175 0.76214457; 0.59269524 0.7436546 0.02525105 0.85908467; 0.3774994 -0.111040816 0.84196734 -0.18133782], Float32[0.0, 0.0, 0.0, 0.0], Float32[-1.2003294; -1.24031], Float32[0.0, 0.0], Float32[-0.004074011 -0.84631246; -0.5459394 1.1513239], Float32[0.0, 0.0], Float32[-0.19545768 -0.20670874 … 0.06923863 -0.09825141; 0.097166725 0.06564395 … -0.1928437 0.19962357; … ; 0.025075339 -0.06016964 … 0.0838129 -0.11523932; 0.20085223 0.16679004 … 0.016495213 -0.1548977], Float32[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 … 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], Float32[-0.21479565 0.0090183215 … -0.2022802 -0.19925424], Float32[0.0]])"
]
},
"execution_count": 16,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"launch_params = Flux.params(launches)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"id": "232c0a44-be74-4431-a86e-dbc71e83c17a",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"loss (generic function with 1 method)"
]
},
"execution_count": 21,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"function loss(stocks,debris)\n",
" sum(launches((stocks,debris)))\n",
"end"
]
},
{
"cell_type": "code",
"execution_count": 25,
"id": "e1e1cdef-b164-43b6-a80e-b8665bdf9b14",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Grads(...)"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"g = Flux.gradient(() -> loss(S₀,D₀), launch_params)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"id": "35d1763b-4650-4916-957d-fbb436280e1f",
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"Params([Float32[-0.110574484; 1.0583764; 0.039519094; 0.2908444], Float32[0.0, 0.0, 0.0, 0.0], Float32[0.4765299 0.5994208 -0.43710196 0.2269359; 0.5550531 0.5423604 -0.796175 0.76214457; 0.59269524 0.7436546 0.02525105 0.85908467; 0.3774994 -0.111040816 0.84196734 -0.18133782], Float32[0.0, 0.0, 0.0, 0.0], Float32[-1.2003294; -1.24031], Float32[0.0, 0.0], Float32[-0.004074011 -0.84631246; -0.5459394 1.1513239], Float32[0.0, 0.0], Float32[-0.19545768 -0.20670874 … 0.06923863 -0.09825141; 0.097166725 0.06564395 … -0.1928437 0.19962357; … ; 0.025075339 -0.06016964 … 0.0838129 -0.11523932; 0.20085223 0.16679004 … 0.016495213 -0.1548977], Float32[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 … 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], Float32[-0.21479565 0.0090183215 … -0.2022802 -0.19925424], Float32[0.0]])"
]
},
"execution_count": 28,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"launch_params"
]
},
{
"cell_type": "code",
"execution_count": null,
"id": "eaad2871-54ed-4674-8405-d4ebb950851d",
"metadata": {},
"outputs": [],
"source": []
}
],
"metadata": {
"kernelspec": {
"display_name": "Julia 1.6.2",
"language": "julia",
"name": "julia-1.6"
},
"language_info": {
"file_extension": ".jl",
"mimetype": "application/julia",
"name": "julia",
"version": "1.6.2"
}
},
"nbformat": 4,
"nbformat_minor": 5
}

Binary file not shown.
Loading…
Cancel
Save