Merged work sitting in bitbucket into local repo
commit
7ec2ede00e
@ -0,0 +1,10 @@
|
||||
%----------------------------------------------------------------------------------------
|
||||
% BIBLIOGRAPHY SETUP
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
|
||||
%%% Setup Bibliography
|
||||
\usepackage[backend=biber,style=apa,autocite=inline]{biblatex}
|
||||
\addbibresource{../Assets/preambles/References.bib}
|
||||
|
||||
|
||||
@ -0,0 +1,38 @@
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
%
|
||||
% Based on a template downloaded from:
|
||||
% http://www.LaTeXTemplates.com
|
||||
%
|
||||
% License:
|
||||
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
|
||||
%
|
||||
% Changed theme to WSU by William King
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
% COLORS AND THEMES
|
||||
%----------------------------------------------------------------------------------------
|
||||
%%% Setup color
|
||||
\usetheme{Berkeley}
|
||||
\definecolor{WSUred}{RGB}{152,30,50}
|
||||
\definecolor{WSUgrey}{RGB}{94,106,113}
|
||||
\setbeamercolor{palette primary}{bg=WSUred,fg=white}
|
||||
\setbeamercolor{palette secondary}{bg=WSUred,fg=white}
|
||||
\setbeamercolor{palette tertiary}{bg=WSUred,fg=white}
|
||||
\setbeamercolor{palette quaternary}{bg=WSUred,fg=white}
|
||||
\setbeamercolor{structure}{fg=WSUgrey} % itemize, enumerate, etc
|
||||
\setbeamercolor{section in toc}{fg=WSUred} % TOC sections
|
||||
\setbeamercolor{block body}{fg=WSUred} % block body sections
|
||||
|
||||
|
||||
%\setbeamertemplate{footline} % To remove the footer line in all slides uncomment this line
|
||||
\setbeamertemplate{footline}[page number] % To replace the footer line in all slides with a simple slide count uncomment this line
|
||||
\setbeamertemplate{navigation symbols}{} % To remove the navigation symbols from the bottom of all slides uncomment this line
|
||||
|
||||
|
||||
%%% setup packages
|
||||
\usepackage{graphicx} % Allows including images
|
||||
\graphicspath{{./img/}} %Set a graphics path
|
||||
\usepackage{booktabs} % Allows the use of \toprule, \midrule and \bottomrule in tables
|
||||
|
||||
@ -0,0 +1,16 @@
|
||||
%Include other generally important packages
|
||||
|
||||
\usepackage{hyperref} % Allows for weblinks
|
||||
\hypersetup{
|
||||
colorlinks=true,
|
||||
citebordercolor=WSUgrey,
|
||||
citecolor=WSUred,
|
||||
linkcolor=WSUred,
|
||||
urlcolor=Blue
|
||||
}
|
||||
|
||||
|
||||
\usepackage{cleveref}
|
||||
|
||||
%Add algorithms
|
||||
\usepackage{algorithm,algpseudocode}
|
||||
@ -0,0 +1,20 @@
|
||||
|
||||
%%%%%%%%%Packages%%%%%%%%%%%%%%%
|
||||
\usepackage{amsmath}
|
||||
\usepackage{mathtools}
|
||||
\usepackage{amsthm}
|
||||
\usepackage{amssymb}
|
||||
\usepackage{thmtools, thm-restate}
|
||||
|
||||
|
||||
%%%%%%%%%%%% MATH FORMATTING %%%%%%%%%%%%%%%%%%%%%
|
||||
%Helpful bits
|
||||
\newcommand{\bb}[1]{\mathbb{#1}}
|
||||
|
||||
%Derivatives etc.
|
||||
\newcommand{\parder}[3]{\ensuremath{ \frac{\partial^{#3} #1}{\partial #2~^{#3}}}}
|
||||
\newcommand{\der}[3]{\ensuremath{ \frac{d^{#3} #1}{d #2~^{#3}}}}
|
||||
|
||||
%Math Operators
|
||||
\DeclareMathOperator{\argmax}{argmax}
|
||||
\DeclareMathOperator{\argmin}{argmin}
|
||||
@ -0,0 +1,12 @@
|
||||
%-----------------------------------------------------------
|
||||
% Tikzit Preamble
|
||||
%-----------------------------------------------------------
|
||||
|
||||
%Setup graphing
|
||||
\usepackage{tikz}
|
||||
\usepackage{tikzit}
|
||||
\input{../Assets/preambles/WSU_Econ.tikzstyles}
|
||||
%\input{../Assets/preambles/tikzit.sty}
|
||||
|
||||
|
||||
|
||||
@ -0,0 +1,32 @@
|
||||
% TiKZ style file generated by TikZiT. You may edit this file manually,
|
||||
% but some things (e.g. comments) may be overwritten. To be readable in
|
||||
% TikZiT, the only non-comment lines must be of the form:
|
||||
% \tikzstyle{NAME}=[PROPERTY LIST]
|
||||
|
||||
% Node styles
|
||||
\tikzstyle{CrimsonNode}=[fill={rgb,255: red,152; green,30; blue,50}, draw={rgb,255: red,152; green,30; blue,50}, shape=circle, tikzit category=WSU, tikzit draw={rgb,255: red,152; green,30; blue,50}, tikzit fill={rgb,255: red,152; green,30; blue,50}]
|
||||
\tikzstyle{GreyNode}=[fill={rgb,255: red,94; green,106; blue,113}, draw={rgb,255: red,94; green,106; blue,113}, shape=circle, tikzit category=WSU, tikzit draw={rgb,255: red,94; green,106; blue,113}, tikzit fill={rgb,255: red,94; green,106; blue,113}]
|
||||
\tikzstyle{Box}=[fill={rgb,255: red,94; green,106; blue,113}, draw={rgb,255: red,94; green,106; blue,113}, shape=rectangle, tikzit draw={rgb,255: red,94; green,106; blue,113}, tikzit fill={rgb,255: red,94; green,106; blue,113}]
|
||||
\tikzstyle{Red Box}=[fill={rgb,255: red,152; green,30; blue,50}, draw={rgb,255: red,152; green,30; blue,50}, shape=rectangle]
|
||||
\tikzstyle{new style 0}=[fill=white, draw=black, shape=circle, tikzit draw=black]
|
||||
\tikzstyle{new style 1}=[fill={rgb,255: red,128; green,0; blue,128}, draw=black, shape=circle]
|
||||
\tikzstyle{Box}=[fill=white, draw=black, shape=rectangle]
|
||||
\tikzstyle{rotated text}=[fill=none, draw=none, shape=circle, rotate=270, tikzit draw={rgb,255: red,191; green,191; blue,191}]
|
||||
|
||||
% Edge styles
|
||||
\tikzstyle{RightArrow}=[->]
|
||||
\tikzstyle{LeftRightArrow}=[<->]
|
||||
\tikzstyle{CrimsonBar}=[-, draw={rgb,255: red,152; green,30; blue,50}]
|
||||
\tikzstyle{GreyBar}=[-, draw={rgb,255: red,94; green,106; blue,113}, tikzit draw={rgb,255: red,94; green,106; blue,113}]
|
||||
\tikzstyle{divider}=[draw={rgb,255: red,64; green,64; blue,64}, dashed, dash pattern=on 2mm off 1, -]
|
||||
\tikzstyle{bars}=[{|-|}]
|
||||
\tikzstyle{Dashed}=[-, dashed, dash pattern=on 1mm off 2mm, tikzit draw={rgb,255: red,128; green,128; blue,128}]
|
||||
\tikzstyle{Light Arrow}=[->, draw={rgb,255: red,191; green,191; blue,191}]
|
||||
\tikzstyle{lightgreybar}=[-, draw={rgb,255: red,191; green,191; blue,191}]
|
||||
\tikzstyle{lightred}=[-, draw={rgb,255: red,222; green,148; blue,178}]
|
||||
\tikzstyle{Purple}=[-, draw={rgb,255: red,128; green,0; blue,128}, tikzit draw={rgb,255: red,128; green,0; blue,128}, line width=1mm]
|
||||
\tikzstyle{new edge style 1}=[draw={rgb,255: red,121; green,23; blue,40}, ->]
|
||||
\tikzstyle{filled2}=[-, fill={rgb,255: red,255; green,191; blue,191}, draw=black, tikzit draw=black, tikzit fill={rgb,255: red,255; green,191; blue,191}, opacity=0.5]
|
||||
\tikzstyle{filled1}=[-, fill={rgb,255: red,191; green,191; blue,191}, draw=black, tikzit draw=black, opacity=0.5, tikzit fill={rgb,255: red,191; green,191; blue,191}]
|
||||
\tikzstyle{emptyFill1}=[-, fill={rgb,255: red,255; green,191; blue,191}, draw=none, tikzit draw=blue, opacity=0.3]
|
||||
\tikzstyle{new edge style 0}=[-, draw=none, fill={rgb,255: red,191; green,191; blue,191}, tikzit draw=green, opacity=0.3, tikzit fill={rgb,255: red,191; green,191; blue,191}]
|
||||
@ -0,0 +1,42 @@
|
||||
\usepackage{tikz}
|
||||
\usetikzlibrary{backgrounds}
|
||||
\usetikzlibrary{arrows}
|
||||
\usetikzlibrary{shapes,shapes.geometric,shapes.misc}
|
||||
|
||||
% this style is applied by default to any tikzpicture included via \tikzfig
|
||||
\tikzstyle{tikzfig}=[baseline=-0.25em,scale=0.5]
|
||||
|
||||
% these are dummy properties used by TikZiT, but ignored by LaTex
|
||||
\pgfkeys{/tikz/tikzit fill/.initial=0}
|
||||
\pgfkeys{/tikz/tikzit draw/.initial=0}
|
||||
\pgfkeys{/tikz/tikzit shape/.initial=0}
|
||||
\pgfkeys{/tikz/tikzit category/.initial=0}
|
||||
|
||||
% standard layers used in .tikz files
|
||||
\pgfdeclarelayer{edgelayer}
|
||||
\pgfdeclarelayer{nodelayer}
|
||||
\pgfsetlayers{background,edgelayer,nodelayer,main}
|
||||
|
||||
% style for blank nodes
|
||||
\tikzstyle{none}=[inner sep=0mm]
|
||||
|
||||
% include a .tikz file
|
||||
\newcommand{\tikzfig}[1]{%
|
||||
{\tikzstyle{every picture}=[tikzfig]
|
||||
\IfFileExists{#1.tikz}
|
||||
{\input{#1.tikz}}
|
||||
{%
|
||||
\IfFileExists{./figures/#1.tikz}
|
||||
{\input{./figures/#1.tikz}}
|
||||
{\tikz[baseline=-0.5em]{\node[draw=red,font=\color{red},fill=red!10!white] {\textit{#1}};}}%
|
||||
}}%
|
||||
}
|
||||
|
||||
% the same as \tikzfig, but in a {center} environment
|
||||
\newcommand{\ctikzfig}[1]{%
|
||||
\begin{center}\rm
|
||||
\tikzfig{#1}
|
||||
\end{center}}
|
||||
|
||||
% fix strange self-loops, which are PGF/TikZ default
|
||||
\tikzstyle{every loop}=[]
|
||||
@ -0,0 +1,562 @@
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
% Beamer Presentation
|
||||
% LaTeX Template
|
||||
% Version 1.0 (10/11/12)
|
||||
%
|
||||
% This template has been downloaded from:
|
||||
% http://www.LaTeXTemplates.com
|
||||
%
|
||||
% License:
|
||||
% CC BY-NC-SA 3.0 (http://creativecommons.org/licenses/by-nc-sa/3.0/)
|
||||
%
|
||||
% Changed theme to WSU by William King
|
||||
%
|
||||
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
% PACKAGES AND THEMES
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\documentclass[xcolor=dvipsnames,aspectratio=169]{beamer}
|
||||
|
||||
|
||||
%Import Preamble bits
|
||||
\input{../Assets/preambles/FormattingPreamble.tex}
|
||||
\input{../Assets/preambles/TikzitPreamble.tex}
|
||||
\input{../Assets/preambles/MathPreamble.tex}
|
||||
\input{../Assets/preambles/BibPreamble.tex}
|
||||
\input{../Assets/preambles/GeneralPreamble.tex}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
%----------------------------------------------------------------------------------------
|
||||
% TITLE PAGE
|
||||
%----------------------------------------------------------------------------------------
|
||||
|
||||
\title[MDP Constellations]{Modeling decisions in operating satellite constellations}
|
||||
|
||||
\author{Will King} % Your name
|
||||
\institute[WSU] % Your institution as it will appear on the bottom of every slide, may be shorthand to save space
|
||||
{
|
||||
Washington State University \\ % Your institution for the title page
|
||||
\medskip
|
||||
\textit{william.f.king@wsu.edu} % Your email address
|
||||
}
|
||||
\date{\today} % Date, can be changed to a custom date
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
\begin{document}
|
||||
\begin{frame}
|
||||
\titlepage % Print the title page as the first slide
|
||||
\end{frame}
|
||||
|
||||
|
||||
|
||||
%-------------------------------------------------------------------------------------
|
||||
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Background}
|
||||
% Why should we care?
|
||||
% Uses of space
|
||||
% Pollution in space and its impacts
|
||||
% Kessler Syndrome
|
||||
% What is different now
|
||||
%
|
||||
%
|
||||
%
|
||||
%-------------------------------------------------------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Orbital Debris}
|
||||
%Story from monday ISS
|
||||
|
||||
\href{https://edition.cnn.com/2021/11/15/politics/russia-anti-satellite-weapon-test-scn/index.html}{ISS threatened by debris cloud - Monday Nov 15th, 2021}
|
||||
\begin{itemize}
|
||||
\item Russia conducts an Anti-Satellite Missile Test generating at least 1,500 items of trackable debris
|
||||
\item The Astronauts and Cosmonauts on the ISS entered lockdown, including donning pressure suits.
|
||||
\item The situation is still being monitored although the immediate danger appears to have passed.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Orbital Debris}
|
||||
Other events involving the ISS highlight the dangers from orbital debris:
|
||||
\begin{itemize}
|
||||
\item ISS conducts 3 evasive maneuvers to dodge debris in 2020-
|
||||
\href{https://www.jpost.com/science/international-space-station-nearly-struck-by-chinese-satellite-debris-684809}{Jerusalem Post}
|
||||
\item ISS hit by debris, May 2021- \href{https://www.asc-csa.gc.ca/eng/iss/news.asp}{Canadian Space Agency}
|
||||
\item ISS dodged debris from 2007 Anti-Sat Missile, Nov 2021-
|
||||
\href{https://www.jpost.com/science/international-space-station-nearly-struck-by-chinese-satellite-debris-684809}{Jerusalem Post (Same as above)}
|
||||
\end{itemize}
|
||||
|
||||
%This isn't a unique experience
|
||||
%list of other issues the ISS has faced
|
||||
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Orbital Debris}
|
||||
%Talk about Fregat breakup
|
||||
Not just an issue for manned space flight.
|
||||
|
||||
|
||||
\href{https://www.orbitaldebris.jsc.nasa.gov/quarterly-news/pdfs/odqnv25i1.pdf}{Orbital Debris Quarterly News - NASA}
|
||||
\begin{itemize}
|
||||
\item In May of 2020, the Satellite SL-23 Zenit Fregat's
|
||||
tank suffered a second breakup event.
|
||||
\item While only 65 large pieces of debris were initially identified, by Feb. 2021
|
||||
over 325 had been attributed to the breakup.
|
||||
\item Debris was spread in orbits between 500km and 6,000km.
|
||||
\end{itemize}
|
||||
|
||||
\href{https://www.yahoo.com/news/space-debris-russian-missile-test-175253044.html}{Starlink and recent Anti-Sat test}
|
||||
\begin{itemize}
|
||||
\item Estimated that there will likely be some impact to Starlink operations.
|
||||
\item 1,500 large pieces of debris initially identified.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Why now?}
|
||||
%launch costs
|
||||
%nano-satellites
|
||||
|
||||
In recent years two major changes have occurred
|
||||
\begin{enumerate}
|
||||
\item New launch providers: SpaceX, RocketLab, etc have led to plummeting launch costs
|
||||
\item CubeSats and other Nano-Satellites.
|
||||
\end{enumerate}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Goals}
|
||||
%Model decision making of satellite operators to be able to investigate policies to reduce kessler syndrome.
|
||||
|
||||
Goals:
|
||||
\begin{itemize}
|
||||
\item Model the choices
|
||||
facing Satellite Constellation Operators
|
||||
and the optimal policy response.
|
||||
\item Investigate the effect of various policies on debris pollution
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
%-------------------------------------------------------------------------------------
|
||||
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{TOC}
|
||||
%-------------------------------------------------------------------------------------
|
||||
\begin{frame}[allowframebreaks] %Allow frame breaks
|
||||
\frametitle{Overview} % Table of contents slide, comment this out to remove it
|
||||
\tableofcontents
|
||||
%Planned TOC
|
||||
% See ../outline2.txt
|
||||
\end{frame}
|
||||
%-------------------------------------------------------------------------------------
|
||||
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Literature}
|
||||
%-------------------------------------------------------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Past Literature}
|
||||
|
||||
Key elements of recent literature.
|
||||
\begin{enumerate}
|
||||
\item \cite{Kessler1978}: Raised issue of runaway orbital pollution.
|
||||
\item \cite{Adilov2015}: Described 2 period salop model of interactions.
|
||||
\item \cite{Adilov2018,Adilov2018a}: Described an infinite period model with symmetric competitive interactions.
|
||||
\item \cite{RaoRondina2020}: Describe a symmetric infinite period model (first to do so).
|
||||
\item \cite{Rao2020}: Examine the effect of Orbital-Use fees, find it would quadruple long term value produced of the space industry.
|
||||
\end{enumerate}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
%-------------------------------------------------------------------------------------
|
||||
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Model}
|
||||
%-------------------------------------------------------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Overview}
|
||||
|
||||
\begin{itemize}
|
||||
\item Mathematical Notation
|
||||
\item Law of motion for debris
|
||||
\item Law of motion for satellite stocks
|
||||
\item Kessler Syndrome
|
||||
\item Markov Decision Problems
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Mathematical Notation}
|
||||
\begin{itemize}
|
||||
\item $S_t$: The set of constellation satellites stocks.
|
||||
\item $s^i_t$: The number of satellites (stock) for constellation $i$.
|
||||
\item $D_t$: The level of debris.
|
||||
\item $X_t$: The set of launches.
|
||||
\item $x^i_t$: The launches from constellation $i$.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
%------------------------------------
|
||||
\subsection{Laws of Motion}
|
||||
%------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Debris}
|
||||
Law of motion for debris
|
||||
\begin{align}
|
||||
D_{t+1} =& (1-\delta)D_t \tag{Debris decay.} \\
|
||||
&+ g\cdot D_t \tag{Debris produced by collision with debris.} \\
|
||||
&+ \gamma \sum^N_{i=1} \left(1-R^i(S_t,D_t) \right) s^i_t \tag{Debris produced by satellite destruction.} \\
|
||||
&+ \Gamma \sum^n_{j=1} x^j_t \tag{Debris produced by launches.}
|
||||
\end{align}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Satellite Stocks}
|
||||
|
||||
Law of motion for satellite stocks
|
||||
\begin{align}
|
||||
s^i_{t+1} =& \left(
|
||||
R^i(S_t,D_t,X_t)
|
||||
- \eta
|
||||
\right) \cdot s^i_t
|
||||
+ x^i_t
|
||||
\end{align}
|
||||
\begin{itemize}
|
||||
\item $\eta$ is the orbit decay rate of satellites.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
%------------------------------------
|
||||
\subsection{Kessler Syndrome}
|
||||
%------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Explanation of Kessler Syndrome}
|
||||
\begin{block}{Kessler Syndrome}
|
||||
The situation in which collisions between
|
||||
objects in orbit produce debris, and this debris begins collisions
|
||||
with other objects, leading to a runaway growth in debris.
|
||||
As debris can persist for millennia, this may make some orbits unusable.
|
||||
|
||||
\autocite{Kessler1978}
|
||||
\end{block}
|
||||
Often described as a condition with an exponential growth of debris.
|
||||
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Past approaches to Kessler Syndrome}
|
||||
\begin{itemize}
|
||||
\item \cite{Adilov2018}:\\
|
||||
Develops an analog of Kessler syndrome where the condition is met when satellites
|
||||
are destroyed immediately after launch by debris.
|
||||
\begin{align}
|
||||
\left\{ (S_t,D_t) : R^i(S_t,D_t) = 0 ~ \forall i \right\}
|
||||
\end{align}
|
||||
\item \cite{RaoRondina2020}:\\
|
||||
A working paper in which the authors develop a dynamic model and a definition of
|
||||
Kessler syndrome that captures all increasing debris levels.
|
||||
\begin{align}
|
||||
\left\{ (S_t,D_t) :
|
||||
\lim_{t \rightarrow \infty} D_{t+1}(S_t,D_t) = \infty
|
||||
\right\}
|
||||
\end{align}
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{My contributions}
|
||||
I propose two refinements of these definitions to
|
||||
simplify analyzing Kessler syndrome in computational models.
|
||||
\begin{itemize}
|
||||
\item $\epsilon$-Kessler Region
|
||||
\item Proto Kessler Region
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{$\epsilon$-Kessler Region}
|
||||
\begin{block}{$\epsilon$-Kessler Region}
|
||||
\begin{align}
|
||||
\kappa_\epsilon = \left\{ \left(S_t, D_t \right) : \forall k \geq 0,
|
||||
~~ D_{t+k+1} - D_{t+k} \geq \epsilon > 0 \right\}
|
||||
\end{align}
|
||||
\end{block}
|
||||
|
||||
Notable Features
|
||||
\begin{itemize}
|
||||
\item $\epsilon$ can be calibrated to capture only economically significant growth.
|
||||
\item Requires an explicit description of what is considered economically significant.
|
||||
\item Guarantees divergent behavior.
|
||||
\item Simulated transition paths can identify the region.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Proto Kessler Region}
|
||||
|
||||
\begin{block}{Proto Kessler Region}
|
||||
\begin{align}
|
||||
\kappa_\text{proto} =
|
||||
\left\{
|
||||
\left(S_t,D_t \right) : ~~ D_{t+1} - D_{t} \geq \epsilon_\text{proto}
|
||||
\right\}
|
||||
\end{align}
|
||||
\end{block}
|
||||
|
||||
Notable Features
|
||||
\begin{itemize}
|
||||
\item $\epsilon_\text{proto}$ can be calibrated to capture only economically significant growth.
|
||||
\item Requires an explicit description of what is considered economically significant.
|
||||
\item Does not guarantee divergent behavior.
|
||||
\item Easily computable Kessler regions.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Proto Kessler Region}
|
||||
With the given law of motion for debris, the proto-Kessler region is:
|
||||
\begin{align}
|
||||
\left\{
|
||||
\left(S_t,D_t \right) :
|
||||
(g-\delta) D_t
|
||||
+ \gamma \sum^n_{i=1} 1-R^i(S_t,D_t)
|
||||
+ \Gamma \sum^n_{i=1} x^i_t(S_t,D_t)
|
||||
\geq \epsilon_\text{proto}
|
||||
\right\}
|
||||
\end{align}
|
||||
\end{frame}
|
||||
%------------------------------------
|
||||
\subsection{Markov Decision Problem Formulation}
|
||||
%------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Operator's Problem}
|
||||
\begin{align}
|
||||
V^i(S_t, x^{\sim i}_t, D_t) = \max_{x^i_t} u^i(S_t, D_t) -F(x^i_t)
|
||||
+ \beta \left[ V^i(S_{t+1}, x^{\sim i}_{t+1}, D_{t+1}) \right]
|
||||
\end{align}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Benefit Functions}
|
||||
|
||||
Possible benefit functions
|
||||
\begin{itemize}
|
||||
\item Linear (Currently working on this one)
|
||||
\item Cournot Profits
|
||||
\item Profits under Partial substitutability
|
||||
\item Military capabilities (Keeping up with the Joneses)
|
||||
\end{itemize}
|
||||
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Planner's Problem}
|
||||
\begin{align}
|
||||
W(S_t, D_t) =& \max_{X_t} \left[
|
||||
\sum^N_{i=1} \left(u^i(S_t, D_t) - F(x^i_t) \right)
|
||||
+ \beta \left[ W(S_{t+1}, D_{t+1}) \right]\right] \notag \\
|
||||
&\text{subject to:} \notag \\
|
||||
& s^i_{t+1} = (R^i(S_t, D_t)) s^i_t +x^i_t ~~~ \forall i \notag \\
|
||||
& D_{t+1} = (1-\delta + g)D_t
|
||||
+ \gamma \sum^N_{i=1} \left(1-R^i(\vec s_t, D_t)\right) s^i_t
|
||||
+ \Gamma \sum^N_{i=1} x^i_t
|
||||
\end{align}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Planned model expansions}
|
||||
\begin{itemize}
|
||||
\item Multiple interacting orbital shells and debris terms.
|
||||
\item Stochastic laws of motion
|
||||
\item Multiple types of operators
|
||||
\item Operators benefit functions include taxation
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
%-------------------------------------------------------------------------------------
|
||||
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Analysis}
|
||||
%-------------------------------------------------------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Issues}
|
||||
The following issues characterize the Operators' and Planner's problem
|
||||
\begin{itemize}
|
||||
\item Curse of Dimensionality
|
||||
\item Strategic Interaction (operators only)
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Possible approaches}
|
||||
|
||||
Possible approaches
|
||||
\begin{itemize}
|
||||
\item Standard VFI/Howards algorithm.
|
||||
\item VFI with sparse state space (dimensionality reduction).
|
||||
\item Reinforcement Learning.
|
||||
\item \cite{MALIAR2018} approaches using machine learning.
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Chosen approach}
|
||||
% maliar et al - Bellman Residuals
|
||||
% present basic approach (using my notation)
|
||||
% Discuss basic training loop (use sutton and bartos term Generalized Policy Iteration)
|
||||
|
||||
Bellman Residual minimization due to \autocite{MALIAR2018}
|
||||
|
||||
Use NN to approximate $V(S_t,D_t|\theta_1)$ and $X(S_t,D_t|\theta_2)$.
|
||||
|
||||
The loss function is:
|
||||
\begin{align}
|
||||
0 =& \left[
|
||||
V(S_t, D_t) - F(S_t, D_t, X_t)- \beta V(S_{t+1}, D_{t+1})
|
||||
\right]^2 \notag\\
|
||||
&- v \left[
|
||||
F(S_t, D_t, X_t) + \beta V(S_{t+1}, D_{t+1})
|
||||
\right] \\
|
||||
0 =& \left[
|
||||
V(S_t, D_t) - F(S_t, D_t, X_t)- \beta V(S_{t+1}, D_{t+1})
|
||||
- \frac{v}{2}
|
||||
\right]^2
|
||||
- v \left[
|
||||
V(S_{t}, D_{t}) + \frac{v}{4}
|
||||
\right]
|
||||
\end{align}
|
||||
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Training Loop: Planner}
|
||||
|
||||
For each training epoch
|
||||
\begin{enumerate}
|
||||
\item Draw random data
|
||||
\item train policy function
|
||||
\item train value function
|
||||
\end{enumerate}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Training Loop: Operators}
|
||||
|
||||
For each training epoch
|
||||
\begin{enumerate}
|
||||
\item Draw random data
|
||||
\item For each operator
|
||||
\begin{enumerate}
|
||||
\item train policy function
|
||||
\item train value function
|
||||
\end{enumerate}
|
||||
\end{enumerate}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
%------------------------------------
|
||||
\subsection{Analysis so far}
|
||||
%------------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{State of the Code}
|
||||
Currently functioning
|
||||
\begin{itemize}
|
||||
\item Planner Value and Policy training
|
||||
\end{itemize}
|
||||
|
||||
|
||||
Almost functioning
|
||||
\begin{itemize}
|
||||
\item Operator Value and Policy training
|
||||
\item Proto-Kessler Region analysis
|
||||
\end{itemize}
|
||||
%Planner training
|
||||
%Operator training
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Results}
|
||||
%simulated debris paths (increase the amount of debris a couple orders of magnitude)
|
||||
%Protokessler region plots?
|
||||
|
||||
Results are currently waiting on finishing the code.
|
||||
|
||||
\vspace{12pt}
|
||||
|
||||
Some analyses I plan on completing include
|
||||
\begin{itemize}
|
||||
\item Kessler Region analysis
|
||||
\item Free Entry conditions analysis
|
||||
\end{itemize}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
%-------------------------------------------------------------------------------------
|
||||
%%%%%%%%%%%%%%%%%%%% Developing the Model%%%%%%%%%%%%%%%%%%%%%%%%
|
||||
\section{Conclusion}
|
||||
%-------------------------------------------------------------------------------------
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Summary}
|
||||
Summary
|
||||
\begin{enumerate}
|
||||
\item Created Dynamic model of the MDP facing satellite operators.
|
||||
\item Defined new Kessler Regions for computational analysis.
|
||||
\item Currently developing solution and simulation tools.
|
||||
\item Much work left to do.
|
||||
\end{enumerate}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Other Areas Needing Work}
|
||||
|
||||
Related Orbits Work
|
||||
\begin{enumerate}
|
||||
\item Adding stochastic elements to the model.
|
||||
\item Parameter Estimation.
|
||||
\item Rights of Way.
|
||||
\item Satellite Lifetimes and constellation management.
|
||||
\end{enumerate}
|
||||
|
||||
Related computational work
|
||||
\begin{enumerate}
|
||||
\item Automating the Euler Equation Residuals method.
|
||||
\end{enumerate}
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}
|
||||
\frametitle{Questions?}
|
||||
|
||||
\center
|
||||
Any remaining questions?
|
||||
\end{frame}
|
||||
%-------------------------------
|
||||
\begin{frame}[allowframebreaks]
|
||||
\frametitle{References}
|
||||
\printbibliography
|
||||
\end{frame}
|
||||
%\begin{frame}
|
||||
% \frametitle{MarginalRevenue}
|
||||
% \begin{figure}
|
||||
% \tikzfig{../Assets/owned/ch8_MarginalRevenue}
|
||||
% \includegraphics[height=\textheight]{../Assets/copyrighted/KrugmanObsterfeldMeliz_fig8-7.jpg}
|
||||
% \label{FIG:costs}
|
||||
% \caption{Average Cost Curve as firms enter.}
|
||||
% \end{figure}
|
||||
%\end{frame}
|
||||
\end{document}
|
||||
% \begin{frame}
|
||||
% \frametitle{Columns}
|
||||
% \begin{columns}
|
||||
% \begin{column}{0.5\textwidth}
|
||||
% \end{column}
|
||||
% \begin{column}{0.5\textwidth}
|
||||
% \begin{figure}
|
||||
% \tikzfig{../Assets/owned/ch7_EstablishedAdvantageExample2}
|
||||
% \label{FIG:costs}
|
||||
% \caption{Setting the Stage}
|
||||
% \end{figure}
|
||||
% \end{column}
|
||||
% \end{columns}
|
||||
% \end{frame}
|
||||
% %---------------------------------------------------------------
|
||||
@ -0,0 +1,42 @@
|
||||
\usepackage{tikz}
|
||||
\usetikzlibrary{backgrounds}
|
||||
\usetikzlibrary{arrows}
|
||||
\usetikzlibrary{shapes,shapes.geometric,shapes.misc}
|
||||
|
||||
% this style is applied by default to any tikzpicture included via \tikzfig
|
||||
\tikzstyle{tikzfig}=[baseline=-0.25em,scale=0.5]
|
||||
|
||||
% these are dummy properties used by TikZiT, but ignored by LaTex
|
||||
\pgfkeys{/tikz/tikzit fill/.initial=0}
|
||||
\pgfkeys{/tikz/tikzit draw/.initial=0}
|
||||
\pgfkeys{/tikz/tikzit shape/.initial=0}
|
||||
\pgfkeys{/tikz/tikzit category/.initial=0}
|
||||
|
||||
% standard layers used in .tikz files
|
||||
\pgfdeclarelayer{edgelayer}
|
||||
\pgfdeclarelayer{nodelayer}
|
||||
\pgfsetlayers{background,edgelayer,nodelayer,main}
|
||||
|
||||
% style for blank nodes
|
||||
\tikzstyle{none}=[inner sep=0mm]
|
||||
|
||||
% include a .tikz file
|
||||
\newcommand{\tikzfig}[1]{%
|
||||
{\tikzstyle{every picture}=[tikzfig]
|
||||
\IfFileExists{#1.tikz}
|
||||
{\input{#1.tikz}}
|
||||
{%
|
||||
\IfFileExists{./figures/#1.tikz}
|
||||
{\input{./figures/#1.tikz}}
|
||||
{\tikz[baseline=-0.5em]{\node[draw=red,font=\color{red},fill=red!10!white] {\textit{#1}};}}%
|
||||
}}%
|
||||
}
|
||||
|
||||
% the same as \tikzfig, but in a {center} environment
|
||||
\newcommand{\ctikzfig}[1]{%
|
||||
\begin{center}\rm
|
||||
\tikzfig{#1}
|
||||
\end{center}}
|
||||
|
||||
% fix strange self-loops, which are PGF/TikZ default
|
||||
\tikzstyle{every loop}=[]
|
||||
@ -0,0 +1,27 @@
|
||||
|
||||
Outline
|
||||
|
||||
Tell story
|
||||
- Why do we care? Kessler syndrome.
|
||||
- Why is space access needed? (satellite internet, GPS, military, hobbies, science, R&D, etc.)
|
||||
- Why now? (cubesats, lower launch costs, massive expansion in who launches, etc.)
|
||||
|
||||
|
||||
Present past models
|
||||
-
|
||||
-
|
||||
-
|
||||
|
||||
Present Current model
|
||||
-
|
||||
-
|
||||
-
|
||||
|
||||
|
||||
Present work on solving models.
|
||||
-
|
||||
-
|
||||
-
|
||||
|
||||
|
||||
Future work
|
||||
@ -0,0 +1,86 @@
|
||||
# Intro
|
||||
|
||||
Why do we care
|
||||
|
||||
Possible stories:
|
||||
sep 2019
|
||||
2020 - 3 near misses with ISS (https://www.jpost.com/science/international-space-station-nearly-struck-by-chinese-satellite-debris-684809)
|
||||
(https://orbitaldebris.jsc.nasa.gov/quarterly-news/pdfs/odqnv24i3.pdf)
|
||||
May 2020 - Fregat tank breakup (left debris from 1000 to 6000 miles in altitude)
|
||||
May 2021 - Canadarm2 got hit (https://www.asc-csa.gc.ca/eng/iss/news.asp, part way down)
|
||||
Nov 2021 - Dodge debris from anti-satellite test in 2007 (China) (Jerusalem post above)
|
||||
Nasa releases a
|
||||
Monday Nov 15, 2021 - ISS astronauts have to shelter in their exit craft due to a debris cloud nearing the ISS. (https://www.space.com/space-debris-astronauts-shelter-november-2021) (https://www.youtube.com/watch?v=m-LIh0fdfq8)
|
||||
- from youtube: limits the set of experiments they can work with.
|
||||
|
||||
Maybe explain the whole section.
|
||||
|
||||
Why should we care?
|
||||
- All orbits are subject to some degree of pollution.
|
||||
- Common uses: GPS, Military Communications, Commercial internet and TV.
|
||||
- Exploratory uses: R&D of pharmaceuticals, exploration.
|
||||
- Collisions and debris damage are to some degree inevitable.
|
||||
- Kessler Syndrome
|
||||
|
||||
What is different now:
|
||||
- Launch costs (https://aerospace.csis.org/data/space-launch-to-low-earth-orbit-how-much-does-it-cost/) (https://fortune.com/2017/06/17/spacex-launch-cost-competition/)
|
||||
- Cubesats/nanosats (numbers at https://www.nanosats.eu/)
|
||||
- in short, accessibility. With lower cost per mass to orbit, there are more reasons to go. With lower development costs, it is easier to build many small satellites. This gives the issue urgency.
|
||||
- Anti-satellite missiles (US, RU, CH all have the capability)
|
||||
- Starlink vs Kuiper vs OneWeb (UK gov)
|
||||
|
||||
# Present previous literature
|
||||
|
||||
Rao Rondina
|
||||
- Major results: exploitation of common pool resource
|
||||
Adilov et al
|
||||
- Major results: Exploitation of common pool resource
|
||||
Adilov et al
|
||||
- Major results: Divergence between economic and non-economic Kessler syndromes
|
||||
|
||||
# my model
|
||||
- Kessler Syndrome work
|
||||
- Model description
|
||||
|
||||
# Solution methods
|
||||
- Issues
|
||||
- High dimensionality
|
||||
- Many Firms, Governments, and other organizations (How many different operators currently?)
|
||||
- Interacting debris fields between orbits (see the Fregat breakup)
|
||||
- Approximation is required
|
||||
- state space discretization
|
||||
- Not sure which states to examine
|
||||
- functional approximation
|
||||
- Maliar
|
||||
- Reinforcement learning
|
||||
- Choice of using NN approach
|
||||
- Well supported on hardware
|
||||
- Transfer learning
|
||||
-
|
||||
|
||||
|
||||
|
||||
# Analysis
|
||||
|
||||
# major points
|
||||
- Summaries of results so far.
|
||||
- Request for suggestions on utility functions that might be worth investigating
|
||||
- Discussion of goals
|
||||
- Investigate pigouvian taxation, cleanup bonds, etc.
|
||||
- Standardize interface so it is easy to estimate results.
|
||||
-
|
||||
- Discussion of other work that should happen
|
||||
- Estimation of parameters (simulation, bayesian, calibration, best guesstimates, etc)
|
||||
- Rights of way work (way to get operators to declare a no-move value?)
|
||||
- Satellite lifetime management and its impact on decision making. (nested overlapping generations?)
|
||||
|
||||
Other sources
|
||||
Historical breakup events: https://www.orbitaldebris.jsc.nasa.gov/quarterly-news/pdfs/odqnv23i1.pdf
|
||||
Breakups: https://www.orbitaldebris.jsc.nasa.gov/quarterly-news/pdfs/odqnv25i1.pdf
|
||||
Newsletter on debris breakups: https://www.orbitaldebris.jsc.nasa.gov/quarterly-news/
|
||||
|
||||
|
||||
remaining todo
|
||||
- Review planned model expansion
|
||||
- Reformulate bellman residual minimization to use Q and M
|
||||
- Get code working, and run a basic analysis (proto kessler regions)
|
||||
@ -0,0 +1,34 @@
|
||||
# Things I have learned about PyTorch and Neural networks.
|
||||
|
||||
## Building models
|
||||
All model building in Pytorch is based on the following three steps
|
||||
1. start by creating an object that extends the nn.Module base class
|
||||
1. define layers as class attributes (sequential wrapper for ease of use)
|
||||
2. implement the `.forward()` method
|
||||
|
||||
Each layer is just a predefined 'function'.
|
||||
Really, they are objects that extend the nn.Module base class.
|
||||
Thus each NN can act as a layer in another NN.
|
||||
For example, I reimplemented an upscaling layer in BasicNeuralNet2.
|
||||
[I picked up a lot of this info here.](https://deeplizard.com/learn/video/k4jY9L8H89U)
|
||||
|
||||
|
||||
Also, neural networks can return more than just a single output as long as the
|
||||
loss function that is used for optimization can consume both of them.
|
||||
Thus I could write two separate neural networks (such as for launch and partials),
|
||||
and then write a third NN that binds the two together.
|
||||
|
||||
|
||||
## Notes on functions
|
||||
ReLU is a rectified linear unit; it has no trainable parameters, so no training is involved.
|
||||
This makes it good for working as a final cleanup of the launch function.
|
||||
This also makes it not so good for the partial derivatives.
|
||||
|
||||
Linear is a good but basic network type.
|
||||
|
||||
Upscaling allows you to create more features.
|
||||
Downscaling reduces the number of features (by throwing data away?).
|
||||
Instead of downscaling, use a linear function to change the dimensions.
|
||||
|
||||
# Remaining Questions
|
||||
- How do you set it up to run over a set of variables, i.e. batches?
|
||||
@ -0,0 +1,208 @@
|
||||
import torch
|
||||
from torch.autograd.functional import jacobian
|
||||
import itertools
|
||||
import math
|
||||
import abc
|
||||
|
||||
|
||||
class EstimandInterface():
    """
    A clean interface for working with the estimand (i.e. the thing we are
    trying to estimate): the choice variables and the partial derivatives of
    the value functions.

    This class wraps output from the neural network (or other estimand),
    allowing me to
    - easily substitute various types of launch functions by having a common
      interface (this eases testing)
    - check dimensionality etc. without dealing with randomness
      (again, easing testing)
    - reason more cleanly about the component pieces (easing programming)
    - provide a clean interface to find constellation level launch decisions etc.

    It takes inputs of two general categories:
    - the choice function results
    - the partial derivatives of the value function
    """

    def __init__(self, partials, choices, deorbits=None):
        # partials: tensor of value-function partial derivatives; the last
        #   axis is indexed by state.
        # choices:  tensor of launch decisions; the last axis is indexed by
        #   constellation.
        # deorbits: optional deorbit decisions.  The original accepted this
        #   argument but silently discarded it; it is now stored.
        self.partials = partials
        self.choices = choices
        self.deorbits = deorbits

    @property
    def number_constellations(self):
        # Number of constellations = size of the last axis of `choices`.
        # NOTE(review): the original carried a "#fix this" marker here --
        # confirm this axis convention once the choice tensor shape is final.
        return self.choices.shape[-1]

    @property
    def number_states(self):
        # Number of states = size of the last axis of `partials`.
        # This depends on the debris trackers technically.
        # NOTE(review): the original carried a "#fix this" marker here too.
        return self.partials.shape[-1]

    def choice_single(self, constellation):
        """Return the launch decision for the constellation of interest,
        reduced along the constellation axis."""
        filter_tensor = torch.zeros(self.number_constellations)
        filter_tensor[constellation] = 1.0
        return self.choices @ filter_tensor

    def choice_vector(self, constellation):
        """Return the launch decision for the constellation of interest as a
        vector (all other constellations' entries zeroed)."""
        filter_tensor = torch.zeros(self.number_constellations)
        filter_tensor[constellation] = 1.0
        return self.choices * filter_tensor

    def partial_vector(self, constellation):
        """Return the partials of the value function corresponding to the
        constellation of interest, reduced along the state axis."""
        filter_tensor = torch.zeros(self.number_states)
        filter_tensor[constellation] = 1.0
        return self.partials @ filter_tensor

    def partial_matrix(self, constellation):
        """Return the partials of the value function corresponding to the
        constellation of interest as a matrix (other columns zeroed)."""
        filter_tensor = torch.zeros(self.number_states)
        filter_tensor[constellation] = 1.0
        return self.partials * filter_tensor

    def __str__(self):
        # just a human readable descriptor
        return "Launch Decisions and Partial Derivativs of value function with\n\tlaunches\n\t\t {}\n\tPartials\n\t\t{}".format(self.choices,self.partials)
|
||||
|
||||
|
||||
class ChoiceFunction(torch.nn.Module):
    """
    This is used to estimate the launch (choice) function.

    Input:  a (batch, 1, number_states) state tensor.
    Output: a (batch, number_constellations, number_choices) tensor of
            non-negative launch decisions (final ReLU clamps at zero).
    """

    def __init__(self
                 ,batch_size
                 ,number_states
                 ,number_choices
                 ,number_constellations
                 ,layer_size=12
                 ):
        super().__init__()

        # batch_size is kept for interface compatibility; forward() now
        # infers the batch dimension, so any batch size works (the original
        # hard-coded batch_size inside a reshape lambda and crashed on any
        # other batch size).
        self.batch_size = batch_size
        self.number_constellations = number_constellations
        self.layer_size = layer_size

        # preprocess: lift the raw states into the hidden feature space
        self.preprocess = torch.nn.Linear(in_features=number_states, out_features=layer_size)

        # upsample: replicate the feature row once per constellation.
        # Built once here instead of constructing a fresh Upsample module on
        # every forward call inside a lambda.
        self._upsampler = torch.nn.Upsample(scale_factor=number_constellations)

        self.relu = torch.nn.ReLU()  # used for coercion to the state space we care about

        # sequential steps
        self.sequential = torch.nn.Sequential(
            torch.nn.Linear(in_features=layer_size, out_features=layer_size)
            # who knows if a convolution might help here.
            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
        )

        # reduce the feature axis to match expected results
        self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_choices)

    def upsample(self, x):
        # Replicate features per constellation and reshape to
        # (batch, constellation, feature).  Batch dim is inferred via -1.
        return self._upsampler(x).view(-1, self.number_constellations, self.layer_size)

    def forward(self, input_values):
        intermediate_values = self.relu(input_values)  # states should be positive anyway.

        intermediate_values = self.preprocess(intermediate_values)
        intermediate_values = self.upsample(intermediate_values)
        intermediate_values = self.sequential(intermediate_values)
        intermediate_values = self.feature_reduction(intermediate_values)

        # launches are always positive; this may need removing for other
        # types of choices.
        intermediate_values = self.relu(intermediate_values)

        return intermediate_values
|
||||
|
||||
class PartialDerivativesOfValueEstimand(torch.nn.Module):
    """
    This is used to estimate the partial derivatives of the value functions.

    Input:  a (batch, 1, number_states) state tensor.
    Output: a (batch, number_constellations, number_states) tensor of
            estimated partials.
    """

    def __init__(self
                 ,batch_size
                 ,number_constellations
                 ,number_states
                 ,layer_size=12):
        super().__init__()
        # batch_size is kept for interface compatibility; the upsample step
        # now infers the batch dimension, so any batch size works (the
        # original hard-coded batch_size inside a reshape lambda).
        self.batch_size = batch_size
        self.number_constellations = number_constellations
        self.number_states = number_states
        self.layer_size = layer_size

        # preprocess (single linear layer in case there is anything that
        # needs to happen to all states)
        self.preprocess = torch.nn.Sequential(
            torch.nn.ReLU()  # cleanup as states must be positive
            ,torch.nn.Linear(in_features=self.number_states, out_features=self.number_states)
        )

        # upsample to get the basic dimensionality correct: from
        # (batch, 1, state) to (batch, constellation, state).  Built once
        # here instead of constructing a fresh Upsample on every call.
        self._upsampler = torch.nn.Upsample(scale_factor=self.number_constellations)

        # sequential steps
        self.sequential = torch.nn.Sequential(
            torch.nn.Linear(in_features=number_states, out_features=layer_size)
            # who knows if a convolution or other layer type might help here.
            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
        )

        # reduce the feature axis to match expected results
        self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_states)

    def upsample(self, x):
        # From (batch, 1, state) to (batch, constellation, state); the batch
        # dimension is inferred via -1 so any batch size works.
        return self._upsampler(x).view(-1, self.number_constellations, self.number_states)

    def forward(self, states):
        # Note that the input values are just going to be the state variables
        # TODO:check that input values match the prepared dimension?

        # preprocess
        intermediate = self.preprocess(states)

        # upscale the input values
        intermediate = self.upsample(intermediate)

        # intermediate processing
        intermediate = self.sequential(intermediate)

        # reduce feature axis to match the expected number of partials
        intermediate = self.feature_reduction(intermediate)

        return intermediate
|
||||
|
||||
|
||||
class EstimandNN(torch.nn.Module):
    """
    This neural network takes the current states as input values and returns
    both the partial derivatives of the value function and the launch
    function, bundled together in an EstimandInterface.
    """

    def __init__(self
                 ,batch_size
                 ,number_states
                 ,number_choices
                 ,number_constellations
                 ,layer_size=12
                 ):
        super().__init__()

        # sub-network estimating the value-function partials
        self.partials_estimator = PartialDerivativesOfValueEstimand(batch_size, number_constellations, number_states, layer_size)
        # sub-network estimating the launch (choice) function
        self.launch_estimator = ChoiceFunction(batch_size, number_states, number_choices, number_constellations, layer_size)

    def forward(self, input_values):
        # (a leftover dead `pass` statement that preceded this code has been
        # removed; it had no effect but suggested an unfinished body)
        partials = self.partials_estimator(input_values)
        launch = self.launch_estimator(input_values)

        return EstimandInterface(partials, launch)
|
||||
@ -0,0 +1,203 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "working-peeing",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "decimal-boundary",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"The purpose of this notebook is to allow me to investigate proper shaping of inputs.\n",
|
||||
"\n",
|
||||
"Typically pytorch chooses a tensor specification\n",
|
||||
"$$\n",
|
||||
"(N, .*)\n",
|
||||
"$$\n",
|
||||
"where $N$ is the batch size.\n",
|
||||
"For example a Convolutional NN layer expects\n",
|
||||
"$$\n",
|
||||
" NCHW\n",
|
||||
"$$\n",
|
||||
"for BatchSize,ChannelSize,Height,Width.\n",
|
||||
"On the other hand, Linear expects\n",
|
||||
"$$\n",
|
||||
" N.*H\n",
|
||||
"$$\n",
|
||||
"for BatchSize,any number of other dimensions, in_features\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"id": "eligible-isolation",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class PartialDerivativesEstimand(torch.nn.Module):\n",
|
||||
" def __init__(self,batch_size, number_constellations, number_states,scale_factor=4, layer_size=12):\n",
|
||||
" \"\"\"\n",
|
||||
" \n",
|
||||
" \"\"\"\n",
|
||||
" super().__init__()\n",
|
||||
" self.batch_size = batch_size\n",
|
||||
" self.number_constellations = number_constellations\n",
|
||||
" self.number_states = number_states\n",
|
||||
" self.scale_factor = scale_factor\n",
|
||||
" self.layer_size = layer_size\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" #preprocess (single linear layer in case there is anything that needs to happen to all states)\n",
|
||||
" self.preprocess = torch.nn.Sequential(\n",
|
||||
" torch.nn.ReLU() #cleanup as states must be positive\n",
|
||||
" ,torch.nn.Linear(in_features = self.number_states, out_features=self.number_states)\n",
|
||||
" )\n",
|
||||
" #upscale to get the basic dimensionality correct. From (batch,State) to (batch, constellation, state). Includes a reshape\n",
|
||||
" self.upsample = lambda x: torch.nn.Upsample(scale_factor=self.number_constellations)(x).view(self.batch_size\n",
|
||||
" ,self.number_constellations\n",
|
||||
" ,self.number_states)\n",
|
||||
" \n",
|
||||
" #sequential steps\n",
|
||||
" self.sequential = torch.nn.Sequential(\n",
|
||||
" torch.nn.Linear(in_features=number_states, out_features=layer_size)\n",
|
||||
" #who knows if a convolution might help here.\n",
|
||||
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
|
||||
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" #reduce axis to match expectation\n",
|
||||
" self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_states)\n",
|
||||
" \n",
|
||||
" def forward(self, input_values):\n",
|
||||
" #Note that the input values are just going to be the state variables\n",
|
||||
" #TODO:check that input values match the prepared dimension?\n",
|
||||
" \n",
|
||||
" #preprocess\n",
|
||||
" intermediate = self.preprocess(input_values)\n",
|
||||
" \n",
|
||||
" #upscale the input values\n",
|
||||
" intermediate = self.upsample(intermediate)\n",
|
||||
" \n",
|
||||
" #intermediate processing\n",
|
||||
" intermediate = self.sequential(intermediate)\n",
|
||||
" \n",
|
||||
" #reduce feature axis to match the expected number of partials\n",
|
||||
" intermediate = self.feature_reduction(intermediate)\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" return intermediate"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 19,
|
||||
"id": "literary-desktop",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"batch_size = 2\n",
|
||||
"constellations = 2\n",
|
||||
"number_states = constellations+1\n",
|
||||
"\n",
|
||||
"#initialize the NN\n",
|
||||
"a = PartialDerivativesEstimand(batch_size,constellations,number_states,scale_factor=2)\n",
|
||||
"\n",
|
||||
"#example state\n",
|
||||
"s = torch.rand(size=(batch_size,1,number_states))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 22,
|
||||
"id": "second-graduation",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([[[0.9283, 0.9414, 0.3426]],\n",
|
||||
"\n",
|
||||
" [[0.1902, 0.0369, 0.4699]]])"
|
||||
]
|
||||
},
|
||||
"execution_count": 22,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"s"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 23,
|
||||
"id": "reliable-alberta",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([[[-0.1991, 0.1335, 0.2821],\n",
|
||||
" [-0.3549, 0.0213, 0.2322]],\n",
|
||||
"\n",
|
||||
" [[-0.1701, 0.1557, 0.2954],\n",
|
||||
" [-0.3017, 0.0690, 0.2419]]], grad_fn=<AddBackward0>)"
|
||||
]
|
||||
},
|
||||
"execution_count": 23,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"a(s)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "horizontal-judges",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "instant-lindsay",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@ -0,0 +1,793 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "french-experiment",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from torch.autograd.functional import jacobian\n",
|
||||
"import itertools\n",
|
||||
"import math\n",
|
||||
"import abc\n",
|
||||
"\n",
|
||||
"class EconomicAgent(metaclass=abc.ABCMeta):\n",
|
||||
" @abc.abstractmethod\n",
|
||||
" def period_benefit(self,state,estimand_interface):\n",
|
||||
" pass\n",
|
||||
" @abc.abstractmethod\n",
|
||||
" def _period_benefit(self):\n",
|
||||
" pass\n",
|
||||
" @abc.abstractmethod\n",
|
||||
" def period_benefit_jacobian_wrt_states(self):\n",
|
||||
" pass\n",
|
||||
" @abc.abstractmethod\n",
|
||||
" def _period_benefit_jacobian_wrt_states(self):\n",
|
||||
" pass\n",
|
||||
" @abc.abstractmethod\n",
|
||||
" def period_benefit_jacobian_wrt_launches(self):\n",
|
||||
" pass\n",
|
||||
" @abc.abstractmethod\n",
|
||||
" def _period_benefit_jacobian_wrt_launches(self):\n",
|
||||
" pass\n",
|
||||
"\n",
|
||||
"class LinearProfit(EconomicAgent):\n",
|
||||
" \"\"\"\n",
|
||||
" The simplest type of profit function available.\n",
|
||||
" \"\"\"\n",
|
||||
" def __init__(self, constellation_number, discount_factor, benefit_weight, launch_cost, deorbit_cost=0):\n",
|
||||
" #track which constellation this is.\n",
|
||||
" self.constellation_number = constellation_number\n",
|
||||
"\n",
|
||||
" #parameters describing the agent's situation\n",
|
||||
" self.discount_factor = discount_factor\n",
|
||||
" self.benefit_weights = benefit_weight\n",
|
||||
" self.launch_cost = launch_cost\n",
|
||||
" self.deorbit_cost = deorbit_cost\n",
|
||||
"\n",
|
||||
" def __str__(self):\n",
|
||||
" return \"LinearProfit\\n Benefit weights:\\t{}\\n launch cost:\\t{}\\n Deorbit cost:\\t{}\".format(self.benefit_weights, self.launch_cost, self.deorbit_cost)\n",
|
||||
"\n",
|
||||
" def period_benefit(self,state,estimand_interface):\n",
|
||||
" return self._period_benefit(state.stocks, state.debris, estimand_interface.choices)\n",
|
||||
" \n",
|
||||
" def _period_benefit(self,stocks,debris,choice):\n",
|
||||
" profits = self.benefit_weights @ stocks \\\n",
|
||||
" - self.launch_cost * choice[self.constellation_number] #\\ \n",
|
||||
" #- deorbit_cost @ deorbits[self.constellation_number]\n",
|
||||
" return profits\n",
|
||||
"\n",
|
||||
" def period_benefit_jacobian_wrt_states(self, states, estimand_interface):\n",
|
||||
" return self._period_benefit_jacobian_wrt_states(states.stocks, states.debris, estimand_interface.choices)\n",
|
||||
"\n",
|
||||
" def _period_benefit_jacobian_wrt_states(self, stocks, debris, launches):\n",
|
||||
" jac = jacobian(self._period_benefit, (stocks,debris,launches))\n",
|
||||
" return torch.cat((jac[0], jac[1]))\n",
|
||||
" \n",
|
||||
" def period_benefit_jacobian_wrt_launches(self, states, estimand_interface):\n",
|
||||
" return self._period_benefit_jacobian_wrt_launches(states.stocks, states.debris, estimand_interface.choices)\n",
|
||||
"\n",
|
||||
" def _period_benefit_jacobian_wrt_launches(self,stocks,debris,launches):\n",
|
||||
" jac = jacobian(self._period_benefit, (stocks,debris,launches))\n",
|
||||
" return jac[2]\n",
|
||||
"\n",
|
||||
"class States():\n",
|
||||
" \"\"\"\n",
|
||||
" This is supposed to capture the state variables of the model, to create a common interface \n",
|
||||
" when passing between functions.\n",
|
||||
" \"\"\"\n",
|
||||
" def __init__(self, stocks,debris):\n",
|
||||
" self.stocks = stocks\n",
|
||||
" self.debris = debris\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" def __str__(self):\n",
|
||||
" return \"stocks\\t{} \\ndebris\\t {}\".format(self.stocks,self.debris)\n",
|
||||
"\n",
|
||||
" @property\n",
|
||||
" def number_constellations(self):\n",
|
||||
" return len(self.stocks)\n",
|
||||
" @property\n",
|
||||
" def number_debris_trackers(self):\n",
|
||||
" return len(self.debris)\n",
|
||||
"\n",
|
||||
" \n",
|
||||
"class EstimandInterface():\n",
|
||||
" \"\"\"\n",
|
||||
" This defines a clean interface for working with the estimand (i.e. thing we are trying to estimate).\n",
|
||||
" In general, we are trying to estimate the choice variables and the partial derivatives of the value functions.\n",
|
||||
" This \n",
|
||||
"\n",
|
||||
" This class wraps output for the neural network (or other estimand), allowing me to \n",
|
||||
" - easily substitute various types of launch functions by having a common interface\n",
|
||||
" - this eases testing\n",
|
||||
" - check dimensionality etc without dealing with randomness\n",
|
||||
" - again, easing testing\n",
|
||||
" - reason more cleanly about the component pieces\n",
|
||||
" - easing programming\n",
|
||||
" - provide a clean interface to find constellation level launch decisions etc.\n",
|
||||
"\n",
|
||||
" It takes inputs of two general categories:\n",
|
||||
" - the choice function results\n",
|
||||
" - the partial derivatives of the value function\n",
|
||||
" \"\"\"\n",
|
||||
" def __init__(self, partials, choices, deorbits=None):\n",
|
||||
" self.partials = partials\n",
|
||||
" self.choices = choices\n",
|
||||
" \n",
|
||||
" @property\n",
|
||||
" def number_constellations(self):\n",
|
||||
" pass #fix this\n",
|
||||
" return self.choices.shape[-1]\n",
|
||||
" @property\n",
|
||||
" def number_states(self):\n",
|
||||
" pass #fix this\n",
|
||||
" return self.partials.shape[-1] #This depends on the debris trackers technically.\n",
|
||||
"\n",
|
||||
" def choice_single(self, constellation):\n",
|
||||
" #returns the launch decision for the constellation of interest\n",
|
||||
" \n",
|
||||
" filter_tensor = torch.zeros(self.number_constellations)\n",
|
||||
" filter_tensor[constellation] = 1.0\n",
|
||||
" \n",
|
||||
" return self.choices @ filter_tensor\n",
|
||||
" \n",
|
||||
" def choice_vector(self, constellation):\n",
|
||||
" #returns the launch decision for the constellation of interest as a vector\n",
|
||||
" \n",
|
||||
" filter_tensor = torch.zeros(self.number_constellations)\n",
|
||||
" filter_tensor[constellation] = 1.0\n",
|
||||
" \n",
|
||||
" return self.choices * filter_tensor\n",
|
||||
" \n",
|
||||
" def partial_vector(self, constellation):\n",
|
||||
" #returns the partials of the value function corresponding to the constellation of interest\n",
|
||||
" \n",
|
||||
" filter_tensor = torch.zeros(self.number_states)\n",
|
||||
" filter_tensor[constellation] = 1.0\n",
|
||||
" \n",
|
||||
" return self.partials @ filter_tensor\n",
|
||||
" \n",
|
||||
" def partial_matrix(self, constellation):\n",
|
||||
" #returns the partials of the value function corresponding to \n",
|
||||
" #the constellation of interest as a matrix\n",
|
||||
" \n",
|
||||
" filter_tensor = torch.zeros(self.number_states)\n",
|
||||
" filter_tensor[constellation] = 1.0\n",
|
||||
" \n",
|
||||
" return self.partials * filter_tensor\n",
|
||||
" \n",
|
||||
" def __str__(self):\n",
|
||||
" #just a human readable descriptor\n",
|
||||
" return \"Launch Decisions and Partial Derivativs of value function with\\n\\tlaunches\\n\\t\\t {}\\n\\tPartials\\n\\t\\t{}\".format(self.choices,self.partials)\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"class ChoiceFunction(torch.nn.Module):\n",
|
||||
" \"\"\"\n",
|
||||
" This is used to estimate the launch function\n",
|
||||
" \"\"\"\n",
|
||||
" def __init__(self\n",
|
||||
" ,batch_size\n",
|
||||
" ,number_states\n",
|
||||
" ,number_choices\n",
|
||||
" ,number_constellations\n",
|
||||
" ,layer_size=12\n",
|
||||
" ):\n",
|
||||
" super().__init__()\n",
|
||||
" \n",
|
||||
" #preprocess\n",
|
||||
" self.preprocess = torch.nn.Linear(in_features=number_states, out_features=layer_size)\n",
|
||||
" \n",
|
||||
" #upsample\n",
|
||||
" self.upsample = lambda x: torch.nn.Upsample(scale_factor=number_constellations)(x).view(batch_size\n",
|
||||
" ,number_constellations\n",
|
||||
" ,layer_size)\n",
|
||||
" \n",
|
||||
" self.relu = torch.nn.ReLU() #used for coersion to the state space we care about.\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" #sequential steps\n",
|
||||
" self.sequential = torch.nn.Sequential(\n",
|
||||
" torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
|
||||
" #who knows if a convolution might help here.\n",
|
||||
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
|
||||
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" #reduce the feature axis to match expected results\n",
|
||||
" self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_choices)\n",
|
||||
"\n",
|
||||
" \n",
|
||||
" def forward(self, input_values):\n",
|
||||
" \n",
|
||||
" intermediate_values = self.relu(input_values) #states should be positive anyway.\n",
|
||||
" \n",
|
||||
" intermediate_values = self.preprocess(intermediate_values)\n",
|
||||
" intermediate_values = self.upsample(intermediate_values)\n",
|
||||
" intermediate_values = self.sequential(intermediate_values)\n",
|
||||
" intermediate_values = self.feature_reduction(intermediate_values)\n",
|
||||
" \n",
|
||||
" intermediate_values = self.relu(intermediate_values) #launches are always positive, this may need removed for other types of choices.\n",
|
||||
" \n",
|
||||
" return intermediate_values\n",
|
||||
"\n",
|
||||
"class PartialDerivativesOfValueEstimand(torch.nn.Module):\n",
|
||||
" \"\"\"\n",
|
||||
" This is used to estimate the partial derivatives of the value functions\n",
|
||||
" \"\"\"\n",
|
||||
" def __init__(self\n",
|
||||
" ,batch_size\n",
|
||||
" , number_constellations\n",
|
||||
" , number_states\n",
|
||||
" , layer_size=12):\n",
|
||||
" super().__init__()\n",
|
||||
" self.batch_size = batch_size #used for upscaling\n",
|
||||
" self.number_constellations = number_constellations\n",
|
||||
" self.number_states = number_states\n",
|
||||
" self.layer_size = layer_size\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" #preprocess (single linear layer in case there is anything that needs to happen to all states)\n",
|
||||
" self.preprocess = torch.nn.Sequential(\n",
|
||||
" torch.nn.ReLU() #cleanup as states must be positive\n",
|
||||
" ,torch.nn.Linear(in_features = self.number_states, out_features=self.number_states)\n",
|
||||
" )\n",
|
||||
" \n",
|
||||
" #upsample to get the basic dimensionality correct. From (batch,State) to (batch, constellation, state). Includes a reshape\n",
|
||||
" self.upsample = lambda x: torch.nn.Upsample(scale_factor=self.number_constellations)(x).view(self.batch_size\n",
|
||||
" ,self.number_constellations\n",
|
||||
" ,self.number_states)\n",
|
||||
" \n",
|
||||
" #sequential steps\n",
|
||||
" self.sequential = torch.nn.Sequential(\n",
|
||||
" torch.nn.Linear(in_features=number_states, out_features=layer_size)\n",
|
||||
" #who knows if a convolution or other layer type might help here.\n",
|
||||
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
|
||||
" ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
|
||||
" )\n",
|
||||
"\n",
|
||||
" #reduce the feature axis to match expected results\n",
|
||||
" self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_states)\n",
|
||||
" \n",
|
||||
" def forward(self, states):\n",
|
||||
" #Note that the input values are just going to be the state variables\n",
|
||||
" #TODO:check that input values match the prepared dimension?\n",
|
||||
" \n",
|
||||
" #preprocess\n",
|
||||
" intermediate = self.preprocess(states)\n",
|
||||
" \n",
|
||||
" #upscale the input values\n",
|
||||
" intermediate = self.upsample(intermediate)\n",
|
||||
" \n",
|
||||
" #intermediate processing\n",
|
||||
" intermediate = self.sequential(intermediate)\n",
|
||||
" \n",
|
||||
" #reduce feature axis to match the expected number of partials\n",
|
||||
" intermediate = self.feature_reduction(intermediate)\n",
|
||||
" \n",
|
||||
" \n",
|
||||
" return intermediate\n",
|
||||
" "
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "suited-nothing",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"class EstimandNN(torch.nn.Module):\n",
|
||||
" \"\"\"\n",
|
||||
" This neural network takes the current states as input values and returns both\n",
|
||||
" the partial derivatives of the value function and the launch function.\n",
|
||||
" \"\"\"\n",
|
||||
" def __init__(self\n",
|
||||
" ,batch_size\n",
|
||||
" ,number_states\n",
|
||||
" ,number_choices\n",
|
||||
" ,number_constellations\n",
|
||||
" ,layer_size=12\n",
|
||||
" ):\n",
|
||||
" super().__init__()\n",
|
||||
" \n",
|
||||
"\n",
|
||||
" self.partials_estimator = PartialDerivativesOfValueEstimand(batch_size, number_constellations, number_states, layer_size)\n",
|
||||
" self.launch_estimator = ChoiceFunction(batch_size, number_states, number_choices, number_constellations, layer_size)\n",
|
||||
" \n",
|
||||
" def forward(self, input_values):\n",
|
||||
" pass\n",
|
||||
" partials = self.partials_estimator(input_values)\n",
|
||||
" launch = self.launch_estimator(input_values)\n",
|
||||
" \n",
|
||||
" return EstimandInterface(partials,launch)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "recognized-story",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"# Testing\n",
|
||||
"\n",
|
||||
"Test if states can handle the dimensionality needed."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "smart-association",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"batch_size,states,choices = 5,3,1\n",
|
||||
"constellations = states -1 #determined by debris tracking\n",
|
||||
"max_start_state = 100\n",
|
||||
"\n",
|
||||
"stocks_and_debris = torch.randint(max_start_state,(batch_size,1,states),dtype=torch.float32)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 84,
|
||||
"id": "unsigned-hungary",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"torch.Size([5, 1, 3])"
|
||||
]
|
||||
},
|
||||
"execution_count": 84,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"stocks_and_debris.size()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "regulated-conversation",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Launch Decisions and Partial Derivativs of value function with\n",
|
||||
"\tlaunches\n",
|
||||
"\t\t tensor([[[0.0000],\n",
|
||||
" [0.0000]],\n",
|
||||
"\n",
|
||||
" [[2.0907],\n",
|
||||
" [0.1053]],\n",
|
||||
"\n",
|
||||
" [[2.9730],\n",
|
||||
" [2.2000]],\n",
|
||||
"\n",
|
||||
" [[2.3975],\n",
|
||||
" [1.2877]],\n",
|
||||
"\n",
|
||||
" [[4.2107],\n",
|
||||
" [2.0752]]], grad_fn=<ReluBackward0>)\n",
|
||||
"\tPartials\n",
|
||||
"\t\ttensor([[[ 0.1939, 0.3954, 0.0730],\n",
|
||||
" [-0.9428, 0.6145, -0.9247]],\n",
|
||||
"\n",
|
||||
" [[ 1.1686, 3.0170, 0.3393],\n",
|
||||
" [-7.1474, 2.3495, -7.0566]],\n",
|
||||
"\n",
|
||||
" [[-2.0849, 3.0883, -3.3791],\n",
|
||||
" [-0.6664, 0.0361, -2.2530]],\n",
|
||||
"\n",
|
||||
" [[-0.7117, 2.5474, -1.6458],\n",
|
||||
" [-2.1937, 0.6897, -3.0382]],\n",
|
||||
"\n",
|
||||
" [[-1.0262, 4.5973, -2.6606],\n",
|
||||
" [-5.4307, 1.4510, -6.6972]]], grad_fn=<AddBackward0>)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(a := enn.forward(stocks_and_debris))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "rental-detection",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def lossb(a):\n",
|
||||
" #test loss function\n",
|
||||
" return (a**2).sum()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 30,
|
||||
"id": "mechanical-joshua",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"ch = ChoiceFunction(batch_size\n",
|
||||
" ,states\n",
|
||||
" ,choices\n",
|
||||
" ,constellations\n",
|
||||
" ,12)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 31,
|
||||
"id": "charged-request",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"tensor(46.8100, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(82442.4219, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(0., grad_fn=<SumBackward0>)\n",
|
||||
"tensor(0., grad_fn=<SumBackward0>)\n",
|
||||
"tensor(0., grad_fn=<SumBackward0>)\n",
|
||||
"tensor(0., grad_fn=<SumBackward0>)\n",
|
||||
"tensor(0., grad_fn=<SumBackward0>)\n",
|
||||
"tensor(0., grad_fn=<SumBackward0>)\n",
|
||||
"tensor(0., grad_fn=<SumBackward0>)\n",
|
||||
"tensor(0., grad_fn=<SumBackward0>)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([[[0.],\n",
|
||||
" [0.]],\n",
|
||||
"\n",
|
||||
" [[0.],\n",
|
||||
" [0.]],\n",
|
||||
"\n",
|
||||
" [[0.],\n",
|
||||
" [0.]],\n",
|
||||
"\n",
|
||||
" [[0.],\n",
|
||||
" [0.]],\n",
|
||||
"\n",
|
||||
" [[0.],\n",
|
||||
" [0.]]], grad_fn=<ReluBackward0>)"
|
||||
]
|
||||
},
|
||||
"execution_count": 31,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"optimizer = torch.optim.SGD(ch.parameters(),lr=0.01)\n",
|
||||
"\n",
|
||||
"for i in range(10):\n",
|
||||
" #training loop\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
"\n",
|
||||
" output = ch.forward(stocks_and_debris)\n",
|
||||
"\n",
|
||||
" l = lossb(output)\n",
|
||||
"\n",
|
||||
" l.backward()\n",
|
||||
"\n",
|
||||
" optimizer.step()\n",
|
||||
"\n",
|
||||
" print(l)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"ch.forward(stocks_and_debris)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 45,
|
||||
"id": "perceived-permit",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def lossc(a):\n",
|
||||
" #test loss function\n",
|
||||
" return (a**2).sum()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 53,
|
||||
"id": "atomic-variance",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"pd = PartialDerivativesOfValueEstimand(\n",
|
||||
" batch_size\n",
|
||||
" ,constellations\n",
|
||||
" ,states\n",
|
||||
" ,12)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 74,
|
||||
"id": "biological-badge",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"tensor(1.9948e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(1.7427e-05, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(5.7993e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(2.9985e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(6.5281e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(7.8818e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(4.4327e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(1.1240e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(1.2478e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(3.5818e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(4.3732e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(2.7699e-06, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(8.9659e-07, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(5.7541e-07, grad_fn=<SumBackward0>)\n",
|
||||
"tensor(1.5010e-06, grad_fn=<SumBackward0>)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([[[ 0.0002, -0.0002, -0.0003],\n",
|
||||
" [ 0.0001, -0.0003, -0.0002]],\n",
|
||||
"\n",
|
||||
" [[ 0.0002, -0.0003, -0.0003],\n",
|
||||
" [ 0.0003, -0.0004, -0.0002]],\n",
|
||||
"\n",
|
||||
" [[ 0.0002, -0.0003, -0.0003],\n",
|
||||
" [ 0.0002, -0.0003, -0.0003]],\n",
|
||||
"\n",
|
||||
" [[ 0.0002, -0.0002, -0.0004],\n",
|
||||
" [ 0.0003, -0.0003, -0.0003]],\n",
|
||||
"\n",
|
||||
" [[ 0.0003, -0.0003, -0.0002],\n",
|
||||
" [ 0.0003, -0.0003, -0.0002]]], grad_fn=<AddBackward0>)"
|
||||
]
|
||||
},
|
||||
"execution_count": 74,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"optimizer = torch.optim.Adam(pd.parameters(),lr=0.0001)\n",
|
||||
"\n",
|
||||
"for i in range(15):\n",
|
||||
" #training loop\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
"\n",
|
||||
" output = pd.forward(stocks_and_debris)\n",
|
||||
"\n",
|
||||
" l = lossc(output)\n",
|
||||
"\n",
|
||||
" l.backward()\n",
|
||||
"\n",
|
||||
" optimizer.step()\n",
|
||||
"\n",
|
||||
" print(l)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"pd.forward(stocks_and_debris)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 78,
|
||||
"id": "compliant-johnson",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"def lossa(a):\n",
|
||||
" #test loss function\n",
|
||||
" return (a.choices**2).sum() + (a.partials**2).sum()"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 81,
|
||||
"id": "alive-potato",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"enn = EstimandNN(batch_size\n",
|
||||
" ,states\n",
|
||||
" ,choices\n",
|
||||
" ,constellations\n",
|
||||
" ,12)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 83,
|
||||
"id": "changed-instruction",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"0 tensor(112.1970, grad_fn=<AddBackward0>)\n",
|
||||
"10 tensor(79.8152, grad_fn=<AddBackward0>)\n",
|
||||
"20 tensor(55.6422, grad_fn=<AddBackward0>)\n",
|
||||
"30 tensor(38.5636, grad_fn=<AddBackward0>)\n",
|
||||
"40 tensor(26.9156, grad_fn=<AddBackward0>)\n",
|
||||
"50 tensor(18.9986, grad_fn=<AddBackward0>)\n",
|
||||
"60 tensor(13.6606, grad_fn=<AddBackward0>)\n",
|
||||
"70 tensor(10.1881, grad_fn=<AddBackward0>)\n",
|
||||
"80 tensor(8.0395, grad_fn=<AddBackward0>)\n",
|
||||
"90 tensor(6.7618, grad_fn=<AddBackward0>)\n",
|
||||
"100 tensor(6.0101, grad_fn=<AddBackward0>)\n",
|
||||
"110 tensor(5.5517, grad_fn=<AddBackward0>)\n",
|
||||
"120 tensor(5.2434, grad_fn=<AddBackward0>)\n",
|
||||
"130 tensor(5.0054, grad_fn=<AddBackward0>)\n",
|
||||
"140 tensor(4.7988, grad_fn=<AddBackward0>)\n",
|
||||
"150 tensor(4.6069, grad_fn=<AddBackward0>)\n",
|
||||
"160 tensor(4.4235, grad_fn=<AddBackward0>)\n",
|
||||
"170 tensor(4.2468, grad_fn=<AddBackward0>)\n",
|
||||
"180 tensor(4.0763, grad_fn=<AddBackward0>)\n",
|
||||
"190 tensor(3.9117, grad_fn=<AddBackward0>)\n",
|
||||
"200 tensor(3.7532, grad_fn=<AddBackward0>)\n",
|
||||
"210 tensor(3.6005, grad_fn=<AddBackward0>)\n",
|
||||
"220 tensor(3.4535, grad_fn=<AddBackward0>)\n",
|
||||
"230 tensor(3.3121, grad_fn=<AddBackward0>)\n",
|
||||
"240 tensor(3.1761, grad_fn=<AddBackward0>)\n",
|
||||
"250 tensor(3.0454, grad_fn=<AddBackward0>)\n",
|
||||
"260 tensor(2.9198, grad_fn=<AddBackward0>)\n",
|
||||
"270 tensor(2.7991, grad_fn=<AddBackward0>)\n",
|
||||
"280 tensor(2.6832, grad_fn=<AddBackward0>)\n",
|
||||
"290 tensor(2.5720, grad_fn=<AddBackward0>)\n",
|
||||
"300 tensor(2.4653, grad_fn=<AddBackward0>)\n",
|
||||
"310 tensor(2.3629, grad_fn=<AddBackward0>)\n",
|
||||
"320 tensor(2.2646, grad_fn=<AddBackward0>)\n",
|
||||
"330 tensor(2.1704, grad_fn=<AddBackward0>)\n",
|
||||
"340 tensor(2.0800, grad_fn=<AddBackward0>)\n",
|
||||
"350 tensor(1.9933, grad_fn=<AddBackward0>)\n",
|
||||
"360 tensor(1.9103, grad_fn=<AddBackward0>)\n",
|
||||
"370 tensor(1.8306, grad_fn=<AddBackward0>)\n",
|
||||
"380 tensor(1.7543, grad_fn=<AddBackward0>)\n",
|
||||
"390 tensor(1.6812, grad_fn=<AddBackward0>)\n",
|
||||
"400 tensor(1.6111, grad_fn=<AddBackward0>)\n",
|
||||
"410 tensor(1.5440, grad_fn=<AddBackward0>)\n",
|
||||
"420 tensor(1.4797, grad_fn=<AddBackward0>)\n",
|
||||
"430 tensor(1.4180, grad_fn=<AddBackward0>)\n",
|
||||
"440 tensor(1.3590, grad_fn=<AddBackward0>)\n",
|
||||
"450 tensor(1.3025, grad_fn=<AddBackward0>)\n",
|
||||
"460 tensor(1.2484, grad_fn=<AddBackward0>)\n",
|
||||
"470 tensor(1.1965, grad_fn=<AddBackward0>)\n",
|
||||
"480 tensor(1.1469, grad_fn=<AddBackward0>)\n",
|
||||
"490 tensor(1.0994, grad_fn=<AddBackward0>)\n",
|
||||
"500 tensor(1.0540, grad_fn=<AddBackward0>)\n",
|
||||
"510 tensor(1.0104, grad_fn=<AddBackward0>)\n",
|
||||
"520 tensor(0.9688, grad_fn=<AddBackward0>)\n",
|
||||
"530 tensor(0.9290, grad_fn=<AddBackward0>)\n",
|
||||
"540 tensor(0.8908, grad_fn=<AddBackward0>)\n",
|
||||
"550 tensor(0.8544, grad_fn=<AddBackward0>)\n",
|
||||
"560 tensor(0.8195, grad_fn=<AddBackward0>)\n",
|
||||
"570 tensor(0.7861, grad_fn=<AddBackward0>)\n",
|
||||
"580 tensor(0.7542, grad_fn=<AddBackward0>)\n",
|
||||
"590 tensor(0.7237, grad_fn=<AddBackward0>)\n",
|
||||
"600 tensor(0.6945, grad_fn=<AddBackward0>)\n",
|
||||
"610 tensor(0.6667, grad_fn=<AddBackward0>)\n",
|
||||
"620 tensor(0.6400, grad_fn=<AddBackward0>)\n",
|
||||
"630 tensor(0.6146, grad_fn=<AddBackward0>)\n",
|
||||
"640 tensor(0.5903, grad_fn=<AddBackward0>)\n",
|
||||
"650 tensor(0.5671, grad_fn=<AddBackward0>)\n",
|
||||
"660 tensor(0.5449, grad_fn=<AddBackward0>)\n",
|
||||
"670 tensor(0.5237, grad_fn=<AddBackward0>)\n",
|
||||
"680 tensor(0.5035, grad_fn=<AddBackward0>)\n",
|
||||
"690 tensor(0.4842, grad_fn=<AddBackward0>)\n",
|
||||
"700 tensor(0.4658, grad_fn=<AddBackward0>)\n",
|
||||
"710 tensor(0.4482, grad_fn=<AddBackward0>)\n",
|
||||
"720 tensor(0.4315, grad_fn=<AddBackward0>)\n",
|
||||
"730 tensor(0.4155, grad_fn=<AddBackward0>)\n",
|
||||
"740 tensor(0.4002, grad_fn=<AddBackward0>)\n",
|
||||
"750 tensor(0.3857, grad_fn=<AddBackward0>)\n",
|
||||
"760 tensor(0.3718, grad_fn=<AddBackward0>)\n",
|
||||
"770 tensor(0.3586, grad_fn=<AddBackward0>)\n",
|
||||
"780 tensor(0.3460, grad_fn=<AddBackward0>)\n",
|
||||
"790 tensor(0.3340, grad_fn=<AddBackward0>)\n",
|
||||
"800 tensor(0.3226, grad_fn=<AddBackward0>)\n",
|
||||
"810 tensor(0.3117, grad_fn=<AddBackward0>)\n",
|
||||
"820 tensor(0.3013, grad_fn=<AddBackward0>)\n",
|
||||
"830 tensor(0.2914, grad_fn=<AddBackward0>)\n",
|
||||
"840 tensor(0.2820, grad_fn=<AddBackward0>)\n",
|
||||
"850 tensor(0.2730, grad_fn=<AddBackward0>)\n",
|
||||
"860 tensor(0.2645, grad_fn=<AddBackward0>)\n",
|
||||
"870 tensor(0.2564, grad_fn=<AddBackward0>)\n",
|
||||
"880 tensor(0.2486, grad_fn=<AddBackward0>)\n",
|
||||
"890 tensor(0.2413, grad_fn=<AddBackward0>)\n",
|
||||
"900 tensor(0.2342, grad_fn=<AddBackward0>)\n",
|
||||
"910 tensor(0.2276, grad_fn=<AddBackward0>)\n",
|
||||
"920 tensor(0.2212, grad_fn=<AddBackward0>)\n",
|
||||
"930 tensor(0.2151, grad_fn=<AddBackward0>)\n",
|
||||
"940 tensor(0.2094, grad_fn=<AddBackward0>)\n",
|
||||
"950 tensor(0.2039, grad_fn=<AddBackward0>)\n",
|
||||
"960 tensor(0.1986, grad_fn=<AddBackward0>)\n",
|
||||
"970 tensor(0.1936, grad_fn=<AddBackward0>)\n",
|
||||
"980 tensor(0.1889, grad_fn=<AddBackward0>)\n",
|
||||
"990 tensor(0.1844, grad_fn=<AddBackward0>)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<__main__.EstimandInterface at 0x7f85609fce20>"
|
||||
]
|
||||
},
|
||||
"execution_count": 83,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"optimizer = torch.optim.Adam(enn.parameters(),lr=0.0001) #note the use of enn in the optimizer\n",
|
||||
"\n",
|
||||
"for i in range(1000):\n",
|
||||
" #training loop\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
"\n",
|
||||
" output = enn.forward(stocks_and_debris)\n",
|
||||
"\n",
|
||||
" l = lossa(output)\n",
|
||||
"\n",
|
||||
" l.backward()\n",
|
||||
"\n",
|
||||
" optimizer.step()\n",
|
||||
"\n",
|
||||
" if i%10==0:\n",
|
||||
" print(i, l)\n",
|
||||
" \n",
|
||||
"\n",
|
||||
"enn.forward(stocks_and_debris)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "proved-amsterdam",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@ -0,0 +1,239 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "standing-catch",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from torch.autograd.functional import jacobian\n",
|
||||
"import itertools\n",
|
||||
"import math"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "going-accident",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import combined as c"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "severe-employment",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"So, this contains a bunch of initial tests of my abstractions. I eventually need to change these to assertions and package them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"id": "ranking-family",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Instantiate some objects\n",
|
||||
"pm = c.PhysicalModel(1.0,1e-6,0.01,2.0,1e-8)\n",
|
||||
"s = c.States(torch.tensor([1.0,2,3]), torch.tensor([0.0]))\n",
|
||||
"lp = c.LinearProfit(0,0.95,torch.tensor([1.0,0,0]), 5)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "premium-brisbane",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"(tensor([-7.5676e+05, 2.8893e+05, 4.6783e+05, 1.5236e+00]),\n",
|
||||
" <combined.States at 0x7f31f0146c10>)"
|
||||
]
|
||||
},
|
||||
"execution_count": 5,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"c.single_transition(pm,lp,s,est_int)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "raised-worthy",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([-718920.5625, 274490.1562, 444444.6250])"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"minimand, iterated_partials, iterated_state = c.optimality(pm,lp,s,est_int)\n",
|
||||
"minimand"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "horizontal-insight",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([-2285563., -2285557., -2285557.])"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"est_int2 = c.EstimandInterface(iterated_partials,torch.ones(3))\n",
|
||||
"\n",
|
||||
"minimand2, iterated_partials2, iterated_state2 = c.optimality(pm,lp,iterated_state,est_int2)\n",
|
||||
"minimand2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "automatic-builder",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([-2405858.5000, -2405852.5000, -2405852.5000])"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"est_int3 = c.EstimandInterface(iterated_partials2,torch.ones(3))\n",
|
||||
"\n",
|
||||
"minimand3, iterated_partials3, iterated_state3 = c.optimality(pm,lp,iterated_state2,est_int3)\n",
|
||||
"minimand3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "changing-mainland",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"So, this succesfylly let me link the two. I'm going to move to another notebook, clean up, and start integrating the system"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "assumed-midwest",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"model = DoubleNetwork(input_size = 5, output_size=5, layers_size=15)\n",
|
||||
"\n",
|
||||
"data_in = torch.tensor([1.5,2,3,4,5])\n",
|
||||
"\n",
|
||||
"data_in\n",
|
||||
"\n",
|
||||
"target = torch.zeros(5)\n",
|
||||
"\n",
|
||||
"def loss_fn2(output,target):\n",
|
||||
" return sum((output[1] +output[0] - target)**2)\n",
|
||||
" #could add a simplicity assumption i.e. l1 on parameters.\n",
|
||||
"\n",
|
||||
"#Prep Optimizer\n",
|
||||
"optimizer = torch.optim.SGD(model.parameters(),lr=0.01)\n",
|
||||
"\n",
|
||||
"for i in range(20):\n",
|
||||
" #training loop\n",
|
||||
" optimizer.zero_grad()\n",
|
||||
"\n",
|
||||
" output = model.forward(data_in)\n",
|
||||
" output\n",
|
||||
"\n",
|
||||
" l = loss_fn2(output, target)\n",
|
||||
"\n",
|
||||
" l.backward()\n",
|
||||
"\n",
|
||||
" optimizer.step()\n",
|
||||
"\n",
|
||||
" print(\"\\n\",l)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "surprising-fundamentals",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "parliamentary-delta",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "emotional-castle",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "substantial-exhibit",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@ -0,0 +1,383 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "pleasant-equation",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import torch\n",
|
||||
"from torch.autograd.functional import jacobian\n",
|
||||
"import itertools\n",
|
||||
"import math"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "moved-christian",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"import combined as c"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "pressed-slope",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"So, this contains a bunch of initial tests of my abstractions. I eventually need to change these to assertions and package them."
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 3,
|
||||
"id": "capable-equality",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#Instantiate some objects\n",
|
||||
"pm = c.PhysicalModel(1.0,1e-6,0.01,2.0,1e-8)\n",
|
||||
"s = c.States(torch.tensor([1.0,2,3]), torch.tensor([0.0]))\n",
|
||||
"lp = c.LinearProfit(0,0.95,torch.tensor([1.0,0,0]), 5)\n",
|
||||
"est_int = c.EstimandInterface(torch.tensor([[1.0,2,3,2]\n",
|
||||
" ,[4,5,6,2]\n",
|
||||
" ,[7,8,9,2]\n",
|
||||
" ,[1,3,5,7]]\n",
|
||||
" ),torch.ones(3))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 4,
|
||||
"id": "written-experience",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"stocks\ttensor([1., 2., 3.]) \n",
|
||||
"debris\t tensor([0.])\n",
|
||||
"3\n",
|
||||
"1\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"#test State object \n",
|
||||
"print(s)\n",
|
||||
"print(s.number_constellations)\n",
|
||||
"print(s.number_debris_trackers)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "twelve-arthur",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"Launch Decisions and Partial Derivativs of value function with\n",
|
||||
"\tlaunches\n",
|
||||
"\t\t tensor([1., 1., 1.])\n",
|
||||
"\tPartials\n",
|
||||
"\t\ttensor([[1., 2., 3., 2.],\n",
|
||||
" [4., 5., 6., 2.],\n",
|
||||
" [7., 8., 9., 2.],\n",
|
||||
" [1., 3., 5., 7.]])\n",
|
||||
"tensor([1., 1., 1.]) tensor([[1., 2., 3., 2.],\n",
|
||||
" [4., 5., 6., 2.],\n",
|
||||
" [7., 8., 9., 2.],\n",
|
||||
" [1., 3., 5., 7.]])\n",
|
||||
"tensor(1.)\n",
|
||||
"tensor([0., 1., 0.])\n",
|
||||
"tensor([2., 5., 8., 3.])\n",
|
||||
"tensor([[0., 2., 0., 0.],\n",
|
||||
" [0., 5., 0., 0.],\n",
|
||||
" [0., 8., 0., 0.],\n",
|
||||
" [0., 3., 0., 0.]])\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"#Test estimand interface\n",
|
||||
"print(est_int)\n",
|
||||
"print(est_int.launches,est_int.partials)\n",
|
||||
"\n",
|
||||
"print(est_int.launch_single(1))\n",
|
||||
"print(est_int.launch_vector(1))\n",
|
||||
"print(est_int.partial_vector(1)) \n",
|
||||
"print(est_int.partial_matrix(1)) #TODO: double check orientation"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "impressive-tribe",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"\n",
|
||||
"1.0\n",
|
||||
"1e-06\n",
|
||||
"0.01\n",
|
||||
"2.0\n",
|
||||
"1e-08\n",
|
||||
"tensor([1.0133e-06, 2.0266e-06, 2.9802e-06])\n",
|
||||
"tensor([1., 2., 3.]) tensor([0.])\n",
|
||||
"tensor([1.0000, 1.0000, 1.0000]) tensor([12.0000])\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"#Test physical model methods\n",
|
||||
"print(pm)\n",
|
||||
"print(pm.survival(s))\n",
|
||||
"s2 = pm.transition(s,est_int)\n",
|
||||
"print(s.stocks,s.debris)\n",
|
||||
"print(s2.stocks,s2.debris)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "stretch-reward",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"LinearProfit\n",
|
||||
" Benefit weights:\ttensor([1., 0., 0.])\n",
|
||||
" launch cost:\t5\n",
|
||||
" Deorbit cost:\t0\n",
|
||||
"tensor(-4.)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"#test linear profit object\n",
|
||||
"print(lp)\n",
|
||||
"print(lp.period_benefit(s,est_int))"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 8,
|
||||
"id": "advance-folder",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([1., 0., 0., 0.])"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"lp._period_benefit_jacobian_wrt_states( s.stocks, s.debris, est_int.launches)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 9,
|
||||
"id": "posted-subscriber",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([-5., 0., 0.])"
|
||||
]
|
||||
},
|
||||
"execution_count": 9,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"lp._period_benefit_jacobian_wrt_launches( s.stocks, s.debris, est_int.launches)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 10,
|
||||
"id": "divine-agenda",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([1., 0., 0., 0.])"
|
||||
]
|
||||
},
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"lp.period_benefit_jacobian_wrt_states( s, est_int)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 11,
|
||||
"id": "surgical-direction",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([[1., 2., 3., 2.],\n",
|
||||
" [4., 5., 6., 2.],\n",
|
||||
" [7., 8., 9., 2.],\n",
|
||||
" [1., 3., 5., 7.]])"
|
||||
]
|
||||
},
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"est_int.partials"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"id": "mounted-roots",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"(tensor([-7.5676e+05, 2.8893e+05, 4.6783e+05, 1.5236e+00]),\n",
|
||||
" <combined.States at 0x7f8c3c9c54f0>)"
|
||||
]
|
||||
},
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"c.single_transition(pm,lp,s,est_int)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 24,
|
||||
"id": "pediatric-iceland",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([-718920.5625, 274490.1562, 444444.6250])"
|
||||
]
|
||||
},
|
||||
"execution_count": 24,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"minimand, iterated_partials, iterated_state = c.optimality(pm,lp,s,est_int)\n",
|
||||
"minimand"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "isolated-cleveland",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([-2285563., -2285557., -2285557.])"
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"est_int2 = c.EstimandInterface(iterated_partials,torch.ones(3))\n",
|
||||
"\n",
|
||||
"minimand2, iterated_partials2, iterated_state2 = c.optimality(pm,lp,iterated_state,est_int2)\n",
|
||||
"minimand2"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 27,
|
||||
"id": "relevant-romance",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"tensor([-2405858.5000, -2405852.5000, -2405852.5000])"
|
||||
]
|
||||
},
|
||||
"execution_count": 27,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"est_int3 = c.EstimandInterface(iterated_partials2,torch.ones(3))\n",
|
||||
"\n",
|
||||
"minimand3, iterated_partials3, iterated_state3 = c.optimality(pm,lp,iterated_state2,est_int3)\n",
|
||||
"minimand3"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "markdown",
|
||||
"id": "israeli-oracle",
|
||||
"metadata": {},
|
||||
"source": [
|
||||
"So, this succesfylly let me link the two. I'm going to move to another notebook, clean up, and start integrating the system"
|
||||
]
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Python 3",
|
||||
"language": "python",
|
||||
"name": "python3"
|
||||
},
|
||||
"language_info": {
|
||||
"codemirror_mode": {
|
||||
"name": "ipython",
|
||||
"version": 3
|
||||
},
|
||||
"file_extension": ".py",
|
||||
"mimetype": "text/x-python",
|
||||
"name": "python",
|
||||
"nbconvert_exporter": "python",
|
||||
"pygments_lexer": "ipython3",
|
||||
"version": "3.8.8"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
@ -0,0 +1,117 @@
|
||||
\documentclass{article}
|
||||
|
||||
%Setup Subfiles
|
||||
\usepackage{subfiles}
|
||||
|
||||
|
||||
|
||||
%Include preambles
|
||||
\input{assets/preambles/MathPreamble}
|
||||
%\input{assets/preambles/TikzitPreamble}
|
||||
\input{assets/preambles/BibPreamble}
|
||||
\input{assets/preambles/GeneralPreamble}
|
||||
|
||||
\title{Summary of my work on Satellite Constellations}
|
||||
\author{William King}
|
||||
|
||||
\begin{document}
|
||||
\maketitle
|
||||
|
||||
\section{Introduction}
|
||||
\subfile{sections/00_Introduction} %Currently from past semesters.
|
||||
%roughly done 2021-07-15
|
||||
|
||||
%Describe sections
|
||||
The paper is organized as follows.
|
||||
Section \ref{SEC:Models}, describes the laws of motion
|
||||
governing satellites and debris (\ref{SEC:Laws})%,
|
||||
%places limits on various measures of risk (\ref{SEC:Survival}),
|
||||
and reviews various definitions of Kessler syndrome in
|
||||
the context of dynamic modeling, their advantages, and their disadvantages (\ref{SEC:Kessler}).
|
||||
It then describes the dynamic problem faced by constellation operators
|
||||
(\ref{SEC:Operator}) and social planners (\ref{SEC:Planner}).
|
||||
Section \ref{SEC:Computation} describes the computational approach and the
|
||||
results are reported in \cref{SEC:Results}.
|
||||
Section \ref{SEC:Conclusion} concludes with a discussion of limitations, concerns,
|
||||
and remaining policy questions.
|
||||
|
||||
%\section{Modeling the Environment}\label{SEC:Environment}
|
||||
\section{Model}\label{SEC:Models}
|
||||
\subsection{Laws of motion}\label{SEC:Laws}
|
||||
\subfile{sections/01_LawsOfMotion} %Roughly done 2021-07-15
|
||||
|
||||
%\subsection{Marginal survival rates}\label{SEC:Survival}
|
||||
%\subfile{sections/03_SurvivalAnalysis} %roughly done 2021-07-14
|
||||
% Thoughts on removal: This doesn't add much to the actual questions.
|
||||
% It is interesting, but in a paper is just too much.
|
||||
% I'll keep it here to add it back easily.
|
||||
|
||||
\subsection{Kessler Syndrome}\label{SEC:Kessler}
|
||||
% Kessler syndrome follows laws of motion because it is the main
|
||||
% threat of orbital pollution and needs to be included.
|
||||
% Also, there is not really a better place to place it.
|
||||
\subfile{sections/02_KesslerSyndrome} %roughly done before 2021-07-14
|
||||
\subfile{sections/06_KesslerRegion} %roughly done before 2021-07-14
|
||||
|
||||
\subsection{Constellation Operator's Program}\label{SEC:Operator}
|
||||
\subfile{sections/04_ConstellationOperator} %Reasonably done.
|
||||
|
||||
\subsection{Social Planner's Program}\label{SEC:Planner}
|
||||
\subfile{sections/05_SocialPlanner} %Reasonably done?
|
||||
|
||||
\section{Computation}\label{SEC:Computation}
|
||||
\subfile{sections/07_ComputationalApproach} %needs some clarifications.
|
||||
|
||||
\section{Results}\label{SEC:Results}
|
||||
\subfile{sections/09_Results} %TODO
|
||||
|
||||
\section{Conclusion}\label{SEC:Conclusion}
|
||||
\subfile{sections/08_Conclusion} %TODO
|
||||
|
||||
|
||||
\newpage
|
||||
\section{References}
|
||||
\printbibliography
|
||||
\newpage
|
||||
\section{Appendices}
|
||||
\subsection{Mathematical Notation}
|
||||
Needs to be completed.
|
||||
%\subsection{Deriving Marginal Survival Rates}\label{APX:Derivations:SurvivalRates}
|
||||
%\subfile{sections/apx_01_MarginalSurvivalRates}
|
||||
|
||||
\subsection{Deriving Euler Equations}\label{APX:Derivations:EulerEquations}
|
||||
\subfile{sections/apx_02_GeneralizedEuEqSteps}
|
||||
|
||||
|
||||
\subsection{Collected Assumptions and Caveats}\label{APX:CollectedAssumptions}
|
||||
I hope to write a section clearly explaining assumptions, caveats, and shortcomings here.
|
||||
These will later get written back into the other sections, but I want to collect them
|
||||
in a single place first.
|
||||
%time periods are long enough for debris to disperse after collisions.
|
||||
%Only a single type of debris
|
||||
%With my current computational idea; each constellation provides the same risk to each other constellation
|
||||
% That can be easily adjusted in the computational models.
|
||||
|
||||
\newpage
|
||||
%Just for simplicity, remove later
|
||||
\tableofcontents
|
||||
\end{document}
|
||||
|
||||
%%% Notes to keep track of
|
||||
% Possible other things to investigate
|
||||
% - Free-entry conditions: which of the following?
|
||||
% - When for every operator, the current stocks imply no more launches(x = 0).
|
||||
% - When for every type of operator, the current stocks plus an own stock of 0 imply no more launches(x = 0).
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
%
|
||||
@ -0,0 +1,13 @@
|
||||
{
|
||||
// See https://go.microsoft.com/fwlink/?LinkId=827846 to learn about workspace recommendations.
|
||||
// Extension identifier format: ${publisher}.${name}. Example: vscode.csharp
|
||||
|
||||
// List of extensions which should be recommended for users of this workspace.
|
||||
"recommendations": [
|
||||
|
||||
],
|
||||
// List of extensions recommended by VS Code that should not be recommended for users of this workspace.
|
||||
"unwantedRecommendations": [
|
||||
|
||||
]
|
||||
}
|
||||
@ -0,0 +1,4 @@
|
||||
{
|
||||
"python.pythonPath": "/bin/python3",
|
||||
"editor.detectIndentation": false
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,523 @@
|
||||
# This file is machine-generated - editing it directly is not advised
|
||||
|
||||
[[AbstractFFTs]]
|
||||
deps = ["LinearAlgebra"]
|
||||
git-tree-sha1 = "485ee0867925449198280d4af84bdb46a2a404d0"
|
||||
uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c"
|
||||
version = "1.0.1"
|
||||
|
||||
[[AbstractTrees]]
|
||||
git-tree-sha1 = "03e0550477d86222521d254b741d470ba17ea0b5"
|
||||
uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c"
|
||||
version = "0.3.4"
|
||||
|
||||
[[Adapt]]
|
||||
deps = ["LinearAlgebra"]
|
||||
git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7"
|
||||
uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e"
|
||||
version = "3.3.1"
|
||||
|
||||
[[ArgTools]]
|
||||
uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f"
|
||||
|
||||
[[ArrayInterface]]
|
||||
deps = ["Compat", "IfElse", "LinearAlgebra", "Requires", "SparseArrays", "Static"]
|
||||
git-tree-sha1 = "e527b258413e0c6d4f66ade574744c94edef81f8"
|
||||
uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9"
|
||||
version = "3.1.40"
|
||||
|
||||
[[Artifacts]]
|
||||
uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
|
||||
|
||||
[[BFloat16s]]
|
||||
deps = ["LinearAlgebra", "Printf", "Random", "Test"]
|
||||
git-tree-sha1 = "a598ecb0d717092b5539dbbe890c98bac842b072"
|
||||
uuid = "ab4f0b2a-ad5b-11e8-123f-65d77653426b"
|
||||
version = "0.2.0"
|
||||
|
||||
[[BSON]]
|
||||
git-tree-sha1 = "ebcd6e22d69f21249b7b8668351ebf42d6dc87a1"
|
||||
uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
|
||||
version = "0.3.4"
|
||||
|
||||
[[Base64]]
|
||||
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
|
||||
|
||||
[[CEnum]]
|
||||
git-tree-sha1 = "215a9aa4a1f23fbd05b92769fdd62559488d70e9"
|
||||
uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82"
|
||||
version = "0.4.1"
|
||||
|
||||
[[CUDA]]
|
||||
deps = ["AbstractFFTs", "Adapt", "BFloat16s", "CEnum", "CompilerSupportLibraries_jll", "ExprTools", "GPUArrays", "GPUCompiler", "LLVM", "LazyArtifacts", "Libdl", "LinearAlgebra", "Logging", "Printf", "Random", "Random123", "RandomNumbers", "Reexport", "Requires", "SparseArrays", "SpecialFunctions", "TimerOutputs"]
|
||||
git-tree-sha1 = "2c8329f16addffd09e6ca84c556e2185a4933c64"
|
||||
uuid = "052768ef-5323-5732-b1bb-66c8b64840ba"
|
||||
version = "3.5.0"
|
||||
|
||||
[[ChainRules]]
|
||||
deps = ["ChainRulesCore", "Compat", "LinearAlgebra", "Random", "RealDot", "Statistics"]
|
||||
git-tree-sha1 = "035ef8a5382a614b2d8e3091b6fdbb1c2b050e11"
|
||||
uuid = "082447d4-558c-5d27-93f4-14fc19e9eca2"
|
||||
version = "1.12.1"
|
||||
|
||||
[[ChainRulesCore]]
|
||||
deps = ["Compat", "LinearAlgebra", "SparseArrays"]
|
||||
git-tree-sha1 = "f885e7e7c124f8c92650d61b9477b9ac2ee607dd"
|
||||
uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4"
|
||||
version = "1.11.1"
|
||||
|
||||
[[ChangesOfVariables]]
|
||||
deps = ["LinearAlgebra", "Test"]
|
||||
git-tree-sha1 = "9a1d594397670492219635b35a3d830b04730d62"
|
||||
uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0"
|
||||
version = "0.1.1"
|
||||
|
||||
[[CodecZlib]]
|
||||
deps = ["TranscodingStreams", "Zlib_jll"]
|
||||
git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da"
|
||||
uuid = "944b1d66-785c-5afd-91f1-9de20f533193"
|
||||
version = "0.7.0"
|
||||
|
||||
[[ColorTypes]]
|
||||
deps = ["FixedPointNumbers", "Random"]
|
||||
git-tree-sha1 = "024fe24d83e4a5bf5fc80501a314ce0d1aa35597"
|
||||
uuid = "3da002f7-5984-5a60-b8a6-cbb66c0b333f"
|
||||
version = "0.11.0"
|
||||
|
||||
[[Colors]]
|
||||
deps = ["ColorTypes", "FixedPointNumbers", "Reexport"]
|
||||
git-tree-sha1 = "417b0ed7b8b838aa6ca0a87aadf1bb9eb111ce40"
|
||||
uuid = "5ae59095-9a9b-59fe-a467-6f913c188581"
|
||||
version = "0.12.8"
|
||||
|
||||
[[CommonSubexpressions]]
|
||||
deps = ["MacroTools", "Test"]
|
||||
git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7"
|
||||
uuid = "bbf7d656-a473-5ed7-a52c-81e309532950"
|
||||
version = "0.3.0"
|
||||
|
||||
[[Compat]]
|
||||
deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"]
|
||||
git-tree-sha1 = "dce3e3fea680869eaa0b774b2e8343e9ff442313"
|
||||
uuid = "34da2185-b29b-5c13-b0c7-acf172513d20"
|
||||
version = "3.40.0"
|
||||
|
||||
[[CompilerSupportLibraries_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
|
||||
|
||||
[[DataAPI]]
|
||||
git-tree-sha1 = "cc70b17275652eb47bc9e5f81635981f13cea5c8"
|
||||
uuid = "9a962f9c-6df0-11e9-0e5d-c546b8b5ee8a"
|
||||
version = "1.9.0"
|
||||
|
||||
[[DataStructures]]
|
||||
deps = ["Compat", "InteractiveUtils", "OrderedCollections"]
|
||||
git-tree-sha1 = "7d9d316f04214f7efdbb6398d545446e246eff02"
|
||||
uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
|
||||
version = "0.18.10"
|
||||
|
||||
[[Dates]]
|
||||
deps = ["Printf"]
|
||||
uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
|
||||
|
||||
[[DelimitedFiles]]
|
||||
deps = ["Mmap"]
|
||||
uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab"
|
||||
|
||||
[[DiffResults]]
|
||||
deps = ["StaticArrays"]
|
||||
git-tree-sha1 = "c18e98cba888c6c25d1c3b048e4b3380ca956805"
|
||||
uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5"
|
||||
version = "1.0.3"
|
||||
|
||||
[[DiffRules]]
|
||||
deps = ["LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"]
|
||||
git-tree-sha1 = "3287dacf67c3652d3fed09f4c12c187ae4dbb89a"
|
||||
uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
|
||||
version = "1.4.0"
|
||||
|
||||
[[Distributed]]
|
||||
deps = ["Random", "Serialization", "Sockets"]
|
||||
uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
|
||||
|
||||
[[DocStringExtensions]]
|
||||
deps = ["LibGit2"]
|
||||
git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b"
|
||||
uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
|
||||
version = "0.8.6"
|
||||
|
||||
[[Downloads]]
|
||||
deps = ["ArgTools", "LibCURL", "NetworkOptions"]
|
||||
uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6"
|
||||
|
||||
[[ExprTools]]
|
||||
git-tree-sha1 = "b7e3d17636b348f005f11040025ae8c6f645fe92"
|
||||
uuid = "e2ba6199-217a-4e67-a87a-7c52f15ade04"
|
||||
version = "0.1.6"
|
||||
|
||||
[[FillArrays]]
|
||||
deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"]
|
||||
git-tree-sha1 = "8756f9935b7ccc9064c6eef0bff0ad643df733a3"
|
||||
uuid = "1a297f60-69ca-5386-bcde-b61e274b549b"
|
||||
version = "0.12.7"
|
||||
|
||||
[[FixedPointNumbers]]
|
||||
deps = ["Statistics"]
|
||||
git-tree-sha1 = "335bfdceacc84c5cdf16aadc768aa5ddfc5383cc"
|
||||
uuid = "53c48c17-4a7d-5ca2-90c5-79b7896eea93"
|
||||
version = "0.8.4"
|
||||
|
||||
[[Flux]]
|
||||
deps = ["AbstractTrees", "Adapt", "ArrayInterface", "CUDA", "CodecZlib", "Colors", "DelimitedFiles", "Functors", "Juno", "LinearAlgebra", "MacroTools", "NNlib", "NNlibCUDA", "Pkg", "Printf", "Random", "Reexport", "SHA", "SparseArrays", "Statistics", "StatsBase", "Test", "ZipFile", "Zygote"]
|
||||
git-tree-sha1 = "e8b37bb43c01eed0418821d1f9d20eca5ba6ab21"
|
||||
uuid = "587475ba-b771-5e3f-ad9e-33799f191a9c"
|
||||
version = "0.12.8"
|
||||
|
||||
[[ForwardDiff]]
|
||||
deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"]
|
||||
git-tree-sha1 = "6406b5112809c08b1baa5703ad274e1dded0652f"
|
||||
uuid = "f6369f11-7733-5829-9624-2563aa707210"
|
||||
version = "0.10.23"
|
||||
|
||||
[[Functors]]
|
||||
git-tree-sha1 = "e4768c3b7f597d5a352afa09874d16e3c3f6ead2"
|
||||
uuid = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
|
||||
version = "0.2.7"
|
||||
|
||||
[[GPUArrays]]
|
||||
deps = ["Adapt", "LinearAlgebra", "Printf", "Random", "Serialization", "Statistics"]
|
||||
git-tree-sha1 = "7772508f17f1d482fe0df72cabc5b55bec06bbe0"
|
||||
uuid = "0c68f7d7-f131-5f86-a1c3-88cf8149b2d7"
|
||||
version = "8.1.2"
|
||||
|
||||
[[GPUCompiler]]
|
||||
deps = ["ExprTools", "InteractiveUtils", "LLVM", "Libdl", "Logging", "TimerOutputs", "UUIDs"]
|
||||
git-tree-sha1 = "77d915a0af27d474f0aaf12fcd46c400a552e84c"
|
||||
uuid = "61eb1bfa-7361-4325-ad38-22787b887f55"
|
||||
version = "0.13.7"
|
||||
|
||||
[[IRTools]]
|
||||
deps = ["InteractiveUtils", "MacroTools", "Test"]
|
||||
git-tree-sha1 = "95215cd0076a150ef46ff7928892bc341864c73c"
|
||||
uuid = "7869d1d1-7146-5819-86e3-90919afe41df"
|
||||
version = "0.4.3"
|
||||
|
||||
[[IfElse]]
|
||||
git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1"
|
||||
uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173"
|
||||
version = "0.1.1"
|
||||
|
||||
[[InteractiveUtils]]
|
||||
deps = ["Markdown"]
|
||||
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
|
||||
|
||||
[[InverseFunctions]]
|
||||
deps = ["Test"]
|
||||
git-tree-sha1 = "a7254c0acd8e62f1ac75ad24d5db43f5f19f3c65"
|
||||
uuid = "3587e190-3f89-42d0-90ee-14403ec27112"
|
||||
version = "0.1.2"
|
||||
|
||||
[[IrrationalConstants]]
|
||||
git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151"
|
||||
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
|
||||
version = "0.1.1"
|
||||
|
||||
[[JLLWrappers]]
|
||||
deps = ["Preferences"]
|
||||
git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e"
|
||||
uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
|
||||
version = "1.3.0"
|
||||
|
||||
[[Juno]]
|
||||
deps = ["Base64", "Logging", "Media", "Profile"]
|
||||
git-tree-sha1 = "07cb43290a840908a771552911a6274bc6c072c7"
|
||||
uuid = "e5e0dc1b-0480-54bc-9374-aad01c23163d"
|
||||
version = "0.8.4"
|
||||
|
||||
[[LLVM]]
|
||||
deps = ["CEnum", "LLVMExtra_jll", "Libdl", "Printf", "Unicode"]
|
||||
git-tree-sha1 = "46092047ca4edc10720ecab437c42283cd7c44f3"
|
||||
uuid = "929cbde3-209d-540e-8aea-75f648917ca0"
|
||||
version = "4.6.0"
|
||||
|
||||
[[LLVMExtra_jll]]
|
||||
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
|
||||
git-tree-sha1 = "9436f02a0c9f726d914cc6539f87850701be18fc"
|
||||
uuid = "dad2f222-ce93-54a1-a47d-0025e8a3acab"
|
||||
version = "0.0.12+0"
|
||||
|
||||
[[LazyArtifacts]]
|
||||
deps = ["Artifacts", "Pkg"]
|
||||
uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3"
|
||||
|
||||
[[LibCURL]]
|
||||
deps = ["LibCURL_jll", "MozillaCACerts_jll"]
|
||||
uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21"
|
||||
|
||||
[[LibCURL_jll]]
|
||||
deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"]
|
||||
uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0"
|
||||
|
||||
[[LibGit2]]
|
||||
deps = ["Base64", "NetworkOptions", "Printf", "SHA"]
|
||||
uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
|
||||
|
||||
[[LibSSH2_jll]]
|
||||
deps = ["Artifacts", "Libdl", "MbedTLS_jll"]
|
||||
uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8"
|
||||
|
||||
[[Libdl]]
|
||||
uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
|
||||
|
||||
[[LinearAlgebra]]
|
||||
deps = ["Libdl"]
|
||||
uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
|
||||
|
||||
[[LogExpFunctions]]
|
||||
deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"]
|
||||
git-tree-sha1 = "be9eef9f9d78cecb6f262f3c10da151a6c5ab827"
|
||||
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
|
||||
version = "0.3.5"
|
||||
|
||||
[[Logging]]
|
||||
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
|
||||
|
||||
[[MacroTools]]
|
||||
deps = ["Markdown", "Random"]
|
||||
git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf"
|
||||
uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09"
|
||||
version = "0.5.9"
|
||||
|
||||
[[Markdown]]
|
||||
deps = ["Base64"]
|
||||
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
|
||||
|
||||
[[MbedTLS_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1"
|
||||
|
||||
[[Media]]
|
||||
deps = ["MacroTools", "Test"]
|
||||
git-tree-sha1 = "75a54abd10709c01f1b86b84ec225d26e840ed58"
|
||||
uuid = "e89f7d12-3494-54d1-8411-f7d8b9ae1f27"
|
||||
version = "0.5.0"
|
||||
|
||||
[[Missings]]
|
||||
deps = ["DataAPI"]
|
||||
git-tree-sha1 = "bf210ce90b6c9eed32d25dbcae1ebc565df2687f"
|
||||
uuid = "e1d29d7a-bbdc-5cf2-9ac0-f12de2c33e28"
|
||||
version = "1.0.2"
|
||||
|
||||
[[Mmap]]
|
||||
uuid = "a63ad114-7e13-5084-954f-fe012c677804"
|
||||
|
||||
[[MozillaCACerts_jll]]
|
||||
uuid = "14a3606d-f60d-562e-9121-12d972cd8159"
|
||||
|
||||
[[NNlib]]
|
||||
deps = ["Adapt", "ChainRulesCore", "Compat", "LinearAlgebra", "Pkg", "Requires", "Statistics"]
|
||||
git-tree-sha1 = "2eb305b13eaed91d7da14269bf17ce6664bfee3d"
|
||||
uuid = "872c559c-99b0-510c-b3b7-b6c96a88d5cd"
|
||||
version = "0.7.31"
|
||||
|
||||
[[NNlibCUDA]]
|
||||
deps = ["CUDA", "LinearAlgebra", "NNlib", "Random", "Statistics"]
|
||||
git-tree-sha1 = "38358632d9c277f7bf8d202c127f601e8467aa4d"
|
||||
uuid = "a00861dc-f156-4864-bf3c-e6376f28a68d"
|
||||
version = "0.1.10"
|
||||
|
||||
[[NaNMath]]
|
||||
git-tree-sha1 = "bfe47e760d60b82b66b61d2d44128b62e3a369fb"
|
||||
uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
|
||||
version = "0.3.5"
|
||||
|
||||
[[NetworkOptions]]
|
||||
uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
|
||||
|
||||
[[OpenLibm_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
|
||||
|
||||
[[OpenSpecFun_jll]]
|
||||
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
|
||||
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
|
||||
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
|
||||
version = "0.5.5+0"
|
||||
|
||||
[[OrderedCollections]]
|
||||
git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c"
|
||||
uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
|
||||
version = "1.4.1"
|
||||
|
||||
[[Pkg]]
|
||||
deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"]
|
||||
uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
|
||||
|
||||
[[Preferences]]
|
||||
deps = ["TOML"]
|
||||
git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a"
|
||||
uuid = "21216c6a-2e73-6563-6e65-726566657250"
|
||||
version = "1.2.2"
|
||||
|
||||
[[Printf]]
|
||||
deps = ["Unicode"]
|
||||
uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
|
||||
|
||||
[[Profile]]
|
||||
deps = ["Printf"]
|
||||
uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79"
|
||||
|
||||
[[REPL]]
|
||||
deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"]
|
||||
uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
|
||||
|
||||
[[Random]]
|
||||
deps = ["Serialization"]
|
||||
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
|
||||
|
||||
[[Random123]]
|
||||
deps = ["Libdl", "Random", "RandomNumbers"]
|
||||
git-tree-sha1 = "0e8b146557ad1c6deb1367655e052276690e71a3"
|
||||
uuid = "74087812-796a-5b5d-8853-05524746bad3"
|
||||
version = "1.4.2"
|
||||
|
||||
[[RandomNumbers]]
|
||||
deps = ["Random", "Requires"]
|
||||
git-tree-sha1 = "043da614cc7e95c703498a491e2c21f58a2b8111"
|
||||
uuid = "e6cf234a-135c-5ec9-84dd-332b85af5143"
|
||||
version = "1.5.3"
|
||||
|
||||
[[RealDot]]
|
||||
deps = ["LinearAlgebra"]
|
||||
git-tree-sha1 = "9f0a1b71baaf7650f4fa8a1d168c7fb6ee41f0c9"
|
||||
uuid = "c1ae055f-0cd5-4b69-90a6-9a35b1a98df9"
|
||||
version = "0.1.0"
|
||||
|
||||
[[Reexport]]
|
||||
git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b"
|
||||
uuid = "189a3867-3050-52da-a836-e630ba90ab69"
|
||||
version = "1.2.2"
|
||||
|
||||
[[Requires]]
|
||||
deps = ["UUIDs"]
|
||||
git-tree-sha1 = "4036a3bd08ac7e968e27c203d45f5fff15020621"
|
||||
uuid = "ae029012-a4dd-5104-9daa-d747884805df"
|
||||
version = "1.1.3"
|
||||
|
||||
[[SHA]]
|
||||
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
|
||||
|
||||
[[Serialization]]
|
||||
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
|
||||
|
||||
[[SharedArrays]]
|
||||
deps = ["Distributed", "Mmap", "Random", "Serialization"]
|
||||
uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383"
|
||||
|
||||
[[Sockets]]
|
||||
uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
|
||||
|
||||
[[SortingAlgorithms]]
|
||||
deps = ["DataStructures"]
|
||||
git-tree-sha1 = "b3363d7460f7d098ca0912c69b082f75625d7508"
|
||||
uuid = "a2af1166-a08f-5f64-846c-94a0d3cef48c"
|
||||
version = "1.0.1"
|
||||
|
||||
[[SparseArrays]]
|
||||
deps = ["LinearAlgebra", "Random"]
|
||||
uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
|
||||
|
||||
[[SpecialFunctions]]
|
||||
deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
|
||||
git-tree-sha1 = "f0bccf98e16759818ffc5d97ac3ebf87eb950150"
|
||||
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
|
||||
version = "1.8.1"
|
||||
|
||||
[[Static]]
|
||||
deps = ["IfElse"]
|
||||
git-tree-sha1 = "e7bc80dc93f50857a5d1e3c8121495852f407e6a"
|
||||
uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3"
|
||||
version = "0.4.0"
|
||||
|
||||
[[StaticArrays]]
|
||||
deps = ["LinearAlgebra", "Random", "Statistics"]
|
||||
git-tree-sha1 = "3c76dde64d03699e074ac02eb2e8ba8254d428da"
|
||||
uuid = "90137ffa-7385-5640-81b9-e52037218182"
|
||||
version = "1.2.13"
|
||||
|
||||
[[Statistics]]
|
||||
deps = ["LinearAlgebra", "SparseArrays"]
|
||||
uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
|
||||
|
||||
[[StatsAPI]]
|
||||
git-tree-sha1 = "1958272568dc176a1d881acb797beb909c785510"
|
||||
uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
|
||||
version = "1.0.0"
|
||||
|
||||
[[StatsBase]]
|
||||
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
|
||||
git-tree-sha1 = "2bb0cb32026a66037360606510fca5984ccc6b75"
|
||||
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
|
||||
version = "0.33.13"
|
||||
|
||||
[[TOML]]
|
||||
deps = ["Dates"]
|
||||
uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
|
||||
|
||||
[[Tar]]
|
||||
deps = ["ArgTools", "SHA"]
|
||||
uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e"
|
||||
|
||||
[[Test]]
|
||||
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
|
||||
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
|
||||
|
||||
[[TimerOutputs]]
|
||||
deps = ["ExprTools", "Printf"]
|
||||
git-tree-sha1 = "7cb456f358e8f9d102a8b25e8dfedf58fa5689bc"
|
||||
uuid = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f"
|
||||
version = "0.5.13"
|
||||
|
||||
[[TranscodingStreams]]
|
||||
deps = ["Random", "Test"]
|
||||
git-tree-sha1 = "216b95ea110b5972db65aa90f88d8d89dcb8851c"
|
||||
uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa"
|
||||
version = "0.9.6"
|
||||
|
||||
[[UUIDs]]
|
||||
deps = ["Random", "SHA"]
|
||||
uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
|
||||
|
||||
[[Unicode]]
|
||||
uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
|
||||
|
||||
[[ZipFile]]
|
||||
deps = ["Libdl", "Printf", "Zlib_jll"]
|
||||
git-tree-sha1 = "3593e69e469d2111389a9bd06bac1f3d730ac6de"
|
||||
uuid = "a5390f91-8eb1-5f08-bee0-b1d1ffed6cea"
|
||||
version = "0.9.4"
|
||||
|
||||
[[Zlib_jll]]
|
||||
deps = ["Libdl"]
|
||||
uuid = "83775a58-1f1d-513f-b197-d71354ab007a"
|
||||
|
||||
[[Zygote]]
|
||||
deps = ["AbstractFFTs", "ChainRules", "ChainRulesCore", "DiffRules", "Distributed", "FillArrays", "ForwardDiff", "IRTools", "InteractiveUtils", "LinearAlgebra", "MacroTools", "NaNMath", "Random", "Requires", "SpecialFunctions", "Statistics", "ZygoteRules"]
|
||||
git-tree-sha1 = "2c30f2df0ba43c17e88c8b55b5b22c401f7cde4e"
|
||||
uuid = "e88e6eb3-aa80-5325-afca-941959d7151f"
|
||||
version = "0.6.30"
|
||||
|
||||
[[ZygoteRules]]
|
||||
deps = ["MacroTools"]
|
||||
git-tree-sha1 = "8c1a8e4dfacb1fd631745552c8db35d0deb09ea0"
|
||||
uuid = "700de1a5-db45-46bc-99cf-38207098b444"
|
||||
version = "0.2.2"
|
||||
|
||||
[[nghttp2_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d"
|
||||
|
||||
[[p7zip_jll]]
|
||||
deps = ["Artifacts", "Libdl"]
|
||||
uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0"
|
||||
@ -0,0 +1,5 @@
|
||||
[deps]
|
||||
BSON = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0"
|
||||
Flux = "587475ba-b771-5e3f-ad9e-33799f191a9c"
|
||||
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
|
||||
Zygote = "e88e6eb3-aa80-5325-afca-941959d7151f"
|
||||
@ -0,0 +1 @@
|
||||
Description goes here
|
||||
@ -0,0 +1,81 @@
|
||||
include("../src/Orbits.jl")
|
||||
using .Orbits
|
||||
using LinearAlgebra
|
||||
|
||||
#Set key dimensions
|
||||
const N_constellations = 4
|
||||
const N_debris = 1
|
||||
const N_states = N_constellations + N_debris
|
||||
|
||||
#Setup Economic Models
|
||||
em2_a = LinearModel(0.95, [1 -0.02 -0.02 0], [5.0 0 0 0])
|
||||
em2_b = LinearModel(0.95, [-0.02 1 -0.02 0], [0.0 5 0 0])
|
||||
em2_c = LinearModel(0.95, [0 -0.02 1 -0.02], [0.0 0 5 0])
|
||||
em2_d = LinearModel(0.95, [0 -0.02 -0.02 1], [0.0 0 0 5])
|
||||
|
||||
#Setup Physics
|
||||
|
||||
basic_model = BasicPhysics(
|
||||
0.002
|
||||
,0.002*(ones(N_constellations,N_constellations) - LinearAlgebra.I)
|
||||
,0.01
|
||||
,0.001
|
||||
,5.0
|
||||
,0.05
|
||||
)
|
||||
|
||||
# Setup NN
|
||||
bg = BranchGenerator(N_constellations)
|
||||
operators_policy = bg(operator_policy_function_generator(N_constellations,N_debris),vcat)
|
||||
planners_policy = bg(operator_policy_function_generator(N_constellations,N_debris),vcat)
|
||||
planners_value = value_function_generator(64)
|
||||
|
||||
|
||||
# Setup Operators
|
||||
const operator_array = [
|
||||
#first operator
|
||||
OperatorLoss(
|
||||
em2_a
|
||||
,value_function_generator()
|
||||
,operators_policy #this is held by all operators
|
||||
,params(operators_policy[2][1]) #first operator gets first branch of params
|
||||
,basic_model
|
||||
)
|
||||
,OperatorLoss(
|
||||
em2_b
|
||||
,value_function_generator()
|
||||
,operators_policy #this is held by all operators
|
||||
,params(operators_policy[2][2]) #first operator gets first branch of params
|
||||
,basic_model
|
||||
)
|
||||
,OperatorLoss(
|
||||
em2_c
|
||||
,value_function_generator()
|
||||
,operators_policy #this is held by all operators
|
||||
,params(operators_policy[2][3]) #first operator gets first branch of params
|
||||
,basic_model
|
||||
)
|
||||
,OperatorLoss(
|
||||
em2_d
|
||||
,value_function_generator()
|
||||
,operators_policy #this is held by all operators
|
||||
,params(operators_policy[2][4]) #first operator gets first branch of params
|
||||
,basic_model
|
||||
)
|
||||
]
|
||||
|
||||
#sanity check time
|
||||
@assert length(operator_array) == N_constellations "Mismatch in predetermined number of constellations and the number of operators initialized"
|
||||
|
||||
# Setup Planner
|
||||
pl = PlannerLoss(
|
||||
0.95
|
||||
,operator_array
|
||||
,planners_policy
|
||||
,params(planners_policy)
|
||||
,planners_value
|
||||
,params(planners_value)
|
||||
,basic_model
|
||||
)
|
||||
|
||||
# Export Planner
|
||||
@ -0,0 +1,54 @@
|
||||
using Test,Flux
|
||||
|
||||
include("../src/Orbits.jl")
|
||||
using .Orbits
|
||||
|
||||
|
||||
|
||||
#=
|
||||
Structure:
|
||||
|
||||
This is broken into three parts
|
||||
- The test_interfaces module contains various tools used in testing
|
||||
- The test_routines includes functions that setup, run, and teardown tests
|
||||
- Everything else is a set of tests using the standard testing tools in Julia
|
||||
=#
|
||||
@testset "Overall" verbose=true begin
|
||||
@testset "TupleDuplicator" verbose=true begin
|
||||
#Check if tuple duplicator duplicates something
|
||||
td2 = Orbits.TupleDuplicator(2)
|
||||
@test typeof(td2) <: Orbits.TupleDuplicator
|
||||
|
||||
@test td2(([1,2],[3])) == (([1,2],[3]),([1,2],[3]))
|
||||
|
||||
st = State([1.0,2],[3])
|
||||
@test td2((st.stocks,st.debris)) == (([1.0,2],[3]),([1.0,2],[3]))
|
||||
|
||||
@test td2(state_to_tuple(st)) == (([1.0,2],[3]),([1.0,2],[3]))
|
||||
|
||||
@test td2(st) == ((st.stocks,st.debris),(st.stocks,st.debris))
|
||||
end
|
||||
|
||||
@testset "BranchGenerator" verbose=true begin
|
||||
st = State([1.0,2],[3])
|
||||
tp = state_to_tuple(st)
|
||||
bg = BranchGenerator(2)
|
||||
|
||||
branch = Flux.Parallel(vcat, Dense(2,1),Dense(1,1))
|
||||
|
||||
branched = bg(branch,vcat)
|
||||
|
||||
#type tests
|
||||
@test typeof(branch(tp)) <: Array{Float32}
|
||||
|
||||
@test_broken typeof(branched[2](tp)) <: Array{Float32}
|
||||
#ISSUE: what am I really looking for here?
|
||||
|
||||
#Check behaviors of the
|
||||
@test branched[1](st) == state_to_tuple(st) #Evaluation is of the wrong approach
|
||||
|
||||
|
||||
|
||||
end #branch generator
|
||||
|
||||
end #overall testset
|
||||
@ -0,0 +1,62 @@
|
||||
using Test, Flux, LinearAlgebra
|
||||
|
||||
include("../src/Orbits.jl")
|
||||
using .Orbits
|
||||
|
||||
#=
|
||||
The purpose of this document is to organize tests of the state structs and state transition functions
|
||||
=#
|
||||
|
||||
@testset "States and Physical models testing" verbose=true begin
|
||||
n_const = 2
|
||||
n_debr = 3
|
||||
n_data = 5
|
||||
|
||||
#built structs
|
||||
u = UniformDataConstructor(n_data,0,5,2,3)
|
||||
s = u(n_const,n_debr)
|
||||
b = BasicPhysics(
|
||||
0.05
|
||||
,0.02*LinearAlgebra.ones(n_const,n_const)
|
||||
,0.1
|
||||
,0.002
|
||||
,0.002
|
||||
,0.2
|
||||
)
|
||||
|
||||
a2 = ones(n_const,n_data)
|
||||
|
||||
#test that dimensions match etc
|
||||
@test size(b.satellite_collision_rates)[1] == size(s.stocks)[1]
|
||||
@test size(b.satellite_collision_rates)[2] == size(s.stocks)[1]
|
||||
@test n_data == size(s.stocks)[2]
|
||||
@test n_data == size(s.debris)[2]
|
||||
@test size(s.stocks) == size(a2)
|
||||
|
||||
@testset "DataConstructor and states" begin
|
||||
@test u.N == 5
|
||||
|
||||
@test length(s.debris) != 3
|
||||
@test length(s.stocks) != 2
|
||||
|
||||
@test length(s.stocks) == 10
|
||||
@test length(s.debris) == 15
|
||||
|
||||
@test size(s.stocks) == (2,5)
|
||||
@test size(s.debris) == (3,5)
|
||||
end
|
||||
|
||||
@testset "BasicPhysics" begin
|
||||
@testset "Survival Functions" verbose = true begin
|
||||
@test survival_rates_1(s,b) <: AbstractArray
|
||||
end
|
||||
|
||||
@testset "Transitions" begin
|
||||
@test true
|
||||
end
|
||||
end
|
||||
|
||||
|
||||
|
||||
|
||||
end #States and physcial models testing
|
||||
@ -0,0 +1,8 @@
|
||||
{
|
||||
"folders": [
|
||||
{
|
||||
"path": ".."
|
||||
}
|
||||
],
|
||||
"settings": {}
|
||||
}
|
||||
@ -0,0 +1,327 @@
|
||||
{
|
||||
"cells": [
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 1,
|
||||
"id": "0b5021da-575c-4db3-9e01-dc043a7c64b3",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"using DiffEqFlux,Flux,Zygote, LinearAlgebra"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 2,
|
||||
"id": "32ca6032-9d48-4bb2-b16e-4a66473464cd",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"2"
|
||||
]
|
||||
},
|
||||
"execution_count": 2,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"const N_constellations = 1\n",
|
||||
"const N_debris = 1\n",
|
||||
"const N_states= N_constellations + N_debris"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 5,
|
||||
"id": "77213ba3-1645-45b2-903f-b7f2817cbb47",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"#setup physical model\n",
|
||||
"struct BasicModel\n",
|
||||
" #rate at which debris hits satellites\n",
|
||||
" debris_collision_rate\n",
|
||||
" #rate at which satellites of different constellations collide\n",
|
||||
" satellite_collision_rates\n",
|
||||
" #rate at which debris exits orbits\n",
|
||||
" decay_rate\n",
|
||||
" #rate at which satellites\n",
|
||||
" autocatalysis_rate\n",
|
||||
" #ratio at which a collision between satellites produced debris\n",
|
||||
" satellite_collision_debris_ratio\n",
|
||||
" #Ratio at which launches produce debris\n",
|
||||
" launch_debris_ratio\n",
|
||||
"end\n",
|
||||
"\n",
|
||||
"#Getting loss parameters together.\n",
|
||||
"loss_param = 2e-3;\n",
|
||||
"loss_weights = loss_param*(ones(N_constellations,N_constellations) - I);\n",
|
||||
"\n",
|
||||
"#orbital decay rate\n",
|
||||
"decay_param = 0.01;\n",
|
||||
"\n",
|
||||
"#debris generation parameters\n",
|
||||
"autocatalysis_param = 0.001;\n",
|
||||
"satellite_loss_debris_rate = 5.0;\n",
|
||||
"launch_debris_rate = 0.05;\n",
|
||||
"\n",
|
||||
"#Todo, wrap physical model as a struct with the parameters\n",
|
||||
"bm = BasicModel(\n",
|
||||
" loss_param\n",
|
||||
" ,loss_weights\n",
|
||||
" ,decay_param\n",
|
||||
" ,autocatalysis_param\n",
|
||||
" ,satellite_loss_debris_rate\n",
|
||||
" ,launch_debris_rate\n",
|
||||
");\n",
|
||||
"\n",
|
||||
"#implement tranistion function\n",
|
||||
"#percentage survival function\n",
|
||||
"function survival(stocks,debris,physical_model) \n",
|
||||
" exp.(-physical_model.satellite_collision_rates*stocks .- (physical_model.debris_collision_rate*debris));\n",
|
||||
"end\n",
|
||||
"\n",
|
||||
"#stock update rules\n",
|
||||
"function G(stocks,debris,launches, physical_model)\n",
|
||||
" return diagm(survival(stocks,debris,physical_model) .- physical_model.decay_rate)*stocks + launches\n",
|
||||
"end;\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#debris evolution \n",
|
||||
"function H(stocks,debris,launches,physical_model)\n",
|
||||
" #get changes in debris from natural dynamics\n",
|
||||
" natural_debris_dynamics = (1-physical_model.decay_rate+physical_model.autocatalysis_rate) * debris \n",
|
||||
" \n",
|
||||
" #get changes in debris from satellite loss\n",
|
||||
" satellite_loss_debris = physical_model.satellite_collision_debris_ratio * (1 .- survival(stocks,debris,physical_model))'*stocks \n",
|
||||
" \n",
|
||||
" #get changes in debris from launches\n",
|
||||
" launch_debris = physical_model.launch_debris_ratio*sum(launches)\n",
|
||||
" \n",
|
||||
" #return total debris level\n",
|
||||
" return natural_debris_dynamics + satellite_loss_debris + launch_debris\n",
|
||||
"end;\n",
|
||||
"\n",
|
||||
"\n",
|
||||
"#implement reward function\n",
|
||||
"const payoff = 3*I - 0.02*ones(N_constellations,N_constellations)\n",
|
||||
"\n",
|
||||
"#Define the market profit function\n",
|
||||
"F(stocks,debris,launches) = payoff*stocks + 3.0*launches .+ (debris*-0.2)\n",
|
||||
"\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 18,
|
||||
"id": "998a1ce8-a6ba-427d-a5d1-fece358146da",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": [
|
||||
"# Launch function\n",
|
||||
"launches = Chain(\n",
|
||||
" Parallel(vcat\n",
|
||||
" #parallel joins together stocks and debris, along with intermediate interpretation\n",
|
||||
" ,Chain(Dense(N_constellations, N_states*2,relu)\n",
|
||||
" ,Dense(N_states*2, N_states*2,relu)\n",
|
||||
" )\n",
|
||||
" ,Chain(Dense(N_debris, N_states,relu)\n",
|
||||
" ,Dense(N_states, N_states,relu)\n",
|
||||
" )\n",
|
||||
" #chain gets applied to parallel\n",
|
||||
" ,Dense(N_states*3,128,relu)\n",
|
||||
" #,Dense(128,128,relu)\n",
|
||||
" ,Dense(128,N_constellations,relu)\n",
|
||||
" )\n",
|
||||
");\n",
|
||||
"\n",
|
||||
"#Value functions\n",
|
||||
"∂value = Chain(\n",
|
||||
" Parallel(vcat\n",
|
||||
" #parallel joins together stocks and debris, along with intermediate interpretation\n",
|
||||
" ,Chain(Dense(N_constellations, N_states*2,relu)\n",
|
||||
" ,Dense(N_states*2, N_states*2,relu)\n",
|
||||
" )\n",
|
||||
" ,Chain(Dense(N_debris, N_states,relu)\n",
|
||||
" ,Dense(N_states, N_states,relu)\n",
|
||||
" )\n",
|
||||
" #chain gets applied to parallel\n",
|
||||
" ,Dense(N_states*3,128,relu)\n",
|
||||
" #,Dense(128,128,relu)\n",
|
||||
" ,Dense(128,N_states,relu)\n",
|
||||
" )\n",
|
||||
");"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 6,
|
||||
"id": "b4409af2-7f41-45bc-b7eb-4bda019e4092",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1-element Vector{Float64}:\n",
|
||||
" 0.0"
|
||||
]
|
||||
},
|
||||
"execution_count": 6,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"#Extract parameter sets\n",
|
||||
"\n",
|
||||
"#= initialize Algorithm Parameters\n",
|
||||
"Chose these randomly\n",
|
||||
"=#\n",
|
||||
"λʷ = 0.5\n",
|
||||
"αʷ = 5.0\n",
|
||||
"λᶿ = 0.5\n",
|
||||
"αᶿ = 5.0\n",
|
||||
"αʳ = 10\n",
|
||||
"\n",
|
||||
"# initialitze averaging returns\n",
|
||||
"r = zeros(N_constellations)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 7,
|
||||
"id": "0529c209-55c0-49c7-815b-47578b029593",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"1-element Vector{Int64}:\n",
|
||||
" 3"
|
||||
]
|
||||
},
|
||||
"execution_count": 7,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"# initial states\n",
|
||||
"S₀ = rand(1:5,N_constellations)\n",
|
||||
"D₀ = rand(1:3, N_debris)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 16,
|
||||
"id": "ae7d4152-77b0-42ff-92f6-9d5d83d6a39d",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Params([Float32[-0.110574484; 1.0583764; 0.039519094; 0.2908444], Float32[0.0, 0.0, 0.0, 0.0], Float32[0.4765299 0.5994208 -0.43710196 0.2269359; 0.5550531 0.5423604 -0.796175 0.76214457; 0.59269524 0.7436546 0.02525105 0.85908467; 0.3774994 -0.111040816 0.84196734 -0.18133782], Float32[0.0, 0.0, 0.0, 0.0], Float32[-1.2003294; -1.24031], Float32[0.0, 0.0], Float32[-0.004074011 -0.84631246; -0.5459394 1.1513239], Float32[0.0, 0.0], Float32[-0.19545768 -0.20670874 … 0.06923863 -0.09825141; 0.097166725 0.06564395 … -0.1928437 0.19962357; … ; 0.025075339 -0.06016964 … 0.0838129 -0.11523932; 0.20085223 0.16679004 … 0.016495213 -0.1548977], Float32[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 … 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], Float32[-0.21479565 0.0090183215 … -0.2022802 -0.19925424], Float32[0.0]])"
|
||||
]
|
||||
},
|
||||
"execution_count": 16,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"launch_params = Flux.params(launches)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 21,
|
||||
"id": "232c0a44-be74-4431-a86e-dbc71e83c17a",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"loss (generic function with 1 method)"
|
||||
]
|
||||
},
|
||||
"execution_count": 21,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"function loss(stocks,debris)\n",
|
||||
" sum(launches((stocks,debris)))\n",
|
||||
"end"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 25,
|
||||
"id": "e1e1cdef-b164-43b6-a80e-b8665bdf9b14",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Grads(...)"
|
||||
]
|
||||
},
|
||||
"execution_count": 25,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"g = Flux.gradient(() -> loss(S₀,D₀), launch_params)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 28,
|
||||
"id": "35d1763b-4650-4916-957d-fbb436280e1f",
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"Params([Float32[-0.110574484; 1.0583764; 0.039519094; 0.2908444], Float32[0.0, 0.0, 0.0, 0.0], Float32[0.4765299 0.5994208 -0.43710196 0.2269359; 0.5550531 0.5423604 -0.796175 0.76214457; 0.59269524 0.7436546 0.02525105 0.85908467; 0.3774994 -0.111040816 0.84196734 -0.18133782], Float32[0.0, 0.0, 0.0, 0.0], Float32[-1.2003294; -1.24031], Float32[0.0, 0.0], Float32[-0.004074011 -0.84631246; -0.5459394 1.1513239], Float32[0.0, 0.0], Float32[-0.19545768 -0.20670874 … 0.06923863 -0.09825141; 0.097166725 0.06564395 … -0.1928437 0.19962357; … ; 0.025075339 -0.06016964 … 0.0838129 -0.11523932; 0.20085223 0.16679004 … 0.016495213 -0.1548977], Float32[0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 … 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], Float32[-0.21479565 0.0090183215 … -0.2022802 -0.19925424], Float32[0.0]])"
|
||||
]
|
||||
},
|
||||
"execution_count": 28,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"launch_params"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
"id": "eaad2871-54ed-4674-8405-d4ebb950851d",
|
||||
"metadata": {},
|
||||
"outputs": [],
|
||||
"source": []
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"kernelspec": {
|
||||
"display_name": "Julia 1.6.2",
|
||||
"language": "julia",
|
||||
"name": "julia-1.6"
|
||||
},
|
||||
"language_info": {
|
||||
"file_extension": ".jl",
|
||||
"mimetype": "application/julia",
|
||||
"name": "julia",
|
||||
"version": "1.6.2"
|
||||
}
|
||||
},
|
||||
"nbformat": 4,
|
||||
"nbformat_minor": 5
|
||||
}
|
||||
Binary file not shown.
Loading…
Reference in New Issue