# Orbits/Code/NeuralNetworkSpecifications.py
import torch
from torch.autograd.functional import jacobian
import itertools
import math
import abc
class EstimandInterface():
    """
    Clean interface for working with the estimand (i.e. the thing we are trying to estimate).

    In general we are trying to estimate the choice variables and the partial
    derivatives of the value functions. This class wraps output from the
    neural network (or other estimand), allowing us to
    - easily substitute various types of launch functions behind a common interface
      (which eases testing)
    - check dimensionality etc. without dealing with randomness (again easing testing)
    - reason more cleanly about the component pieces
    - provide a clean interface to find constellation-level launch decisions etc.

    Inputs fall into two general categories:
    - the choice function results (``choices``)
    - the partial derivatives of the value function (``partials``)
    """
    def __init__(self, partials, choices, deorbits=None):
        # partials: tensor of partial derivatives of the value function(s)
        # choices: tensor of choice-function (launch) results
        # deorbits: optional deorbit decisions; previously this argument was
        #           accepted but silently discarded — now stored.
        self.partials = partials
        self.choices = choices
        self.deorbits = deorbits
    @property
    def number_constellations(self):
        # NOTE(review): this reads the *last* axis of choices, but the choice
        # network emits (batch, constellation, choice), whose last axis is the
        # number of choices — confirm which axis is intended before relying on it.
        return self.choices.shape[-1]
    @property
    def number_states(self):
        # NOTE(review): depends on the debris trackers technically; last axis
        # of partials is assumed to index states — confirm.
        return self.partials.shape[-1]
    def choice_single(self, constellation):
        """Return the launch decision for the constellation of interest
        (selected via a one-hot contraction over the last axis)."""
        filter_tensor = torch.zeros(self.number_constellations)
        filter_tensor[constellation] = 1.0
        return self.choices @ filter_tensor
    def choice_vector(self, constellation):
        """Return the launch decision for the constellation of interest as a
        vector (same shape as choices, zeroed everywhere else)."""
        filter_tensor = torch.zeros(self.number_constellations)
        filter_tensor[constellation] = 1.0
        return self.choices * filter_tensor
    def partial_vector(self, constellation):
        """Return the partials of the value function corresponding to the
        constellation of interest (one-hot contraction over the last axis)."""
        filter_tensor = torch.zeros(self.number_states)
        filter_tensor[constellation] = 1.0
        return self.partials @ filter_tensor
    def partial_matrix(self, constellation):
        """Return the partials of the value function corresponding to the
        constellation of interest as a matrix (zeroed elsewhere)."""
        filter_tensor = torch.zeros(self.number_states)
        filter_tensor[constellation] = 1.0
        return self.partials * filter_tensor
    def __str__(self):
        # Human-readable descriptor. (Fixed "Derivativs" typo.)
        return "Launch Decisions and Partial Derivatives of value function with\n\tlaunches\n\t\t {}\n\tPartials\n\t\t{}".format(self.choices,self.partials)
class ChoiceFunction(torch.nn.Module):
    """
    Neural network used to estimate the launch (choice) function.

    Projects a batch of states into a hidden feature space, replicates the
    features once per constellation, runs a small stack of linear layers, and
    clamps the result to be non-negative.
    """
    def __init__(self
                 ,batch_size
                 ,number_states
                 ,number_choices
                 ,number_constellations
                 ,layer_size=12
                 ):
        super().__init__()
        # Project raw states into the hidden feature space.
        self.preprocess = torch.nn.Linear(in_features=number_states, out_features=layer_size)
        # Replicate features once per constellation, then reshape to
        # (batch, constellation, feature).
        self.upsample = lambda features: torch.nn.Upsample(scale_factor=number_constellations)(features).view(
            batch_size, number_constellations, layer_size)
        # ReLU both coerces inputs toward the positive state space we care
        # about and keeps the final launches non-negative.
        self.relu = torch.nn.ReLU()
        # Intermediate processing stack (who knows if a convolution might help here).
        self.sequential = torch.nn.Sequential(
            torch.nn.Linear(in_features=layer_size, out_features=layer_size)
            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
        )
        # Collapse the feature axis down to the expected number of choices.
        self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_choices)
    def forward(self, input_values):
        # The trailing ReLU keeps launches positive; it may need removing for
        # other types of choices. The leading ReLU is harmless since states
        # should be positive anyway.
        stages = (self.relu
                  ,self.preprocess
                  ,self.upsample
                  ,self.sequential
                  ,self.feature_reduction
                  ,self.relu)
        result = input_values
        for stage in stages:
            result = stage(result)
        return result
class PartialDerivativesOfValueEstimand(torch.nn.Module):
    """
    Neural network used to estimate the partial derivatives of the value
    functions, producing one vector of partials per constellation.
    """
    def __init__(self
                 ,batch_size
                 ,number_constellations
                 ,number_states
                 ,layer_size=12):
        super().__init__()
        # Kept on the instance for the upsample reshape in forward.
        self.batch_size = batch_size
        self.number_constellations = number_constellations
        self.number_states = number_states
        self.layer_size = layer_size
        # Single cleanup pass applied to all states (states must be positive,
        # hence the ReLU), plus one linear layer in case anything needs to
        # happen to every state.
        self.preprocess = torch.nn.Sequential(
            torch.nn.ReLU()
            ,torch.nn.Linear(in_features=self.number_states, out_features=self.number_states)
        )
        # Get the basic dimensionality correct: from (batch, state) to
        # (batch, constellation, state), including the reshape.
        self.upsample = lambda tensor: torch.nn.Upsample(scale_factor=self.number_constellations)(tensor).view(
            self.batch_size, self.number_constellations, self.number_states)
        # Intermediate stack (who knows if a convolution or other layer type
        # might help here).
        self.sequential = torch.nn.Sequential(
            torch.nn.Linear(in_features=number_states, out_features=layer_size)
            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)
        )
        # Collapse the feature axis back to one partial per state.
        self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_states)
    def forward(self, states):
        # Input values are just the state variables.
        # TODO: check that input values match the prepared dimension?
        pipeline = (self.preprocess
                    ,self.upsample
                    ,self.sequential
                    ,self.feature_reduction)
        partials = states
        for step in pipeline:
            partials = step(partials)
        return partials
class EstimandNN(torch.nn.Module):
    """
    Combined estimand network: takes the current states as input values and
    returns an EstimandInterface holding both the estimated partial
    derivatives of the value function and the estimated launch function.
    """
    def __init__(self
                 ,batch_size
                 ,number_states
                 ,number_choices
                 ,number_constellations
                 ,layer_size=12
                 ):
        super().__init__()
        # Sub-network for the partial derivatives of the value function.
        self.partials_estimator = PartialDerivativesOfValueEstimand(batch_size, number_constellations, number_states, layer_size)
        # Sub-network for the launch/choice function.
        self.launch_estimator = ChoiceFunction(batch_size, number_states, number_choices, number_constellations, layer_size)
    def forward(self, input_values):
        # Fixed: removed a stray dead `pass` statement that preceded the body.
        partials = self.partials_estimator(input_values)
        launch = self.launch_estimator(input_values)
        return EstimandInterface(partials, launch)