{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "working-peeing",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "decimal-boundary",
   "metadata": {},
   "source": [
    "The purpose of this notebook is to allow me to investigate proper shaping of inputs.\n",
    "\n",
    "Typically pytorch chooses a tensor specification\n",
    "$$\n",
    "(N, .*)\n",
    "$$\n",
    "where $N$ is the batch size.\n",
    "For example a Convolutional NN layer expects\n",
    "$$\n",
    " NCHW\n",
    "$$\n",
    "for BatchSize,ChannelSize,Height,Width.\n",
    "On the other hand, Linear expects\n",
    "$$\n",
    " N.*H\n",
    "$$\n",
    "for BatchSize,any number of other dimensions, in_features\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "eligible-isolation",
   "metadata": {},
   "outputs": [],
   "source": [
    "class PartialDerivativesEstimand(torch.nn.Module):\n",
    "    def __init__(self,batch_size, number_constellations, number_states,scale_factor=4, layer_size=12):\n",
    "        \"\"\"Estimate per-constellation partials of a state vector.\n",
    "\n",
    "        Maps inputs shaped (batch, 1, number_states) to outputs shaped\n",
    "        (batch, number_constellations, number_states).\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.batch_size = batch_size\n",
    "        self.number_constellations = number_constellations\n",
    "        self.number_states = number_states\n",
    "        #NOTE(review): scale_factor is stored but never used; the upsample\n",
    "        #below is driven by number_constellations. Confirm which is intended.\n",
    "        self.scale_factor = scale_factor\n",
    "        self.layer_size = layer_size\n",
    "        \n",
    "        \n",
    "        #preprocess (single linear layer in case there is anything that needs to happen to all states)\n",
    "        self.preprocess = torch.nn.Sequential(\n",
    "            torch.nn.ReLU() #cleanup as states must be positive\n",
    "            ,torch.nn.Linear(in_features = self.number_states, out_features=self.number_states)\n",
    "        )\n",
    "        #upscale to get the basic dimensionality correct. From (batch,State) to (batch, constellation, state). Includes a reshape\n",
    "        #Uses the runtime batch dimension x.size(0) rather than the stored\n",
    "        #batch_size, so the module also works for other batch sizes.\n",
    "        self.upsample = lambda x: torch.nn.Upsample(scale_factor=self.number_constellations)(x).view(x.size(0)\n",
    "                                                                                                     ,self.number_constellations\n",
    "                                                                                                     ,self.number_states)\n",
    "        \n",
    "        #sequential steps\n",
    "        #NOTE(review): consecutive Linear layers with no activation between\n",
    "        #them compose into a single affine map; consider adding nonlinearities.\n",
    "        self.sequential = torch.nn.Sequential(\n",
    "            torch.nn.Linear(in_features=number_states, out_features=layer_size)\n",
    "            #who knows if a convolution might help here.\n",
    "            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
    "            ,torch.nn.Linear(in_features=layer_size, out_features=layer_size)\n",
    "        )\n",
    "\n",
    "        #reduce axis to match expectation\n",
    "        self.feature_reduction = torch.nn.Linear(in_features=layer_size, out_features=number_states)\n",
    "        \n",
    "    def forward(self, input_values):\n",
    "        #Note that the input values are just going to be the state variables\n",
    "        #TODO:check that input values match the prepared dimension?\n",
    "        \n",
    "        #preprocess\n",
    "        intermediate = self.preprocess(input_values)\n",
    "        \n",
    "        #upscale the input values\n",
    "        intermediate = self.upsample(intermediate)\n",
    "        \n",
    "        #intermediate processing\n",
    "        intermediate = self.sequential(intermediate)\n",
    "        \n",
    "        #reduce feature axis to match the expected number of partials\n",
    "        intermediate = self.feature_reduction(intermediate)\n",
    "        \n",
    "        \n",
    "        return intermediate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "id": "literary-desktop",
   "metadata": {},
   "outputs": [],
   "source": [
    "batch_size = 2\n",
    "constellations = 2\n",
    "number_states = constellations+1\n",
    "\n",
    "#initialize the NN\n",
    "a = PartialDerivativesEstimand(batch_size,constellations,number_states,scale_factor=2)\n",
    "\n",
    "#example state\n",
    "s = torch.rand(size=(batch_size,1,number_states))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "id": "second-graduation",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[0.9283, 0.9414, 0.3426]],\n",
       "\n",
       "        [[0.1902, 0.0369, 0.4699]]])"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "s"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "id": "reliable-alberta",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[[-0.1991, 0.1335, 0.2821],\n",
       "         [-0.3549, 0.0213, 0.2322]],\n",
       "\n",
       "        [[-0.1701, 0.1557, 0.2954],\n",
       "         [-0.3017, 0.0690, 0.2419]]], grad_fn=<AddBackward0>)"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "a(s)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "horizontal-judges",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "instant-lindsay",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
|