You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
595 lines
19 KiB
Plaintext
595 lines
19 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "electric-scratch",
|
|
"metadata": {},
|
|
"source": [
|
|
"Note on pytorch. NN optimization acts imperitively/by side effect as follows.\n",
|
|
" - Define model\n",
|
|
" - loop\n",
|
|
" - Calculate loss\n",
|
|
" - Zero gradients\n",
|
|
" - backprop to model\n",
|
|
" - check conditions for exit\n",
|
|
" - report diagnostics\n",
|
|
" - disect results\n",
|
|
" \n",
|
|
" \n",
|
|
"## Split result from NN\n",
|
|
"Goal is to train the NN and then get a couple of outputs at the end that can be used to split between value function partials and launch functions."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 1,
|
|
"id": "outdoor-essay",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import torch\n",
|
|
"import combined as c"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 2,
|
|
"id": "played-reward",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [],
|
|
"source": [
|
|
"class DoubleNetwork(torch.nn.Module):\n",
|
|
" def __init__(self, input_size,output_size,layers_size):\n",
|
|
" super().__init__()\n",
|
|
" \n",
|
|
" #So, this next section constructs different layers within the NN\n",
|
|
" #sinlge linear section\n",
|
|
" self.linear_step_1a = torch.nn.Linear(input_size,layers_size)\n",
|
|
" \n",
|
|
" #single linear section\n",
|
|
" self.linear_step_2a = torch.nn.Linear(layers_size,output_size)\n",
|
|
" self.linear_step_2b = torch.nn.Linear(layers_size,output_size)\n",
|
|
" \n",
|
|
" def forward(self, input_values):\n",
|
|
" \n",
|
|
" intermediate_values_a = self.linear_step_1a(input_values)\n",
|
|
" \n",
|
|
" out_values_a = self.linear_step_2a(intermediate_values_a)\n",
|
|
" out_values_b = self.linear_step_2b(intermediate_values_a)\n",
|
|
" \n",
|
|
" return out_values_a,out_values_b"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 3,
|
|
"id": "tribal-manor",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"\n",
|
|
" tensor(4.8121, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(17.3775, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(30.0737, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.6026, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.3996, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.3020, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.2092, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.1412, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0893, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0561, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0341, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0208, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0125, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0075, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0045, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0027, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0016, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0010, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0006, grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
" tensor(0.0003, grad_fn=<AddBackward0>)\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"model = DoubleNetwork(input_size = 5, output_size=5, layers_size=15)\n",
|
|
"\n",
|
|
"data_in = torch.tensor([1.5,2,3,4,5])\n",
|
|
"\n",
|
|
"data_in\n",
|
|
"\n",
|
|
"target = torch.zeros(5)\n",
|
|
"\n",
|
|
"def loss_fn2(output,target):\n",
|
|
" return sum((output[1] +output[0] - target)**2)\n",
|
|
" #could add a simplicity assumption i.e. l1 on parameters.\n",
|
|
"\n",
|
|
"#Prep Optimizer\n",
|
|
"optimizer = torch.optim.SGD(model.parameters(),lr=0.01)\n",
|
|
"\n",
|
|
"for i in range(20):\n",
|
|
" #training loop\n",
|
|
" optimizer.zero_grad()\n",
|
|
"\n",
|
|
" output = model.forward(data_in)\n",
|
|
" output\n",
|
|
"\n",
|
|
" l = loss_fn2(output, target)\n",
|
|
"\n",
|
|
" l.backward()\n",
|
|
"\n",
|
|
" optimizer.step()\n",
|
|
"\n",
|
|
" print(\"\\n\",l)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 4,
|
|
"id": "uniform-union",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"class SplitNetwork(torch.nn.Module):\n",
|
|
" def __init__(self, input_size,output_size_a,output_size_b,layers_size):\n",
|
|
" super().__init__()\n",
|
|
" \n",
|
|
" #So, this next section constructs different layers within the NN\n",
|
|
" #sinlge linear section\n",
|
|
" self.linear_step_1 = torch.nn.Linear(input_size,layers_size)\n",
|
|
" self.linear_step_2 = torch.nn.Linear(layers_size,layers_size)\n",
|
|
" self.linear_step_3 = torch.nn.Linear(layers_size,layers_size)\n",
|
|
" self.linear_step_4 = torch.nn.Linear(layers_size,layers_size)\n",
|
|
" \n",
|
|
" #single linear section\n",
|
|
" self.linear_step_split_a = torch.nn.Linear(layers_size,output_size_a)\n",
|
|
" self.linear_step_split_b = torch.nn.Linear(layers_size,output_size_b)\n",
|
|
" \n",
|
|
" def forward(self, input_values):\n",
|
|
" \n",
|
|
" intermediate_values = self.linear_step_1(input_values)\n",
|
|
" intermediate_values = self.linear_step_2(intermediate_values)\n",
|
|
" intermediate_values = self.linear_step_3(intermediate_values)\n",
|
|
" intermediate_values = self.linear_step_4(intermediate_values)\n",
|
|
" \n",
|
|
" out_values_a = self.linear_step_split_a(intermediate_values)\n",
|
|
" out_values_b = self.linear_step_split_b(intermediate_values)\n",
|
|
" \n",
|
|
" return out_values_a,out_values_b"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 5,
|
|
"id": "grand-vietnamese",
|
|
"metadata": {
|
|
"tags": []
|
|
},
|
|
"outputs": [
|
|
{
|
|
"ename": "NameError",
|
|
"evalue": "name 'loss_fn3' is not defined",
|
|
"output_type": "error",
|
|
"traceback": [
|
|
"\u001b[0;31m---------------------------------------\u001b[0m",
|
|
"\u001b[0;31mNameError\u001b[0mTraceback (most recent call last)",
|
|
"\u001b[0;32m<ipython-input-5-4ecf63ceaaa2>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0moutput\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m \u001b[0ml\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mloss_fn3\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0moutput\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_a\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtarget_b\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m \u001b[0ml\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
|
"\u001b[0;31mNameError\u001b[0m: name 'loss_fn3' is not defined"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"#Prep Optimizer\n",
|
|
"optimizer = torch.optim.SGD(model.parameters(),lr=0.01)\n",
|
|
"\n",
|
|
"for i in range(25):\n",
|
|
" #training loop\n",
|
|
" optimizer.zero_grad()\n",
|
|
"\n",
|
|
" output = model.forward(data_in)\n",
|
|
" output\n",
|
|
"\n",
|
|
" l = loss_fn3(output, target_a, target_b)\n",
|
|
"\n",
|
|
" l.backward()\n",
|
|
"\n",
|
|
" optimizer.step()\n",
|
|
"\n",
|
|
" print(\"\\n\",l)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "settled-maple",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 6,
|
|
"id": "appointed-sandwich",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"#This is a custom upscale module.\n",
|
|
"class PartialDerivativesEstimand(torch.nn.Module):\n",
|
|
" def __init__(self, state_tensor_size,layers_size,number_constellations):\n",
|
|
" \"\"\"\n",
|
|
" Description\n",
|
|
" \"\"\"\n",
|
|
" super().__init__()\n",
|
|
" self.number_constellations = number_constellations\n",
|
|
" \n",
|
|
" #Scale up the input from just the tensor of states to the layer_size X number_constellations\n",
|
|
" \n",
|
|
" #Increase to the layer size\n",
|
|
" self.linear_step_1 = torch.nn.Linear(in_features=state_tensor_size, out_features=layers_size)\n",
|
|
" #Upscale the tensor to be able to estimate for each constellation\n",
|
|
" #TODO: change to the standard upscaler\n",
|
|
" self.upscale_step = lambda x: torch.nn.functional.interpolate(x, scale_factor=self.number_constellations).view(x.numel(), self.number_constellations)\n",
|
|
" \n",
|
|
" #start adding useful layers (start small).\n",
|
|
" self.relu_3 = torch.nn.ReLU()\n",
|
|
" self.relu_4 = torch.nn.ReLU() #TODO:swap to linear or something like that.\n",
|
|
" #TODO:downscale to match the proper output values\n",
|
|
" \n",
|
|
" def forward(self, input_values):\n",
|
|
" \n",
|
|
" intermediate_values = self.linear_step_1(input_values)\n",
|
|
" intermediate_values = self.upscale_step(intermediate_values)\n",
|
|
" intermediate_values = self.relu_3(intermediate_values)\n",
|
|
" \n",
|
|
" intermediate_values = self.relu_4(intermediate_values)\n",
|
|
" \n",
|
|
" return intermediate_values"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 7,
|
|
"id": "complete-gather",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"tensor([[0.0000, 0.0000, 0.0000],\n",
|
|
" [0.4215, 0.4215, 0.4215],\n",
|
|
" [0.5668, 0.5668, 0.5668],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0675, 0.0675, 0.0675],\n",
|
|
" [1.8888, 1.8888, 1.8888],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000]], grad_fn=<ReluBackward0>)"
|
|
]
|
|
},
|
|
"execution_count": 7,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"nn = PartialDerivativesEstimand(3,12,3)\n",
|
|
"\n",
|
|
"test = torch.tensor([[[1.0,3,4]]])\n",
|
|
"\n",
|
|
"t = nn.forward(test)\n",
|
|
"t"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 8,
|
|
"id": "iraqi-italic",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"tensor(30.5458, grad_fn=<AddBackward0>)\n",
|
|
"tensor(26.5162, grad_fn=<AddBackward0>)\n",
|
|
"tensor(24.9672, grad_fn=<AddBackward0>)\n",
|
|
"tensor(24.3718, grad_fn=<AddBackward0>)\n",
|
|
"tensor(24.1429, grad_fn=<AddBackward0>)\n",
|
|
"tensor(24.0549, grad_fn=<AddBackward0>)\n",
|
|
"tensor(24.0211, grad_fn=<AddBackward0>)\n",
|
|
"tensor(24.0081, grad_fn=<AddBackward0>)\n",
|
|
"tensor(24.0031, grad_fn=<AddBackward0>)\n",
|
|
"tensor(24.0012, grad_fn=<AddBackward0>)\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"tensor([[0.0000, 0.0000, 0.0000],\n",
|
|
" [0.9951, 0.9951, 0.9951],\n",
|
|
" [0.9964, 0.9964, 0.9964],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.9922, 0.9922, 0.9922],\n",
|
|
" [1.0075, 1.0075, 1.0075],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000, 0.0000]], grad_fn=<ReluBackward0>)"
|
|
]
|
|
},
|
|
"execution_count": 8,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"#Prep Optimizer\n",
|
|
"optimizer = torch.optim.SGD(nn.parameters(),lr=0.01)\n",
|
|
"\n",
|
|
"#get loss function\n",
|
|
"def loss_fn4(output):\n",
|
|
" return sum(sum((1-output)**2))\n",
|
|
"\n",
|
|
"for i in range(10):\n",
|
|
" #training loop\n",
|
|
" optimizer.zero_grad()\n",
|
|
"\n",
|
|
" output = nn.forward(test)\n",
|
|
"\n",
|
|
" l = loss_fn4(output)\n",
|
|
"\n",
|
|
" l.backward()\n",
|
|
"\n",
|
|
" optimizer.step()\n",
|
|
"\n",
|
|
" print(l)\n",
|
|
"\n",
|
|
"nn.forward(test)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 9,
|
|
"id": "adaptive-period",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"#This is a custom upscale module.\n",
|
|
"class LaunchFnEstimand(torch.nn.Module):\n",
|
|
" def __init__(self, state_tensor_size,layers_size,number_constellations):\n",
|
|
" \"\"\"\n",
|
|
" Description\n",
|
|
" \"\"\"\n",
|
|
" super().__init__()\n",
|
|
" self.number_constellations = number_constellations\n",
|
|
" \n",
|
|
" #Scale up the input from just the tensor of states to the layer_size X number_constellations\n",
|
|
" \n",
|
|
" #Increase to the layer size\n",
|
|
" self.linear_1 = torch.nn.Linear(in_features=state_tensor_size, out_features=layers_size)\n",
|
|
" self.relu = torch.nn.ReLU()\n",
|
|
" self.linear_3 = torch.nn.Linear(in_features=layers_size, out_features=layers_size)\n",
|
|
" self.linear_5 = torch.nn.Linear(in_features=layers_size, out_features=number_constellations)\n",
|
|
"\n",
|
|
" \n",
|
|
" def forward(self, input_values):\n",
|
|
" \n",
|
|
" intermediate_values = self.linear_1(input_values)\n",
|
|
" intermediate_values = self.relu(intermediate_values)\n",
|
|
" intermediate_values = self.linear_3(intermediate_values)\n",
|
|
" intermediate_values = self.relu(intermediate_values)\n",
|
|
" intermediate_values = self.linear_5(intermediate_values)\n",
|
|
" intermediate_values = self.relu(intermediate_values) #launches are always positive\n",
|
|
" \n",
|
|
" return intermediate_values"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 10,
|
|
"id": "northern-vault",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"tensor(0.0315, grad_fn=<AddBackward0>)\n",
|
|
"tensor(0.0241, grad_fn=<AddBackward0>)\n",
|
|
"tensor(0.0184, grad_fn=<AddBackward0>)\n",
|
|
"tensor(0.0141, grad_fn=<AddBackward0>)\n",
|
|
"tensor(0.0107, grad_fn=<AddBackward0>)\n",
|
|
"tensor(0.0082, grad_fn=<AddBackward0>)\n",
|
|
"tensor(0.0062, grad_fn=<AddBackward0>)\n",
|
|
"tensor(0.0048, grad_fn=<AddBackward0>)\n",
|
|
"tensor(0.0036, grad_fn=<AddBackward0>)\n",
|
|
"tensor(0.0028, grad_fn=<AddBackward0>)\n"
|
|
]
|
|
},
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"tensor([[[0.0457, 0.0000]]], grad_fn=<ReluBackward0>)"
|
|
]
|
|
},
|
|
"execution_count": 10,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"launch = LaunchFnEstimand(3,12,2)\n",
|
|
"\n",
|
|
"#Prep Optimizer\n",
|
|
"optimizer = torch.optim.SGD(launch.parameters(),lr=0.01)\n",
|
|
"\n",
|
|
"#get loss function\n",
|
|
"def loss_fn5(output):\n",
|
|
" return sum(sum(sum((output)**2)))\n",
|
|
"\n",
|
|
"for i in range(10):\n",
|
|
" #training loop\n",
|
|
" optimizer.zero_grad()\n",
|
|
"\n",
|
|
" output = launch.forward(test)\n",
|
|
"\n",
|
|
" l = loss_fn5(output)\n",
|
|
"\n",
|
|
" l.backward()\n",
|
|
"\n",
|
|
" optimizer.step()\n",
|
|
"\n",
|
|
" print(l)\n",
|
|
" \n",
|
|
"\n",
|
|
"launch.forward(test)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 11,
|
|
"id": "sensitive-pennsylvania",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"tensor([[[0.0457, 0.0000]]], grad_fn=<ReluBackward0>)"
|
|
]
|
|
},
|
|
"execution_count": 11,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"launch(test)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 12,
|
|
"id": "utility-giant",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"class EstimandNN(torch.nn.Module):\n",
|
|
" def __init__(self, state_tensor_size,layers_size,number_constellations):\n",
|
|
" super().__init__()\n",
|
|
" \n",
|
|
" #So, this next section constructs different layers within the NN\n",
|
|
" #sinlge linear section\n",
|
|
" \n",
|
|
" self.partials_estimator = PartialDerivativesEstimand(state_tensor_size,layers_size,number_constellations)\n",
|
|
" self.launch_estimator = LaunchFnEstimand(state_tensor_size,layers_size,number_constellations)\n",
|
|
" \n",
|
|
" def forward(self, input_values):\n",
|
|
" partials = self.partials_estimator(input_values)\n",
|
|
" launch = self.launch_estimator(input_values)\n",
|
|
" return c.EstimandInterface(partials,launch)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 13,
|
|
"id": "molecular-factory",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"enn = EstimandNN(3,12,2)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 15,
|
|
"id": "artistic-washer",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"Launch Decisions and Partial Derivativs of value function with\n",
|
|
"\tlaunches\n",
|
|
"\t\t tensor([[[0.0000, 0.0020]]], grad_fn=<ReluBackward0>)\n",
|
|
"\tPartials\n",
|
|
"\t\ttensor([[0.0000, 0.0000],\n",
|
|
" [1.7938, 1.7938],\n",
|
|
" [0.0000, 0.0000],\n",
|
|
" [2.8751, 2.8751],\n",
|
|
" [1.4894, 1.4894],\n",
|
|
" [1.4614, 1.4614],\n",
|
|
" [0.0000, 0.0000],\n",
|
|
" [2.9800, 2.9800],\n",
|
|
" [0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000],\n",
|
|
" [0.0000, 0.0000]], grad_fn=<ReluBackward0>)\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"print(enn(test))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "purple-filling",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.8.8"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 5
|
|
}
|