{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "dense-italic",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import numpy\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "adult-cargo",
   "metadata": {},
   "outputs": [],
   "source": [
    "a = torch.tensor([2., 3.], requires_grad=True)\n",
    "b = torch.tensor([6., 4.], requires_grad=True)\n",
    "c = torch.tensor([2., 3.], requires_grad=False)\n",
    "d = torch.tensor([6., 4.], requires_grad=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "photographic-miniature",
   "metadata": {},
   "outputs": [],
   "source": [
    "def f(a,b):\n",
    "    return a**3+a**4 + a*b**2 -b**2 -b**5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "charming-plate",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "fitting-horizontal",
   "metadata": {},
   "outputs": [],
   "source": [
    "a.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "secret-oasis",
   "metadata": {},
   "outputs": [],
   "source": [
    "b.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "comic-context",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "Begin demo \n",
      "\n",
      "x = tensor([4.], requires_grad=True)\n",
      "y = tensor([29.], grad_fn=<AddBackward0>)\n",
      "\n",
      "df = <AddBackward0 object at 0x7fd45460eb20>\n",
      "\n",
      "gradient of func(x) = \n",
      "tensor([11.])\n"
     ]
    }
   ],
   "source": [
    "# gradient_demo.py\n",
    "\n",
    "import torch as T\n",
    "device = T.device(\"cpu\")\n",
    "\n",
    "def some_func(x):\n",
    "    result = (x * x) + (3 * x) + 1\n",
    "    return result\n",
    "\n",
    "def main():\n",
    "    print(\"\\nBegin demo \\n\")\n",
    "\n",
    "    x = T.tensor([4.0], dtype=T.float32,\n",
    "                 requires_grad=True).to(device)\n",
    "    y = some_func(x)\n",
    "\n",
    "    print(\"x = \" + str(x))\n",
    "    print(\"y = \" + str(y))\n",
    "    print(\"\")\n",
    "\n",
    "    df = y.grad_fn\n",
    "    print(\"df = \" + str(df))\n",
    "    print(\"\")\n",
    "\n",
    "    y.backward() # compute grad of some_func(4)\n",
    "    print(\"gradient of func(x) = \")\n",
    "    print(x.grad) # 2(4) + 3 = 11\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "chinese-family",
   "metadata": {},
   "source": [
    "# Try this\n",
    " - define a bellman like function \n",
    " - use `def` \n",
    " - Find the derivatives with respect to X\n",
    " - Find the derivatives with respect to $\\theta$\n",
    " - Invert this to find the time-transition function?\n",
    " - Inversion is probably impossible. Instead, solve both the policy and value of derivatives together?\n",
    " \n",
    " \n",
    "## Math for conditions\n",
    "### Optimality\n",
    "Stationarity and Complementary Slackness conditions\n",
    "$$\n",
    "0 = \\frac{\\partial F}{\\partial x} + \\beta \\frac{\\partial G}{\\partial x} \\frac{\\partial V}{\\partial G} + \\lambda \\\\\n",
    "0 = \\lambda \\cdot g(x,\\theta)\n",
    "$$\n",
    "### Envelope\n",
    "$$\n",
    "0 = \\frac{\\partial F}{\\partial \\theta} + \\frac{\\partial G}{\\partial \\theta} \\frac{\\partial V}{\\partial G} - \\frac{\\partial V}{\\partial \\theta}\n",
    "$$\n",
    "\n",
    "So, how do you incorporate the situation where you have to iterate multiple times?\n",
    " - Just add conditions as rows?\n",
    " - Solve and substitute using some theorem on the inverse of derivatives in multivariate systems?\n",
    " \n",
    "## Thoughts on solution\n",
    "You can find $\\frac{\\partial G}{\\partial \\theta}$ through a direct construction maybe?\n",
    " - This involves\n",
    " - Setting up each scalar element of $G$, and then differentiating\n",
    " - These would then need to be reassembled into a matrix\n",
    " - Pros\n",
    " - Will work\n",
    " - Cons\n",
    " "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "exceptional-amount",
   "metadata": {},
   "outputs": [],
   "source": [
    "def utility(c,a=2):\n",
    "    return (c**(1-a))/(1-a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "biblical-convertible",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[0.0156, 0.0000],\n",
       "         [0.0000, 0.0123]]),\n",
       " tensor([[0.0056, 0.0000],\n",
       "         [0.0000, 0.0177]]))"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.autograd.functional.jacobian(utility, (a,b))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "classified-crisis",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([10., 15.], grad_fn=<MulBackward0>)"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def complicated(c):\n",
    "    return c.sum()*c\n",
    "\n",
    "complicated(a)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "charged-locator",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[7., 2.],\n",
       "        [3., 8.]], grad_fn=<ViewBackward>)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.autograd.functional.jacobian(complicated, a, create_graph=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "cheap-necessity",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([50., 50.], grad_fn=<AddBackward0>)"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def more_complicated(c,d):\n",
    "    return c.sum()*d + d.sum()*c\n",
    "more_complicated(a,b)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "flexible-nightlife",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[16.,  6.],\n",
       "         [ 4., 14.]], grad_fn=<ViewBackward>),\n",
       " tensor([[7., 2.],\n",
       "         [3., 8.]], grad_fn=<ViewBackward>))"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "x = torch.autograd.functional.jacobian(more_complicated, (a,b), create_graph=True)\n",
    "x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "heavy-duration",
   "metadata": {},
   "outputs": [],
   "source": [
    "def states(theta,x,c,d):\n",
    "    return (theta + x*c)@d * d"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "quantitative-organ",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([864., 576.], grad_fn=<MulBackward0>)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "states(a,b,c,d)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "transparent-cartridge",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[36., 24.],\n",
       "         [24., 16.]], grad_fn=<ViewBackward>),\n",
       " tensor([[72., 72.],\n",
       "         [48., 48.]], grad_fn=<ViewBackward>),\n",
       " tensor([[216.,  96.],\n",
       "         [144.,  64.]], grad_fn=<ViewBackward>),\n",
       " tensor([[228.,  90.],\n",
       "         [ 56., 204.]], grad_fn=<ViewBackward>))"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "torch.autograd.functional.jacobian(states, (a,b,c,d), create_graph=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "adjusted-saskatchewan",
   "metadata": {},
   "source": [
    "So, I think I can construct a gradient, and possibly invert it/choose some other solution method."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "outdoor-functionality",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.8"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}