You cannot select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
500 lines
11 KiB
Plaintext
500 lines
11 KiB
Plaintext
{
|
|
"cells": [
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 25,
|
|
"id": "meaningful-piece",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"import torch\n",
|
|
"import numpy\n",
|
|
"from torch.autograd.functional import jacobian"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 2,
|
|
"id": "prescribed-roller",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"a = torch.tensor([2., 3.], requires_grad=True)\n",
|
|
"b = torch.tensor([6., 4.], requires_grad=True)\n",
|
|
"c = torch.tensor([2., 3.], requires_grad=False)\n",
|
|
"d = torch.tensor([6., 4.], requires_grad=False)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 3,
|
|
"id": "amateur-taste",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def f(a,b):\n",
|
|
" return a**3+a**4 + a*b**2 -b**2 -b**5"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "accepted-bonus",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 4,
|
|
"id": "burning-peeing",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"a.grad"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 5,
|
|
"id": "special-hydrogen",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"b.grad"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 6,
|
|
"id": "played-fairy",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"name": "stdout",
|
|
"output_type": "stream",
|
|
"text": [
|
|
"\n",
|
|
"Begin demo \n",
|
|
"\n",
|
|
"x = tensor([4.], requires_grad=True)\n",
|
|
"y = tensor([29.], grad_fn=<AddBackward0>)\n",
|
|
"\n",
|
|
"df = <AddBackward0 object at 0x7fd45460eb20>\n",
|
|
"\n",
|
|
"gradient of func(x) = \n",
|
|
"tensor([11.])\n"
|
|
]
|
|
}
|
|
],
|
|
"source": [
|
|
"# gradient_demo.py\n",
|
|
"\n",
|
|
"import torch as T\n",
|
|
"device = T.device(\"cpu\")\n",
|
|
"\n",
|
|
"def some_func(x):\n",
|
|
" result = (x * x) + (3 * x) + 1\n",
|
|
" return result\n",
|
|
"\n",
|
|
"def main():\n",
|
|
" print(\"\\nBegin demo \\n\")\n",
|
|
"\n",
|
|
" x = T.tensor([4.0], dtype=T.float32,\n",
|
|
" requires_grad=True).to(device)\n",
|
|
" y = some_func(x)\n",
|
|
"\n",
|
|
" print(\"x = \" + str(x))\n",
|
|
" print(\"y = \" + str(y))\n",
|
|
" print(\"\")\n",
|
|
"\n",
|
|
" df = y.grad_fn\n",
|
|
" print(\"df = \" + str(df))\n",
|
|
" print(\"\")\n",
|
|
"\n",
|
|
" y.backward() # compute grad of some_func(4)\n",
|
|
" print(\"gradient of func(x) = \")\n",
|
|
" print(x.grad) # 2(4) + 3 = 11\n",
|
|
"\n",
|
|
"if __name__ == \"__main__\":\n",
|
|
" main()"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "scientific-egypt",
|
|
"metadata": {},
|
|
"source": [
|
|
"# Try this\n",
|
|
" - define a bellman like function \n",
|
|
" - use `def` \n",
|
|
" - Find the derivatives with respect to X\n",
|
|
" - Find the derivatives with respect to $\\theta$\n",
|
|
" - Invert this to find the time-transition function?\n",
|
|
" - Inversion is probably impossible. Instead, solve both the policy and value of derivatives together?\n",
|
|
" \n",
|
|
" \n",
|
|
"## Math for conditions\n",
|
|
"### Optimality\n",
|
|
"Stationarity and Complementary Slackness conditions\n",
|
|
"$$\n",
|
|
"0 = \\frac{\\partial F}{\\partial x} + \\beta \\frac{\\partial G}{\\partial x} \\frac{\\partial V}{\\partial G} + \\lambda \\\\\n",
|
|
"0 = \\lambda \\cdot g(x,\\theta) \\\n",
|
|
"$$\n",
|
|
"### Envelope\n",
|
|
"$$\n",
|
|
"0 = \\frac{\\partial F}{\\partial \\theta} + \\frac{\\partial G}{\\partial \\theta} \\frac{\\partial V}{\\partial G} - \\frac{\\partial V}{\\partial \\theta}\n",
|
|
"$$\n",
|
|
"\n",
|
|
"So, how do you incorporate the situation where you have to iterate multiple times?\n",
|
|
" - Just add conditions as rows?\n",
|
|
" - Solve and substitute using some theorem on the inverse of derivatives in multivariate systems?\n",
|
|
" \n",
|
|
"## Thoughts on solution\n",
|
|
"You can find $\\frac{\\partial G}{\\partial \\theta}$ through a direct construction maybe?\n",
|
|
" - This involves\n",
|
|
" - Setting up each scalar element of $G$, and then differentiating\n",
|
|
" - These would then need to be reassembled into a matrix\n",
|
|
" - Pros\n",
|
|
" - Will work\n",
|
|
" - Cons\n",
|
|
" "
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 7,
|
|
"id": "flexible-hotel",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def utility(c,a=2):\n",
|
|
" return (c**(1-a))/(1-a)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 8,
|
|
"id": "floating-alert",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"(tensor([[0.0156, 0.0000],\n",
|
|
" [0.0000, 0.0123]]),\n",
|
|
" tensor([[0.0056, 0.0000],\n",
|
|
" [0.0000, 0.0177]]))"
|
|
]
|
|
},
|
|
"execution_count": 8,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"torch.autograd.functional.jacobian(utility, (a,b))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 9,
|
|
"id": "historical-elevation",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"tensor([10., 15.], grad_fn=<MulBackward0>)"
|
|
]
|
|
},
|
|
"execution_count": 9,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"def complicated(c):\n",
|
|
" return c.sum()*c\n",
|
|
"\n",
|
|
"complicated(a)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 10,
|
|
"id": "proud-brake",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"tensor([[7., 2.],\n",
|
|
" [3., 8.]], grad_fn=<ViewBackward>)"
|
|
]
|
|
},
|
|
"execution_count": 10,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"torch.autograd.functional.jacobian(complicated, a, create_graph=True)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 11,
|
|
"id": "protective-grave",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"tensor([50., 50.], grad_fn=<AddBackward0>)"
|
|
]
|
|
},
|
|
"execution_count": 11,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"def more_complicated(c,d):\n",
|
|
" return c.sum()*d + d.sum()*c\n",
|
|
"more_complicated(a,b)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 12,
|
|
"id": "coated-guard",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"(tensor([[16., 6.],\n",
|
|
" [ 4., 14.]], grad_fn=<ViewBackward>),\n",
|
|
" tensor([[7., 2.],\n",
|
|
" [3., 8.]], grad_fn=<ViewBackward>))"
|
|
]
|
|
},
|
|
"execution_count": 12,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"x = torch.autograd.functional.jacobian(more_complicated, (a,b), create_graph=True)\n",
|
|
"x"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 13,
|
|
"id": "competitive-acting",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def states(theta,x,c,d):\n",
|
|
" return (theta + x*c)@d * d"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 14,
|
|
"id": "statutory-starter",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"tensor([864., 576.], grad_fn=<MulBackward0>)"
|
|
]
|
|
},
|
|
"execution_count": 14,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"states(a,b,c,d)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 16,
|
|
"id": "engaged-teddy",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"(tensor([[36., 24.],\n",
|
|
" [24., 16.]], grad_fn=<ViewBackward>),\n",
|
|
" tensor([[72., 72.],\n",
|
|
" [48., 48.]], grad_fn=<ViewBackward>),\n",
|
|
" tensor([[216., 96.],\n",
|
|
" [144., 64.]], grad_fn=<ViewBackward>),\n",
|
|
" tensor([[228., 90.],\n",
|
|
" [ 56., 204.]], grad_fn=<ViewBackward>))"
|
|
]
|
|
},
|
|
"execution_count": 16,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"torch.autograd.functional.jacobian(states, (a,b,c,d), create_graph=True)"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "stretch-mercury",
|
|
"metadata": {},
|
|
"source": [
|
|
"So, I think I can construct a gradient, and possibly invert it/choose some other solution method."
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "markdown",
|
|
"id": "disciplinary-opposition",
|
|
"metadata": {},
|
|
"source": [
|
|
"basic transition functions\n",
|
|
"$$\n",
|
|
"s^i(s,D)\\prime = (1-D^{\\sum s}) s^i + x\\\\\n",
|
|
"D\\prime = (1-\\delta)D + D^{\\alpha} +\\gamma D^{\\sum s} +\\Gamma x\n",
|
|
"$$"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 54,
|
|
"id": "missing-robinson",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": [
|
|
"def satellite_loss(d,s):\n",
|
|
" return d**(s.sum())\n",
|
|
"\n",
|
|
"def gs(d,s,x):\n",
|
|
" return (1-d**(s.sum()))*s + x\n",
|
|
"\n",
|
|
"#Parameters\n",
|
|
"gamma, alpah, Gamma, delta = 0.1, 4000, 0.001, 0.1\n",
|
|
"\n",
|
|
"def gd(d,s,x):\n",
|
|
" return d*(1-delta) + gamma *s.sum()*d**(s.sum()) + Gamma*x"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 51,
|
|
"id": "driving-wright",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"(tensor([7.9941, 6.9961], grad_fn=<AddBackward0>),\n",
|
|
" tensor([6., 4.], requires_grad=True),\n",
|
|
" tensor([2., 3.], requires_grad=True))"
|
|
]
|
|
},
|
|
"execution_count": 51,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"D = torch.Tensor([0.5])\n",
|
|
"gs(D,b,a),b,a"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 52,
|
|
"id": "infectious-plaintiff",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"(tensor([[-0.1172],\n",
|
|
" [-0.0781]]),\n",
|
|
" tensor([[1.0031, 0.0041],\n",
|
|
" [0.0027, 1.0017]]),\n",
|
|
" tensor([[1., 0.],\n",
|
|
" [0., 1.]]))"
|
|
]
|
|
},
|
|
"execution_count": 52,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"jacobian(gs,(D,b,a))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": 53,
|
|
"id": "proud-dance",
|
|
"metadata": {},
|
|
"outputs": [
|
|
{
|
|
"data": {
|
|
"text/plain": [
|
|
"(tensor([[0.9195],\n",
|
|
" [0.9195]]),\n",
|
|
" tensor([[-0.0006, -0.0006],\n",
|
|
" [-0.0006, -0.0006]]),\n",
|
|
" tensor([[0.0010, 0.0000],\n",
|
|
" [0.0000, 0.0010]]))"
|
|
]
|
|
},
|
|
"execution_count": 53,
|
|
"metadata": {},
|
|
"output_type": "execute_result"
|
|
}
|
|
],
|
|
"source": [
|
|
"jacobian(gd,(D,b,a))"
|
|
]
|
|
},
|
|
{
|
|
"cell_type": "code",
|
|
"execution_count": null,
|
|
"id": "satisfied-briefs",
|
|
"metadata": {},
|
|
"outputs": [],
|
|
"source": []
|
|
}
|
|
],
|
|
"metadata": {
|
|
"kernelspec": {
|
|
"display_name": "Python 3",
|
|
"language": "python",
|
|
"name": "python3"
|
|
},
|
|
"language_info": {
|
|
"codemirror_mode": {
|
|
"name": "ipython",
|
|
"version": 3
|
|
},
|
|
"file_extension": ".py",
|
|
"mimetype": "text/x-python",
|
|
"name": "python",
|
|
"nbconvert_exporter": "python",
|
|
"pygments_lexer": "ipython3",
|
|
"version": "3.8.8"
|
|
}
|
|
},
|
|
"nbformat": 4,
|
|
"nbformat_minor": 5
|
|
}
|