diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 8fd4a67..4eea785 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,13 +26,13 @@ If you're wishing for a feature that doesn't exist yet in MXFusion, there are pr If you're thinking about adding code to MXFusion, here are some guidelines to get you started. -* If the change is a major feature, create a [design proposal](design_proposal/design_proposal_guidelines) in the design_proposals folder and post it as a PR, optionally with a prototype implementation of your proposed changes. This is to get community feedback on the changes and document the design reasoning of MXFusion for future reference. +* If the change is a major feature, create a [design proposal](docs/design_proposals/design_proposal_guidelines.md) in the design_proposals folder and post it as a PR, optionally with a prototype implementation of your proposed changes. This is to get community feedback on the changes and document the design reasoning of MXFusion for future reference. * Keep pull requests small, preferably one feature per pull request. This lowers the bar to entry for a reviewer, and keeps feedback focused for each feature. Some major areas where we appreciate contributions: * [Adding new Distributions/Functions/Modules](examples/notebooks/writing_a_new_distribution.ipynb) -* [Adding new Inference Algorithms](design_documents/inference) +* [Adding new Inference Algorithms](docs/design_documents/inference.md) * Example notebooks showing how to build/train a particular model. If you're still not sure where to begin, have a look at our [issues](issues TODO) page for open work. @@ -73,7 +73,7 @@ Before submitting the pull request, please go through this checklist to make the * Do all public functions have docstrings including examples? If you added a new module, did you add it to the Sphinx docstring in the ```__init__.py``` file of the module's folder? * Is the code style correct (PEP8)? * Is the commit message formatted correctly? 
-* If this is a large addition, is there a tutorial or more extensive module-level description? Did you discuss the addition in a [design proposal](design_proposals/design_proposal_guidelines)? Is there an issue related to the change? If so, please link the issue or design doc. +* If this is a large addition, is there a tutorial or more extensive module-level description? Did you discuss the addition in a [design proposal](docs/design_proposals/design_proposal_guidelines.md)? Is there an issue related to the change? If so, please link the issue or design doc. ## Setting up a development environment diff --git a/examples/notebooks/gp_regression.ipynb b/examples/notebooks/gp_regression.ipynb index b306327..107148a 100644 --- a/examples/notebooks/gp_regression.ipynb +++ b/examples/notebooks/gp_regression.ipynb @@ -6,7 +6,7 @@ "source": [ "# Gaussian Process Regression\n", "\n", - "**Zhenwen Dai (2018-11-2)**" + "**Zhenwen Dai (2019-05-29)**" ] }, { @@ -68,7 +68,7 @@ "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYYAAAD8CAYAAABzTgP2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAGoRJREFUeJzt3X2QVfWd5/H3R0SIzygdRQFhssRSQVG7caqymm1Fw4xToImLuEnEmhgm7LqTrSlBHaeBhZhSTK0ZN1lKTFQyw0SRjLFTMymj0o6morEbhxHBMeJzpzW2+DBEwIB+9497Gu653U0/nNt9nz6vqlv3nnN+597vbej76fM75/5+igjMzMy6HFTqAszMrLw4GMzMLMXBYGZmKQ4GMzNLcTCYmVmKg8HMzFIcDGZmluJgMDOzFAeDmZmlHFzqAgZj7NixMWnSpFKXYWZWUTZu3PhORNT11a4ig2HSpEm0tbWVugwzs4oi6bX+tHNXkpmZpTgYzMwsxcFgZmYpRTnHIOku4M+AtyNiag/bBfwt8KfATuCqiHgm2TYf+Juk6bciYs1gatizZw/t7e3s3r17MLvbAIwePZrx48czcuTIUpdiZkOgWCef7wG+B/yol+1/AkxJbucAq4BzJB0DLAXqgQA2SmqOiPcGWkB7eztHHHEEkyZNIpdDNhQigu3bt9Pe3s7kyZNLXY6ZDYGidCVFxOPAuwdoMgf4UeQ8BRwtaRzwBeDhiHg3CYOHgVmDqWH37t0ce+yxDoUhJoljjz3WR2ZWXlauhJaW9LqWltx6G7DhOsdwIvBG3nJ7sq639YPiUBge/jlbyRUGQUMDXHIJ/MVf5JZbWmDu3Nx6G7DhCoaePkniAOu7P4G0QFKbpLbOzs6iFmdmFaahIffBnx8OEtx7LyxZktu2bh00Npauxgo2XMHQDkzIWx4PdBxgfTcRsToi6iOivq6uzy/uldyyZcv4zne+c8A2P/3pT9m6deuQ1tHR0cFll13WZ7tvf/vbQ1qHWVE1NuY++OfO3R8EDzwA3/wmrFgBCxc6FDIYrmBoBq5Uzh8DH0TEm8BDwEWSxkgaA1yUrBtaZdIfORzBcMIJJ7B+/fo+2zkYbMgM1e9bY2MuALqCAGDVKmhqyt0Xvqb1W1GCQdKPgSeBkyW1S/qapG9I+kbS5J+Bl4FtwJ3AfweIiHeBFUBrcluerBtahYehReqPvOmmmzj55JOZOXMmL7zwwr71d955Jw0NDZxxxhl86UtfYufOnfzqV7+iubmZRYsWMX36dF566aUe2xVatmwZX/3qVzn//POZMmUKd955J5C7WmjRokVMnTqVadOmcd999wHw6quvMnVq7grie+65hy9+8YvMmjWLKVOmsHjxYgCuv/56du3axfTp0/nyl7/Mhx9+yMUXX8wZZ5zB1KlT9z2X2aAM0e8bLS37g+D223PnGNatg+XL9x9NOBwGJyIq7nb22WdHoa1bt3Zbd0AbNkSMHRvR1JS737BhYPsXaGtri6lTp8aHH34YH3zwQXzmM5+JW2+9NSIi3nnnnX3tbrzxxrj99tsjImL+/Plx//3379vWW7t8S5cujdNPPz127twZnZ2dMX78+Pjtb38b69evj5kzZ8bevXvjrbfeigkTJkRHR0e88sorcdppp0VExN133x2TJ0+O999/P3bt2hUTJ06M119/PSIiDjvssH2vsX79+rj66qv3Lb///vvd6hjwz9tqW5F/3/Y9X9fzLFgQcdRR6efdsCHilluyvU6VAdqiH5+xtfvN58LD0Iz9kU888QSXXnophx56KEceeSSzZ8/et+25557j3HPPZdq0aaxdu5YtW7b0+Bz9bTdnzhw+9alPMXbsWBobG3n66af55S9/yRVXXMGIESM47rjj+PznP09ra2u3fS+44AKOOuooRo8ezamnnsprr3UfU2vatGk88sgjXHfddTzxxBMcddRRg/ypmCWK/PtGa2v65PI
dd+TOMeT/n29shOSo2AamdoMh/zC0SP2RvV3GedVVV/G9732PzZs3s3Tp0l6/A9DfdoWvI4ncHwN9GzVq1L7HI0aMYO/evd3afPazn2Xjxo1MmzaNG264geXLl/fruc16Vezft8WLu4eLg6BoajMYuvo4i9gfed555/HAAw+wa9cuduzYwc9+9rN923bs2MG4cePYs2cPa9eu3bf+iCOOYMeOHX22K/Tggw+ye/dutm/fzmOPPUZDQwPnnXce9913Hx9//DGdnZ08/vjjzJgxo9/1jxw5kj179gC5K5kOPfRQvvKVr3DttdfyzDPPDORHYZY2BL9vNrQqcj6GzAoPQ7sufWttHfQh7llnncXll1/O9OnTOemkkzj33HP3bVuxYgXnnHMOJ510EtOmTdsXBvPmzePrX/86t99+O+vXr++1XaEZM2Zw8cUX8/rrr9PU1MQJJ5zApZdeypNPPskZZ5yBJFauXMnxxx/Pq6++2q/6FyxYwOmnn85ZZ53FlVdeyaJFizjooIMYOXIkq1atGtTPxAwYkt83G1rqbxdEOamvr4/CiXqef/55TjnllBJVNHyWLVvG4YcfzrXXXlvSOmrl521WTSRtjIj6vtrVZleSmZn1qja7kirYsmXLSl2CmVW5qjpiqMRusUrkn7NZdauaYBg9ejTbt2/3h9YQi2Q+htGjR5e6FDMbIlXTlTR+/Hja29vxyKtDr2sGNzOrTlUTDCNHjvSMYmZmRVA1XUlmZlYcDgYzM0txMJhZbSqTeVnKkYPBzGrTQOeJqKEgcTCYWU4NffABPU8PeqB5oodqwqEy5GAws5wa+uDbZyDzRAw0SCqYg8HMcmrog2+fgc4TUewJh8qUg8HM9quRDz5gcPNEDMEEX+XIwWBm+9XIBx9w4HkielJDEw4VZT4GSbOAvwVGAD+IiJsLtt8GdP3pcSjw6Yg4Otn2MbA52fZ6RMymDz3Nx2BmGeV/8DU2dl+udStX5s635P8sWlpyQVIhU4r2dz6GzMEgaQTwG+BCoB1oBa6IiK29tP+fwJkR8efJ8u8j4vCBvKaDwWwIVMEHnx1Yf4OhGGMlzQC2RcTLyQvfC8wBegwG4ApgaRFe18yKqacP/8ZGHy3UoGKcYzgReCNvuT1Z142kk4DJwIa81aMltUl6StIlRajHzMwyKMYRg3pY11v/1DxgfUR8nLduYkR0SPojYIOkzRHxUrcXkRYACwAmTpyYtWYzM+tFMY4Y2oEJecvjgY5e2s4Dfpy/IiI6kvuXgceAM3vaMSJWR0R9RNTX1dVlrdnMzHpRjGBoBaZImizpEHIf/s2FjSSdDIwBnsxbN0bSqOTxWOBz9H5uwszMhkHmrqSI2CvpGuAhcper3hURWyQtB9oioiskrgDujfRlUKcAd0j6hFxI3dzb1UxmZjY8ivI9huHmy1XNzAauv5er+pvPZmaW4mAwM8uqyoYsdzCYmWVVZUOWF+N7DGZmtS1/yPKFC3MDEFbwGFM+YjAzK4YqGrLcwWBmVgxVNGS5g8HMLKuBzNVQASeqHQxmZlkNZNKfCjhR7S+4mZkNt64wGOYT1f6Cm5lZuSrzE9UOBrNqUwF92DWvzE9UOxjMqk0F9GHXtIGcqC4RB4NZtcn/stWSJfs/hMqsu6JmDeREdYn45LNZtVqyJNeH3dSU+8vUap5PPpvVsjLvw7by5mAwqzYV0Idt5c3BYFZtKqAP28qbzzGYmdUIn2MwM7NBcTCYmVlKUYJB0ixJL0jaJun6HrZfJalT0qbkdnXetvmSXkxu84tRj5mZDV7mGdwkjQC+D1wItAOtkpojYmtB0/si4pqCfY8BlgL1QAAbk33fy1qXmZkNTjGOGGYA2yLi5Yj4A3AvMKef+34BeDgi3k3C4GFgVhFqMjOzQSpGMJwIvJG33J6sK/QlSc9KWi9pwgD3NTOzYVKMYFAP6wqvgf0ZMCkiTgceAdYMYN9cQ2mBpDZJbZ2dnYMu1qwiecRUG0bFCIZ
2YELe8nigI79BRGyPiI+SxTuBs/u7b95zrI6I+oior6urK0LZZhXEI6bWrhL8UVCMYGgFpkiaLOkQYB7QnN9A0ri8xdnA88njh4CLJI2RNAa4KFlnZvk8YmrtKsEfBZmvSoqIvZKuIfeBPgK4KyK2SFoOtEVEM/CXkmYDe4F3gauSfd+VtIJcuAAsj4h3s9ZkVpXyZ/1qanIo1Ir8PwqGaSpQD4lhVilKNE+wlYkiDKPuITHMqolHTK1twzyMuoPBrBJ4xNTaVYI/CtyVZGZWzlauzJ1ozu82bGnJ/VGwePGAnqq/XUkOBjOzGuFzDGZmNigOBjMzS3EwmJlZioPBzMxSHAxm5cSD5VkZcDCYlRMPlmdlIPNYSWZWRCUYF8eskI8YzMpN/mB5Cxc6FGzYORjMys0wj4tjVsjBYFZOPFielQEHg1k58WB5VgY8VpKZWY3wWElmZjYoDgYzM0txMJiZWYqDwczMUooSDJJmSXpB0jZJ1/ew/a8kbZX0rKRHJZ2Ut+1jSZuSW3Mx6jEzs8HLPCSGpBHA94ELgXagVVJzRGzNa/avQH1E7JS0EFgJXJ5s2xUR07PWYWZmxVGMI4YZwLaIeDki/gDcC8zJbxARLRGxM1l8ChhfhNc1M7MhUIxgOBF4I2+5PVnXm68BP89bHi2pTdJTki4pQj1mZpZBMYJBPazr8Vtzkr4C1AO35q2emHzh4r8B35X0mV72XZAESFtnZ2fWms3Ki+dhsDJSjGBoBybkLY8HOgobSZoJ3AjMjoiPutZHREdy/zLwGHBmTy8SEasjoj4i6uvq6opQtlkZ8TwMVkaKEQytwBRJkyUdAswDUlcXSToTuINcKLydt36MpFHJ47HA54D8k9ZmtSF/HoYlS/YPpOcht60EMl+VFBF7JV0DPASMAO6KiC2SlgNtEdFMruvocOB+SQCvR8Rs4BTgDkmfkAupmwuuZjKrHfnzMDQ1ORSsZDyInlm56Oo+8sxtNkQ8iJ5ZJfE8DFZGHAxm5cDzMFgZcTCYDbeeLk1taOgeAo2NsHjx8NVllnAwmA03X5pqZS7zVUlmNkD5l6b6RLOVIR8xmJVC/qWpCxc6FKysOBjMSqGlJXek0NSUu/fVR1ZGHAxmw82XplqZczCYDTdfmmplzt98NjOrEf7ms5mZDYqDwczMUhwMZmaW4mAwM7MUB4OZmaU4GMzMLMXBYGZmKQ4GMzNLcTCYmVmKg8HMzFKKEgySZkl6QdI2Sdf3sH2UpPuS7b+WNClv2w3J+hckfaEY9ZiZ2eBlDgZJI4DvA38CnApcIenUgmZfA96LiP8E3Abckux7KjAPOA2YBfy/5PnMzKxEinHEMAPYFhEvR8QfgHuBOQVt5gBrksfrgQskKVl/b0R8FBGvANuS5zMzsxIpRjCcCLyRt9yerOuxTUTsBT4Aju3nvmZmNoyKEQzqYV3hWN69tenPvrknkBZIapPU1tnZOcASzcysv4oRDO3AhLzl8UBHb20kHQwcBbzbz30BiIjVEVEfEfV1dXVFKNvMzHpSjGBoBaZImizpEHInk5sL2jQD85PHlwEbIjdDUDMwL7lqaTIwBXi6CDWZmdkgHZz1CSJir6RrgIeAEcBdEbFF0nKgLSKagR8CfydpG7kjhXnJvlskrQO2AnuB/xERH2etyczMBs9Te5qZ1QhP7WlmZoPiYDAzsxQHg5mZpTgYzMwsxcFgZmYpDgYzM0txMJiZWYqDwczMUhwMZmaW4mAwM7MUB4OZmaU4GMzMLMXBYGZmKQ4GMzNLcTCYmVmKg8HMzFIcDGZmluJgMDOzFAeDmZmlOBjMzCwlUzBIOkbSw5JeTO7H9NBmuqQnJW2R9Kyky/O23SPpFUmbktv0LPWYmVl2WY8YrgcejYgpwKPJcqGdwJURcRowC/iupKPzti+KiOnJbVPGeszMLKOswTAHWJM8XgNcUtggIn4TES8mjzuAt4G6jK9rZmZDJGswHBcRbwIk958+UGNJM4B
DgJfyVt+UdDHdJmlUxnrMzCyjg/tqIOkR4PgeNt04kBeSNA74O2B+RHySrL4BeItcWKwGrgOW97L/AmABwMSJEwfy0mZmNgB9BkNEzOxtm6TfSRoXEW8mH/xv99LuSOCfgL+JiKfynvvN5OFHku4Grj1AHavJhQf19fXRV91mZjY4WbuSmoH5yeP5wIOFDSQdAjwA/Cgi7i/YNi65F7nzE89lrMfMzDLKGgw3AxdKehG4MFlGUr2kHyRt5gLnAVf1cFnqWkmbgc3AWOBbGevp2cqV0NKSXtfSkltvZmYpfXYlHUhEbAcu6GF9G3B18vjvgb/vZf/zs7x+vzU0wNy5sG4dNDbmQqFr2czMUmrjm8+NjbkQmDsXlixJh4TVNh9NmnVTG8EAuRBYuBBWrMjdOxQM9h9NdoVD19FkQ0Np6zIrodoJhpYWWLUKmppy94V/JVpt8tGkWTe1EQz55xSWL9//QeBwMPDRpFmB2giG1tb0X4FdfyW2tpa2LisPPpo0S1FE5X1XrL6+Ptra2kpdhlWD/KPJwivWfORgVUbSxoio76tdbRwxmPXGR5Nm3fiIwcysRviIwczMBsXBYGZmKQ4GMzNLcTCYmVmKg8HMzFIcDGZmluJgMDOzFAeDmZmlOBjMzCzFwWBmZikOBjMzS3EwmJlZSqZgkHSMpIclvZjcj+ml3ceSNiW35rz1kyX9Otn/PkmHZKnHzMyyy3rEcD3waERMAR5NlnuyKyKmJ7fZeetvAW5L9n8P+FrGeszMLKOswTAHWJM8XgNc0t8dJQk4H1g/mP3NzGxoZA2G4yLiTYDk/tO9tBstqU3SU5K6PvyPBd6PiL3JcjtwYsZ6zMwso4P7aiDpEeD4HjbdOIDXmRgRHZL+CNggaTPwHz2063XWIEkLgAUAEydOHMBLF8HKldDQkJ7qsaUlN8vX4sXDW4uZ2RDr84ghImZGxNQebg8Cv5M0DiC5f7uX5+hI7l8GHgPOBN4BjpbUFU7jgY4D1LE6Iuojor6urm4Ab7EIGhpy8wB3TRLfNS9wQ8Pw1mFmNgyydiU1A/OTx/OBBwsbSBojaVTyeCzwOWBr5OYUbQEuO9D+ZaFrHuC5c2HJEk8Wb2ZVLWsw3AxcKOlF4MJkGUn1kn6QtDkFaJP0b+SC4OaI2Jpsuw74K0nbyJ1z+GHGeoZOYyMsXAgrVuTuHQpmVqWU+8O9stTX10dbW9vwvmhX99HChbBqlY8YzKziSNoYEfV9tfM3n/ujKxTWrYPly/d3K3Wdc7DKsHJl93+zlpbcejPbx8HQH62t6SOErnMOra2lrcsGxhcRmPWLu5KstrhL0GqYu5KGg7smKo8vIjDrk4MhC3dNVJ6WltyRQlNT7t7nicy6cTBk4e83VBZfRGDWLw6GrNw1UTl8EYFZv/jkc1Y+mWlmFcInn4eDuybMrAo5GLJw14SZVSF3JZmZ1Qh3JZmZ2aA4GMzMLMXBYGZmKQ4GMzNLcTCYmVmKg8HMzFIcDFaZPLKt2ZBxMFhl8si2ZkPm4FIXYDYo+SPbepwqs6LKdMQg6RhJD0t6Mbkf00ObRkmb8m67JV2SbLtH0it526ZnqcdqjEe2NRsSWbuSrgcejYgpwKPJckpEtETE9IiYDpwP7AR+kddkUdf2iNiUsR6rJZ50x2xIZA2GOcCa5PEa4JI+2l8G/DwidmZ8Xat1HtnWbMhkDYbjIuJNgOT+0320nwf8uGDdTZKelXSbpFEZ67Fa4ZFtzYZMn6OrSnoEOL6HTTcCayLi6Ly270VEt/MMybZxwLPACRGxJ2/dW8AhwGrgpYhY3sv+C4AFABMnTjz7tdde6+OtmZlZvv6OrtrnVUkRMfMAL/I7SeMi4s3kQ/7tAzzVXOCBrlBInvvN5OFHku4Grj1AHavJhQf19fWVN1a4mVmFyNqV1AzMTx7PBx48QNsrKOhGSsIESSJ3fuK5jPWYmVlGWYPhZuBCSS8CFybLSKqX9IOuRpImAROAfynYf62
kzcBmYCzwrYz1mJlZRpm+4BYR24ELeljfBlydt/wqcGIP7c7P8vpmZlZ8HhLDzMxSKnLOZ0mdQF+XJY0F3hmGcoZTNb4n8PuqNNX4vqrxPUH393VSRNT1tVNFBkN/SGrrz2VZlaQa3xP4fVWaanxf1fieYPDvy11JZmaW4mAwM7OUag6G1aUuYAhU43sCv69KU43vqxrfEwzyfVXtOQYzMxucaj5iMDOzQajaYJC0Ihm1dZOkX0g6odQ1FYOkWyX9e/LeHpB0dN97lT9J/1XSFkmfSKroq0MkzZL0gqRtkrrNUVKpJN0l6W1JVTN0jaQJklokPZ/8//tmqWsqBkmjJT0t6d+S9/W/B7R/tXYlSToyIv4jefyXwKkR8Y0Sl5WZpIuADRGxV9ItABFxXYnLykzSKcAnwB3Atcm35yuOpBHAb8gNEdMOtAJXRMTWkhZWBJLOA34P/Cgippa6nmJIxmsbFxHPSDoC2AhcUun/Xsn4c4dFxO8ljQR+CXwzIp7qz/5Ve8TQFQqJw4CqSMCI+EVE7E0WnwLGl7KeYomI5yPihVLXUQQzgG0R8XJE/AG4l9yEVhUvIh4H3i11HcUUEW9GxDPJ4x3A8/QwfE+liZzfJ4sjk1u/PwOrNhgAJN0k6Q3gy8CSUtczBP4c+Hmpi7CUE4E38pbbqYIPmlqQDPZ5JvDr0lZSHJJGSNpEbjqEhyOi3++rooNB0iOSnuvhNgcgIm6MiAnAWuCa0lbbf329r6TNjcBecu+tIvTnfVUB9bCuKo5Wq5mkw4GfAP+roLehYkXExxExnVyvwgxJ/e7+yzS6aqkdaBKhAv8A/BOwdAjLKZq+3pek+cCfARdEBZ0kGsC/VyVrJzfEfJfxQEeJarF+SPrgfwKsjYh/LHU9xRYR70t6DJhFP+e8qegjhgORNCVvcTbw76WqpZgkzQKuA2ZHxM5S12PdtAJTJE2WdAi5ec6bS1yT9SI5SftD4PmI+D+lrqdYJNV1XbEo6VPATAbwGVjNVyX9BDiZ3JUurwHfiIjflraq7CRtA0YB25NVT1XJ1VaXAv8XqAPeBzZFxBdKW9XgSPpT4LvACOCuiLipxCUVhaQfA/+F3IidvwOWRsQPS1pURpL+M/AEucnCPklW/3VE/HPpqspO0unAGnL/Bw8C1kXE8n7vX63BYGZmg1O1XUlmZjY4DgYzM0txMJiZWYqDwczMUhwMZmaW4mAwM7MUB4OZmaU4GMzMLOX/A/xoEVK9w+cWAAAAAElFTkSuQmCC\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAYYAAAD8CAYAAABzTgP2AAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAGoRJREFUeJzt3X2QVfWd5/H3R0SIzygdRQFhssRSQVG7caqymm1Fw4xToImLuEnEmhgm7LqTrSlBHaeBhZhSTK0ZN1lKTFQyw0SRjLFTMymj0o6morEbhxHBMeJzpzW2+DBEwIB+9497Gu653U0/nNt9nz6vqlv3nnN+597vbej76fM75/5+igjMzMy6HFTqAszMrLw4GMzMLMXBYGZmKQ4GMzNLcTCYmVmKg8HMzFIcDGZmluJgMDOzFAeDmZmlHFzqAgZj7NixMWnSpFKXYWZWUTZu3PhORNT11a4ig2HSpEm0tbWVugwzs4oi6bX+tHNXkpmZpTgYzMwsxcFgZmYpRTnHIOku4M+AtyNiag/bBfwt8KfATuCqiHgm2TYf+Juk6bciYs1gatizZw/t7e3s3r17MLvbAIwePZrx48czcuTIUpdiZkOgWCef7wG+B/yol+1/AkxJbucAq4BzJB0DLAXqgQA2SmqOiPcGWkB7eztHHHEEkyZNIpdDNhQigu3bt9Pe3s7kyZNLXY6ZDYGidCVFxOPAuwdoMgf4UeQ8BRwtaRzwBeDhiHg3CYOHgVmDqWH37t0ce+yxDoUhJoljjz3WR2ZWXlauhJaW9LqWltx6G7DhOsdwIvBG3nJ7sq639YPiUBge/jlbyRUGQUMDXHIJ/MVf5JZbWmDu3Nx6G7DhCoaePkniAOu7P4G0QFKbpLbOzs6iFmdmFaahIffBnx8OEtx7LyxZktu2bh00Npauxgo2XMHQDkzIWx4PdBxgfTcRsToi6iOivq6uzy/uldyyZcv4zne+c8A2P/3pT9m6deuQ1tHR0cFll13WZ7tvf/vbQ1qHWVE1NuY++OfO3R8EDzwA3/wmrFgBCxc6FDIYrmBoBq5Uzh8DH0TEm8BDwEWSxkgaA1yUrBtaZdIfORzBcMIJJ7B+/fo+2zkYbMgM1e9bY2MuALqCAGDVKmhqyt0Xvqb1W1GCQdKPgSeBkyW1S/qapG9I+kbS5J+Bl4FtwJ3AfweIiHeBFUBrcluerBtahYehReqPvOmmmzj55JOZOXMmL7zwwr71d955Jw0NDZxxxhl86UtfYufOnfzqV7+iubmZRYsWMX36dF566aUe2xVatmwZX/3qVzn//POZMmUKd955J5C7WmjRokVMnTqVadOmcd999wHw6quvMnVq7grie+65hy9+8YvMmjWLKVOmsHjxYgCuv/56du3axfTp0/nyl7/Mhx9+yMUXX8wZZ5zB1KlT9z2X2aAM0e8bLS37g+D223PnGNatg+XL9x9NOBwGJyIq7nb22WdHoa1bt3Zbd0AbNkSMHRvR1JS737BhYPsXaGtri6lTp8aHH34YH3zwQXzmM5+JW2+9NSIi3nnnnX3tbrzxxrj99tsjImL+/Plx//3379vWW7t8S5cujdNPPz127twZnZ2dMX78+Pjtb38b69evj5kzZ8bevXvjrbfeigkTJkRHR0e88sorcdppp0VExN133x2TJ0+O999/P3bt2hUTJ06M119/PSIiDjvssH2vsX79+rj66qv3Lb///vvd6hjwz9tqW5F/3/Y9X9fzLFgQcdRR6efdsCHilluyvU6VAdqiH5+xtfvN58LD0Iz9kU888QSXXnophx56KEceeSSzZ8/et+25557j3HPPZdq0aaxdu5YtW7b0+Bz9bTdnzhw+9alPMXbsWBobG3n66af55S9/yRVXXMGIESM47rjj+PznP09ra2u3fS+44AKOOuooRo8ezamnnsprr3UfU2vatGk88sgjXHfddTzxxBMcddRRg/ypmCWK/PtGa2v65PI
dd+TOMeT/n29shOSo2AamdoMh/zC0SP2RvV3GedVVV/G9732PzZs3s3Tp0l6/A9DfdoWvI4ncHwN9GzVq1L7HI0aMYO/evd3afPazn2Xjxo1MmzaNG264geXLl/fruc16Vezft8WLu4eLg6BoajMYuvo4i9gfed555/HAAw+wa9cuduzYwc9+9rN923bs2MG4cePYs2cPa9eu3bf+iCOOYMeOHX22K/Tggw+ye/dutm/fzmOPPUZDQwPnnXce9913Hx9//DGdnZ08/vjjzJgxo9/1jxw5kj179gC5K5kOPfRQvvKVr3DttdfyzDPPDORHYZY2BL9vNrQqcj6GzAoPQ7sufWttHfQh7llnncXll1/O9OnTOemkkzj33HP3bVuxYgXnnHMOJ510EtOmTdsXBvPmzePrX/86t99+O+vXr++1XaEZM2Zw8cUX8/rrr9PU1MQJJ5zApZdeypNPPskZZ5yBJFauXMnxxx/Pq6++2q/6FyxYwOmnn85ZZ53FlVdeyaJFizjooIMYOXIkq1atGtTPxAwYkt83G1rqbxdEOamvr4/CiXqef/55TjnllBJVNHyWLVvG4YcfzrXXXlvSOmrl521WTSRtjIj6vtrVZleSmZn1qja7kirYsmXLSl2CmVW5qjpiqMRusUrkn7NZdauaYBg9ejTbt2/3h9YQi2Q+htGjR5e6FDMbIlXTlTR+/Hja29vxyKtDr2sGNzOrTlUTDCNHjvSMYmZmRVA1XUlmZlYcDgYzM0txMJhZbSqTeVnKkYPBzGrTQOeJqKEgcTCYWU4NffABPU8PeqB5oodqwqEy5GAws5wa+uDbZyDzRAw0SCqYg8HMcmrog2+fgc4TUewJh8qUg8HM9quRDz5gcPNEDMEEX+XIwWBm+9XIBx9w4HkielJDEw4VZT4GSbOAvwVGAD+IiJsLtt8GdP3pcSjw6Yg4Otn2MbA52fZ6RMymDz3Nx2BmGeV/8DU2dl+udStX5s635P8sWlpyQVIhU4r2dz6GzMEgaQTwG+BCoB1oBa6IiK29tP+fwJkR8efJ8u8j4vCBvKaDwWwIVMEHnx1Yf4OhGGMlzQC2RcTLyQvfC8wBegwG4ApgaRFe18yKqacP/8ZGHy3UoGKcYzgReCNvuT1Z142kk4DJwIa81aMltUl6StIlRajHzMwyKMYRg3pY11v/1DxgfUR8nLduYkR0SPojYIOkzRHxUrcXkRYACwAmTpyYtWYzM+tFMY4Y2oEJecvjgY5e2s4Dfpy/IiI6kvuXgceAM3vaMSJWR0R9RNTX1dVlrdnMzHpRjGBoBaZImizpEHIf/s2FjSSdDIwBnsxbN0bSqOTxWOBz9H5uwszMhkHmrqSI2CvpGuAhcper3hURWyQtB9oioiskrgDujfRlUKcAd0j6hFxI3dzb1UxmZjY8ivI9huHmy1XNzAauv5er+pvPZmaW4mAwM8uqyoYsdzCYmWVVZUOWF+N7DGZmtS1/yPKFC3MDEFbwGFM+YjAzK4YqGrLcwWBmVgxVNGS5g8HMLKuBzNVQASeqHQxmZlkNZNKfCjhR7S+4mZkNt64wGOYT1f6Cm5lZuSrzE9UOBrNqUwF92DWvzE9UOxjMqk0F9GHXtIGcqC4RB4NZtcn/stWSJfs/hMqsu6JmDeREdYn45LNZtVqyJNeH3dSU+8vUap5PPpvVsjLvw7by5mAwqzYV0Idt5c3BYFZtKqAP28qbzzGYmdUIn2MwM7NBcTCYmVlKUYJB0ixJL0jaJun6HrZfJalT0qbkdnXetvmSXkxu84tRj5mZDV7mGdwkjQC+D1wItAOtkpojYmtB0/si4pqCfY8BlgL1QAAbk33fy1qXmZkNTjGOGGYA2yLi5Yj4A3AvMKef+34BeDgi3k3C4GFgVhFqMjOzQSpGMJwIvJG33J6sK/QlSc9KWi9pwgD3NTOzYVKMYFAP6wqvgf0ZMCkiTgceAdYMYN9cQ2mBpDZJbZ2dnYMu1qwiecRUG0bFCIZ
2YELe8nigI79BRGyPiI+SxTuBs/u7b95zrI6I+oior6urK0LZZhXEI6bWrhL8UVCMYGgFpkiaLOkQYB7QnN9A0ri8xdnA88njh4CLJI2RNAa4KFlnZvk8YmrtKsEfBZmvSoqIvZKuIfeBPgK4KyK2SFoOtEVEM/CXkmYDe4F3gauSfd+VtIJcuAAsj4h3s9ZkVpXyZ/1qanIo1Ir8PwqGaSpQD4lhVilKNE+wlYkiDKPuITHMqolHTK1twzyMuoPBrBJ4xNTaVYI/CtyVZGZWzlauzJ1ozu82bGnJ/VGwePGAnqq/XUkOBjOzGuFzDGZmNigOBjMzS3EwmJlZioPBzMxSHAxm5cSD5VkZcDCYlRMPlmdlIPNYSWZWRCUYF8eskI8YzMpN/mB5Cxc6FGzYORjMys0wj4tjVsjBYFZOPFielQEHg1k58WB5VgY8VpKZWY3wWElmZjYoDgYzM0txMJiZWYqDwczMUooSDJJmSXpB0jZJ1/ew/a8kbZX0rKRHJZ2Ut+1jSZuSW3Mx6jEzs8HLPCSGpBHA94ELgXagVVJzRGzNa/avQH1E7JS0EFgJXJ5s2xUR07PWYWZmxVGMI4YZwLaIeDki/gDcC8zJbxARLRGxM1l8ChhfhNc1M7MhUIxgOBF4I2+5PVnXm68BP89bHi2pTdJTki4pQj1mZpZBMYJBPazr8Vtzkr4C1AO35q2emHzh4r8B35X0mV72XZAESFtnZ2fWms3Ki+dhsDJSjGBoBybkLY8HOgobSZoJ3AjMjoiPutZHREdy/zLwGHBmTy8SEasjoj4i6uvq6opQtlkZ8TwMVkaKEQytwBRJkyUdAswDUlcXSToTuINcKLydt36MpFHJ47HA54D8k9ZmtSF/HoYlS/YPpOcht60EMl+VFBF7JV0DPASMAO6KiC2SlgNtEdFMruvocOB+SQCvR8Rs4BTgDkmfkAupmwuuZjKrHfnzMDQ1ORSsZDyInlm56Oo+8sxtNkQ8iJ5ZJfE8DFZGHAxm5cDzMFgZcTCYDbeeLk1taOgeAo2NsHjx8NVllnAwmA03X5pqZS7zVUlmNkD5l6b6RLOVIR8xmJVC/qWpCxc6FKysOBjMSqGlJXek0NSUu/fVR1ZGHAxmw82XplqZczCYDTdfmmplzt98NjOrEf7ms5mZDYqDwczMUhwMZmaW4mAwM7MUB4OZmaU4GMzMLMXBYGZmKQ4GMzNLcTCYmVmKg8HMzFKKEgySZkl6QdI2Sdf3sH2UpPuS7b+WNClv2w3J+hckfaEY9ZiZ2eBlDgZJI4DvA38CnApcIenUgmZfA96LiP8E3Abckux7KjAPOA2YBfy/5PnMzKxEinHEMAPYFhEvR8QfgHuBOQVt5gBrksfrgQskKVl/b0R8FBGvANuS5zMzsxIpRjCcCLyRt9yerOuxTUTsBT4Aju3nvmZmNoyKEQzqYV3hWN69tenPvrknkBZIapPU1tnZOcASzcysv4oRDO3AhLzl8UBHb20kHQwcBbzbz30BiIjVEVEfEfV1dXVFKNvMzHpSjGBoBaZImizpEHInk5sL2jQD85PHlwEbIjdDUDMwL7lqaTIwBXi6CDWZmdkgHZz1CSJir6RrgIeAEcBdEbFF0nKgLSKagR8CfydpG7kjhXnJvlskrQO2AnuB/xERH2etyczMBs9Te5qZ1QhP7WlmZoPiYDAzsxQHg5mZpTgYzMwsxcFgZmYpDgYzM0txMJiZWYqDwczMUhwMZmaW4mAwM7MUB4OZmaU4GMzMLMXBYGZmKQ4GMzNLcTCYmVmKg8HMzFIcDGZmluJgMDOzFAeDmZmlOBjMzCwlUzBIOkbSw5JeTO7H9NBmuqQnJW2R9Kyky/O23SPpFUmbktv0LPWYmVl2WY8YrgcejYgpwKPJcqGdwJURcRowC/iupKPzti+KiOnJbVPGeszMLKOswTAHWJM8XgNcUtggIn4TES8mjzuAt4G6jK9rZmZDJGswHBcRbwIk958+UGNJM4B
DgJfyVt+UdDHdJmlUxnrMzCyjg/tqIOkR4PgeNt04kBeSNA74O2B+RHySrL4BeItcWKwGrgOW97L/AmABwMSJEwfy0mZmNgB9BkNEzOxtm6TfSRoXEW8mH/xv99LuSOCfgL+JiKfynvvN5OFHku4Grj1AHavJhQf19fXRV91mZjY4WbuSmoH5yeP5wIOFDSQdAjwA/Cgi7i/YNi65F7nzE89lrMfMzDLKGgw3AxdKehG4MFlGUr2kHyRt5gLnAVf1cFnqWkmbgc3AWOBbGevp2cqV0NKSXtfSkltvZmYpfXYlHUhEbAcu6GF9G3B18vjvgb/vZf/zs7x+vzU0wNy5sG4dNDbmQqFr2czMUmrjm8+NjbkQmDsXlixJh4TVNh9NmnVTG8EAuRBYuBBWrMjdOxQM9h9NdoVD19FkQ0Np6zIrodoJhpYWWLUKmppy94V/JVpt8tGkWTe1EQz55xSWL9//QeBwMPDRpFmB2giG1tb0X4FdfyW2tpa2LisPPpo0S1FE5X1XrL6+Ptra2kpdhlWD/KPJwivWfORgVUbSxoio76tdbRwxmPXGR5Nm3fiIwcysRviIwczMBsXBYGZmKQ4GMzNLcTCYmVmKg8HMzFIcDGZmluJgMDOzFAeDmZmlOBjMzCzFwWBmZikOBjMzS3EwmJlZSqZgkHSMpIclvZjcj+ml3ceSNiW35rz1kyX9Otn/PkmHZKnHzMyyy3rEcD3waERMAR5NlnuyKyKmJ7fZeetvAW5L9n8P+FrGeszMLKOswTAHWJM8XgNc0t8dJQk4H1g/mP3NzGxoZA2G4yLiTYDk/tO9tBstqU3SU5K6PvyPBd6PiL3JcjtwYsZ6zMwso4P7aiDpEeD4HjbdOIDXmRgRHZL+CNggaTPwHz2063XWIEkLgAUAEydOHMBLF8HKldDQkJ7qsaUlN8vX4sXDW4uZ2RDr84ghImZGxNQebg8Cv5M0DiC5f7uX5+hI7l8GHgPOBN4BjpbUFU7jgY4D1LE6Iuojor6urm4Ab7EIGhpy8wB3TRLfNS9wQ8Pw1mFmNgyydiU1A/OTx/OBBwsbSBojaVTyeCzwOWBr5OYUbQEuO9D+ZaFrHuC5c2HJEk8Wb2ZVLWsw3AxcKOlF4MJkGUn1kn6QtDkFaJP0b+SC4OaI2Jpsuw74K0nbyJ1z+GHGeoZOYyMsXAgrVuTuHQpmVqWU+8O9stTX10dbW9vwvmhX99HChbBqlY8YzKziSNoYEfV9tfM3n/ujKxTWrYPly/d3K3Wdc7DKsHJl93+zlpbcejPbx8HQH62t6SOErnMOra2lrcsGxhcRmPWLu5KstrhL0GqYu5KGg7smKo8vIjDrk4MhC3dNVJ6WltyRQlNT7t7nicy6cTBk4e83VBZfRGDWLw6GrNw1UTl8EYFZv/jkc1Y+mWlmFcInn4eDuybMrAo5GLJw14SZVSF3JZmZ1Qh3JZmZ2aA4GMzMLMXBYGZmKQ4GMzNLcTCYmVmKg8HMzFIcDFaZPLKt2ZBxMFhl8si2ZkPm4FIXYDYo+SPbepwqs6LKdMQg6RhJD0t6Mbkf00ObRkmb8m67JV2SbLtH0it526ZnqcdqjEe2NRsSWbuSrgcejYgpwKPJckpEtETE9IiYDpwP7AR+kddkUdf2iNiUsR6rJZ50x2xIZA2GOcCa5PEa4JI+2l8G/DwidmZ8Xat1HtnWbMhkDYbjIuJNgOT+0320nwf8uGDdTZKelXSbpFEZ67Fa4ZFtzYZMn6OrSnoEOL6HTTcCayLi6Ly270VEt/MMybZxwLPACRGxJ2/dW8AhwGrgpYhY3sv+C4AFABMnTjz7tdde6+OtmZlZvv6OrtrnVUkRMfMAL/I7SeMi4s3kQ/7tAzzVXOCBrlBInvvN5OFHku4Grj1AHavJhQf19fWVN1a4mVmFyNqV1AzMTx7PBx48QNsrKOhGSsIESSJ3fuK5jPWYmVlGWYPhZuBCSS8CFybLSKqX9IOuRpImAROAfynYf62
kzcBmYCzwrYz1mJlZRpm+4BYR24ELeljfBlydt/wqcGIP7c7P8vpmZlZ8HhLDzMxSKnLOZ0mdQF+XJY0F3hmGcoZTNb4n8PuqNNX4vqrxPUH393VSRNT1tVNFBkN/SGrrz2VZlaQa3xP4fVWaanxf1fieYPDvy11JZmaW4mAwM7OUag6G1aUuYAhU43sCv69KU43vqxrfEwzyfVXtOQYzMxucaj5iMDOzQajaYJC0Ihm1dZOkX0g6odQ1FYOkWyX9e/LeHpB0dN97lT9J/1XSFkmfSKroq0MkzZL0gqRtkrrNUVKpJN0l6W1JVTN0jaQJklokPZ/8//tmqWsqBkmjJT0t6d+S9/W/B7R/tXYlSToyIv4jefyXwKkR8Y0Sl5WZpIuADRGxV9ItABFxXYnLykzSKcAnwB3Atcm35yuOpBHAb8gNEdMOtAJXRMTWkhZWBJLOA34P/Cgippa6nmJIxmsbFxHPSDoC2AhcUun/Xsn4c4dFxO8ljQR+CXwzIp7qz/5Ve8TQFQqJw4CqSMCI+EVE7E0WnwLGl7KeYomI5yPihVLXUQQzgG0R8XJE/AG4l9yEVhUvIh4H3i11HcUUEW9GxDPJ4x3A8/QwfE+liZzfJ4sjk1u/PwOrNhgAJN0k6Q3gy8CSUtczBP4c+Hmpi7CUE4E38pbbqYIPmlqQDPZ5JvDr0lZSHJJGSNpEbjqEhyOi3++rooNB0iOSnuvhNgcgIm6MiAnAWuCa0lbbf329r6TNjcBecu+tIvTnfVUB9bCuKo5Wq5mkw4GfAP+roLehYkXExxExnVyvwgxJ/e7+yzS6aqkdaBKhAv8A/BOwdAjLKZq+3pek+cCfARdEBZ0kGsC/VyVrJzfEfJfxQEeJarF+SPrgfwKsjYh/LHU9xRYR70t6DJhFP+e8qegjhgORNCVvcTbw76WqpZgkzQKuA2ZHxM5S12PdtAJTJE2WdAi5ec6bS1yT9SI5SftD4PmI+D+lrqdYJNV1XbEo6VPATAbwGVjNVyX9BDiZ3JUurwHfiIjflraq7CRtA0YB25NVT1XJ1VaXAv8XqAPeBzZFxBdKW9XgSPpT4LvACOCuiLipxCUVhaQfA/+F3IidvwOWRsQPS1pURpL+M/AEucnCPklW/3VE/HPpqspO0unAGnL/Bw8C1kXE8n7vX63BYGZmg1O1XUlmZjY4DgYzM0txMJiZWYqDwczMUhwMZmaW4mAwM7MUB4OZmaU4GMzMLOX/A/xoEVK9w+cWAAAAAElFTkSuQmCC\n", "text/plain": [ "
" ] @@ -159,16 +159,16 @@ "name": "stdout", "output_type": "stream", "text": [ - "Iteration 11 loss: -13.523289192527265\n", - "Iteration 21 loss: -16.077990179961076\n", - "Iteration 31 loss: -16.784414553096843\n", - "Iteration 41 loss: -16.820970924702017\n", - "Iteration 51 loss: -16.859865329532193\n", - "Iteration 61 loss: -16.895666914166453\n", - "Iteration 71 loss: -16.899409131167452\n", - "Iteration 81 loss: -16.901728290347176\n", - "Iteration 91 loss: -16.903122097339737\n", - "Iteration 100 loss: -16.903135093930537" + "Iteration 10 loss: -13.09287954321266\t\t\t\t\n", + "Iteration 20 loss: -15.971970034359586\t\t\t\t\n", + "Iteration 30 loss: -16.725359053995163\t\t\t\t\n", + "Iteration 40 loss: -16.835084442759314\t\t\t\t\n", + "Iteration 50 loss: -16.850332113428053\t\t\t\t\n", + "Iteration 60 loss: -16.893812683762203\t\t\t\t\n", + "Iteration 70 loss: -16.900137667771077\t\t\t\t\n", + "Iteration 80 loss: -16.901158761459012\t\t\t\t\n", + "Iteration 90 loss: -16.903085976668137\t\t\t\t\n", + "Iteration 100 loss: -16.903135093930537\t\t\t\t\n" ] } ], @@ -303,7 +303,7 @@ "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAY0AAAEKCAYAAADuEgmxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzt3Xd8zff3B/DXO4MQBImdkrQ2QUmMGnWpWaVWzOKnaLVKq0bVN1IxSkrtmjVLK6hqzeJeNVokRkvsVUIRK2Jlnt8fJyRIuEnuvZ87zvPxuI/c8cm952bc83mv81ZEBCGEEMIYTloHIIQQwnZI0hBCCGE0SRpCCCGMJklDCCGE0SRpCCGEMJokDSGEEEaTpCGEEMJokjSEEEIYTZKGEEIIo7loHYCpeXl5kY+Pj9ZhCCGETTlw4MANIir0suPsLmn4+PggIiJC6zCEEMKmKKX+NeY4TbunlFILlVLXlVJHM3i8oVIqRil1OOUyytIxCiGESKV1S2MxgJkAlr7gmF1E1Moy4QghhHgRTVsaRLQTwC0tYxBCCGE8rVsaxqijlPobwBUAQ4go8tkDlFL9APQDgJIlS1o4PCFEdiUkJCAqKgqPHj3SOhS75+bmBm9vb7i6umbp+609aRwEUIqI7imlWgL4BUCZZw8ionkA5gGAv7+/bBAihI2JiopC3rx54ePjA6WU1uHYLSLCzZs3ERUVBV9f3yw9h1Wv0yCiu0R0L+X6RgCuSikvjcMSQpjYo0eP4OnpKQnDzJRS8PT0zFaLzqqThlKqqEr5K1JK1QTHe1PbqIQQ5iAJwzKy+3PWtHtKKfUjgIYAvJRSUQCCAbgCABHNAdABQH+lVCKAhwA6kxn3p01OBm7cAAoXNtcrCCGEbdM0aRBRl5c8PhM8JdcivvwSWLEC2LoVKFfOUq8qhBC2w6q7pyytSxcgLg6oXx84fFjraIQQwvpI0kijalVg1y7AzQ1o2BDYu1friIQQlnLhwgWUL18effr0QeXKldGtWzds27YNdevWRZkyZbB//37cv38fvXv3RkBAAF5//XWsW7fuyffWr18f1atXR/Xq1fHnn38CAHbs2IGGDRuiQ4cOKF++PLp16wYz9rBbhLVPubW4smWB3buBxo2BNm2Ac+cAd3etoxLCcXz6qelb+tWqAVOnvvy4M2fOYNWqVZg3bx4CAgKwYsUK7N69G7/++ivGjx+PihUrolGjRli4cCHu3LmDmjVr4q233kLhwoWxdetWuLm54fTp0+jSpcuTGniHDh1CZGQkihcvjrp162LPnj2oV6+ead+gBUnSSEfJksC2bcDp05IwhHAkvr6+8PPzAwBUqlQJjRs3hlIKfn5+uHDhAqKiovDrr79i0qRJAHiq8MWLF1G8eHEMGDAAhw8fhrOzM06dOvXkOWvWrAlvb28AQLVq1XDhwgVJGvaoVCm+AMCqVUCNGsCrr2obkxCOwJgWgbnkzJnzyXUnJ6cnt52cnJCYmAhnZ2esWbMG5Z6ZKfPVV1+hSJEi+Pvvv5GcnAw3N7d0n9PZ2RmJiYlmfhfmJWMaL3H3LvDRR0CLFsAtqZIlhENr1qwZZsyY8WRc4tChQwCAmJgYFCtWDE5OTli2bBmSkpK0DNOsJGm8RL58wNq1wIULQLt2QHy81hEJIbQSFBSEhIQEVKlSBZUrV0ZQUBAA4KOPPsKSJUtQu3ZtnDp1Cu523K+tbH0k/1n+/v5kjk2YVqwAunUDevQAFi8GZPGqEKZz/PhxVKhQQeswHEZ6P2+l1AEi8n/Z90pLw0hduwKjRwNLlwKbN2sdjRBCaEMGwjMhKIgX/ul0WkcihBDakJZGJiiVmjD++Qf416gddYUQwn5I0siCR4+A5s2B9u35uhBCOApJGlng5gbMng0cOMCrV4UQwlFI0siiNm2AoUOBuXN5Sq4QQjgCSRrZMHYsrxTv0we4fFnraIQQWXXp0iXodDpUqFABlSpVwrR
p00z6/IcPH8bGjRszfNzHxwc3btww6WuaiySNbMiRg9dv9OsHeMkmtELYLBcXF0yePBnHjx/H3r17MWvWLBw7dsxkz/+ypGFLJGlkU9mywNdfAzlzAnZcOUAIu1asWDFUr14dAJA3b15UqFABl9PpPmjTpg2WLl0KAJg7dy66dev23DGrVq1C5cqVUbVqVTRo0ADx8fEYNWoUVq5ciWrVqmHlypW4efMmmjZtitdffx0ffPCBTZVLl3UaJnLoENC5M/Dzz0ClSlpHI4Rta9jw+fsCA7kO3IMHQMuWzz/eqxdfbtwAOnR4+rEdO4x/7QsXLuDQoUOoVavWc4/NmzcPdevWha+vLyZPnoy96Wy6ExISgi1btqBEiRK4c+cOcuTIgZCQEERERGDmTN6IdODAgahXrx5GjRqFDRs2YN68ecYHqDFpaZiItzcXNPy//wNsvIilEA7r3r17aN++PaZOnYp8+fI993iRIkUQEhICnU6HyZMno2DBgs8dU7duXfTq1Qvz58/PsHDhzp070b17dwDA22+/jQIFCpj2jZiRtDRMpFAhYNYsoFMnYPJkYPhwrSMSwna9qGWQO/eLH/fyylzL4rGEhAS0b98e3bp1Q7t27TI87siRI/D09MSVK1fSfXzOnDnYt28fNmzYgGrVquFwBjtKKRstYCctDRPq2JEX/AUHAydOaB2NEMJYRIT3338fFSpUwODBgzM8bv/+/di0aRMOHTqESZMm4fz5888dc/bsWdSqVQshISHw8vLCpUuXkDdvXsTGxj45pkGDBli+fDkAYNOmTbh9+7bp35SZSNIwIaW4tZEnD/Ddd1pHI4Qw1p49e7Bs2TLo9XpUq1YN1apVe262U1xcHPr27YuFCxeiePHimDx5Mnr37v3cIPbQoUPh5+eHypUro0GDBqhatSp0Oh2OHTv2ZCA8ODgYO3fuRPXq1fH777+jZMmS2Yo/KYnLGiUkZOtpjCKl0c3g7FnA1xdwkpQshFGkNHrWEQHnzgG3bwNlygAeHi//HimNbmVee40TxtWrPJNDCCHM5coVThje3sYljOySpGEm9+8DVaoAL+geFUKIbMufHyhWDChSxDKvJ0nDTNzdgQ8+AJYtAwwGraMRQtibx1P73d2BEiUst5uoJA0z+vJL4NVXgf79ZW9xIYTpxMcDkZHcBW5pkjTMKFcuYPp04ORJIGUhqBBCZEtyMnDmDM+YssQYxrM0XdynlFoIoBWA60RUOZ3HFYBpAFoCeACgFxEdtGyU2fP221zy4MwZrSMRInMiI4H9+wFPT6BoUe4CKVFC66gcGxFw4QKXUildmk9MLU3rlsZiAM1f8HgLAGVSLv0AzLZATCa3dq2s2xDWLT4eWLMGeOcd4Pp1vu/334HevXnvmFq1eHZO+fLAf/8BCA19frDOYOD77cBXX32FSZMmvfCYX375xaSVcNNz5coVdEhTSOv6dS5XVLw4D4A/Nn78eLPGkZamSYOIdgK49YJD2gBYSmwvgPxKqWKWic50cuTgr0eOAGb+GxMiUxITgW+/5RZEhw7AwYPAqVP8WK9ePP8/IgJYvx6YOhUIyhmKoscNQEAAHrYOxN6vDSC9gWd9BAYCAQHmD9pKEpYlkkbx4sWxevXqJ7ddXICCBXm2VFqWTBogIk0vAHwAHM3gsfUA6qW5vR2AfzrH9QMQASCiZMmSZI3i4oiKFiWqX58oOVnraIQgevCAyN+fCCBq2pRo40aixMSXfJNeT+TlRUnb9NTbV0+3kY8eOOWmBPd8/FgWHTt2zPiDU2J48nrP3s6isWPHUtmyZalx48bUuXNn+uabb4iIaN68eeTv709VqlShdu3a0f3792nPnj1UoEAB8vHxoapVq9KZM2fSPe5ZwcHB1L17d9LpdFS6dGmaN28eERElJyfTkCFDqFKlSlS5cmX66aefiIjo/PnzVKlSJUpOJlq0aBG1bduWmjVrRqVLl6ahQ4cSEdHw4cPJycmJqlatSl27dqV
79+5Ry5YtqUqVKlSpUqUnz5VWej9vABFkzGe2MQeZ8/KSpLEhnaRR40XPV6NGjef/GqzEnDn8E1+7VutIhGDrG0wkfZCekpP5ZGb3bqK5nfW0uOJEKleOyN2dqFgxosqViRo3JgoJIToxR0/JXl6U+GUQxefITQRQCILoiy+IHj3KWhyZShpEqYkiKMgkCSMiIoIqV65M9+/fp5iYGHrttdeeJI0bN248OW7kyJE0ffp0IiLq2bMnrVq16sljGR2XVnBwMFWpUoUePHhA0dHR5O3tTZcvX6bVq1fTW2+9RYmJiXT16lV65ZVX6MqVK0+SxokTRDNmLCJfX1+6c+cOPXz4kEqWLEkXL14kIiJ3d/cnr7F69Wrq06fPk9t37tx5Lo7sJA2txzReJgrAK2luewNIv7SkDXj/fe4T/uILKZ8utJGUBAwbxt1QAPD2VwF487tArPrIgIoVgf/VM6DtT4HYcT8AlSoBffvyRI7SpXnVcXAwUP5DHaY87A/n8WPg4kyIGxaET91mIzzUgMhIC70RnY7nso8Zw191umw93a5du9C2bVvkzp0b+fLlQ+vWrZ88dvToUdSvXx9+fn5Yvnw5IjN4k8Ye16ZNG+TKlQteXl7Q6XTYv38/du/ejS5dusDZ2RlFihTBm2++ifDwcABcTyo2ltdhNG7cGB4eHnBzc0PFihXx77//Pvf8fn5+2LZtG4YPH45du3bBw8RTrKw9afwKoIditQHEENF/WgeVVS4uwIQJPAX3+++1jkY4mkePgC5dgG++4TEKAFh7R4dOCINuTiCGxo7CxjyByLMhDIsu6LBmDTBlCrBgAU/mOHCA1wVs+cKAD+Km4z5y4e7DHFgWpYMKC8Nmj0BUj+GxhrNnzfxmDAZg9mwgKIi/mmAFbUalynv16oWZM2fiyJEjCA4OxqNHj7J13LOvo5TKcOe+27f5BLNIEV7ElzNnziePOTs7IzGds8+yZcviwIED8PPzw4gRIxASEpLuc2eVpklDKfUjgL8AlFNKRSml3ldKfaiU+jDlkI0AzgE4A2A+gI80CtVkWrcGmjThMwchLCUxkQe6V60CJk3iFkTbtkC7dsBpbx3ievdH78tjkOuz/nhUR4c9e4B583iB6sWL/Bw3bgCJWw1ouiAQ7r074b/5GzCt4Vq0WRGI3r2BiKFhQHg4tmwBypXj5GSWeqgGAw+6h4UBISH8NTAwW4mjQYMGWLt2LR4+fIjY2Fj89ttvTx6LjY1FsWLFkJCQ8KScOYDnyp1ndNyz1q1bh0ePHuHmzZvYsWMHAgIC0KBBA6xcuRJJSUmIjo7Gzp07UblyTVy+zHXsvL1fHL+rqysSUkrcXrlyBblz50b37t0xZMgQHDxo2lUKmq7TIKIuL3mcAHxsoXAsQilgyxbLLfkXgggYOBDYsIGnfjdowFNoo6OBiROBwa8b4NJ1Nq72DYLbhNloN0aHHeDuHmdn3lisZEnghx+AK5+F43KxMORVOvR9HRjVBzgxOwy1RoejzpfD0KuXDuPf42Q0bBjPvpo1y8QVn8PDOVE87pLS6fh2eHiWu6mqV6+OTp06oVq1aihVqhTq16//5LExY8agVq1aKFWqFPz8/J4kis6dO6Nv376YPn06Vq9eneFxz6pZsybefvttXLx4EUFBQShevDjatm2Lv/76C1WrVoVSCqGhociduyicnC4gR46Xf17069cPVapUQfXq1dGjRw8MHToUTk5OcHV1xezZJl6pYMzAhy1drHkgPK3kZJ6tcu2a1pEIexcXR/TOO0TDhhEZDEQeHjy4fegQUeJWPSV58kDymTNE3Yrr6V5uL/prvJ7OnydKSkp9nvPniWbMIGrdmihPntRZV/HxPAD+5ZdEzs5E5coRnThBNHw4H9Or18tnZWV6INxGBQcHPxlgN0ZCgnnisOeBcLv1779Aq1Y8xiGEOeXIwWMS/v5As2a8JmPvXl7QN6NHOIaWDAM11OG114BlUTq4rw9Dbedw+Pg
83ULw8QEGDADWrQOioriV4usLuLoCOXMCI0cCej1w8yZQuzbQqBH3Hi1eDDyzn5F4geho4N49vu5ijRtyG5NZbOliKy0NIqL/+z+inDmJLl3SOhJhj86dI2rShL8aDEQ5chC98QbRzZtEwcFEShEVL060Zk321w4dPswzX+fNIzp7lsjPj8jJiWjuXKI9e17+/Y7S0niZu3eJwsOJzpwx7+tIS8NGjRrFxcfGjdM6EmFvEhOB7t2Bfft4tt677/KubmvWAIMGAaNHA++9xxUK2rXL/hhbvnyAnx/Qrx8wZAiP2zVvzgvF9+/nY8LDeZyDMhgcp4wecBAJCTwGlDMnUKqU+V4nuz9na2z8OAwfH57FMm8eMHQol1EXwhTGjQP+/JOrLPfpw9M1N23irqr9+4GxY3lmlKkmZPj6Atu2camRoUOB06d5plbu3MBnn/GmZAkJPKPKxQV4tuqFm5sbbt68CU9PzwynvtozStmyNSmJk7u5uqWICDdv3oSbm1uWn0P2CNfYlSs8m2XuXKBxY62jEfbgzz+B+vV51tPJk1xhecsWLgvl7MwVUnPnNt/rb90K7G4TilcDA9BtgQ7/938882pRDwPynghHh/3D8O23nEweS0hIQFRUVIZrG+xdbCwXIvT0BPLkMe9rubm5wdvbG66urk/db+we4dLS0Fjx4lwgzqRTEoVjCg0FAgIQMlGHkiWBAgWAfAcN2NU9HIP/NwyFCwPLl5s3YQC8DqnKwgAU/iQQalcY5s/XoXK0AW8vDcR/tdthVH0DPv9cB19f7jaDwQDX8HD4Dhtm3sCsWFISTxawRL3H7JKkYQWcnHgmy5492a6GIBxZQAAQGIh1y8Kw9JIOK/oZ8KtbIKZEhWH7DmDJEsutDyrSWQcUCUNSh0DMS+6PT5NnY3z9MOzcBWzME4jocmH48Ucd3vVIs1DPAZ09C7i58Yy2d97ROhojGTNabksXW5o9ldZXX/Fsk1OntI5E2KqrV4kebdJTYkEv+iZXEN109qIZ7fQEEIWGahPTgyFBRACNUUG0bBkXPWzspKeHeb0oaaRpig3aqthYoooViSpVeno9jFZgK1VuTX2x1aRx9SqRmxtRz55aRyJsUXIyUbNmRFWrEi3z5Q/qfc2CCCD6+GONyvGnVKJ9NCyIbrt4UWMnPa1aRRQQQDTehWO8+2kQ9exJdO+eBvFpKDmZKDCQTxS3btU6GiZJwwZ9+imvqDX3HG1hf1at4v/m8U30dB1etK95EMV5eNHIN/QUF6dBQM/scXHvNz3dcvGipq562vu1nm44edG3eYLoYV4vaqT01LGjY+0zM2kS/74mTNA6klSSNGzQlSvc2ujdW+tIhC15+JDolVeIevvqKVp5Uf/yei7bYaLNibJk4sTnXvf2z3raUa4fJXl60bmFesqXj+j9V7lsSUPoadw4y4ephT/+4BZG+/bWlSiNTRoyZ8eKFCvGi6P++YfntAthjJkzgUuXgHo5w9FZheF+TR2CgwFqmKaQn6UNG/bcrI78bXV4s/drcFoVhqKddZg7F1j8rw5j/cLQr1o4/vc/IE1xWbtVrRqXY1m0yDYLl8o6DSvz8CHPprDFPyahjVatuN7T3r1A167Ajz8CH37IFW2tVYsWvEapZ0/g8885x2zfzutI9u61z7//e/f4/eXKpXUk6TN2nYYkDSt15w6vEi1QQOtIhLVLSACqVuUPpTx5+GtkJJA3r9aRZWzrVk4cTZtyC3vhQl7g2rkzlySxN8nJXK7l6lVg927rLERobNKQ7ikrFBPDZRkmTtQ6EmHNrl3jy9KlwPHjwBtv8NfvvrPuhAHwAsDvvgMqbwqF300DatcGBg/m6s9xmw3Y2SoUSUlaR2k6I0dydeCuXa0zYWSKMQMftnSx5YHwtDp1Isqbl+j2ba0jEdaqd2+iggWJihThaax58vA0TlsyqwPP9lr1kZ6KFSPqWkxPD/LwwPiIEVpHZxrz5/OUow8+sK6B72dBZk/
ZtkOH+LczdqzWkQhrdOYMT8+uU4f/TvbsITpyhNf72JKEBKLJrfSUUMCLLvYKouvwoi/r6KlvX35fP/ygdYTZs3Ur/56aNzffhkqmYmzSkO4pK1WtGvf5Tp3KBeaESOvrr3nzo7//5vITb7wBVK4MFCmidWSZ4+ICDP5NB5cB/fHK4jE481Z/jP+La2e9+Sbw/vvAX39pHWXW+fryXuwrV9pBt1QKSRpWbMQI4MYNYPNmrSMR1uTCBa4jVbo0EBfHiWP4cK2jygaDAZg9G+tfD0KFP2bjqzcNCA4GZpQMRQdPA3r3Rur4hsHAhRmtXHQ0D36/9hqXiLenwX1JGlasXj0e2GzXTutIhDX5/Xcucnn8OG+revEib61qkwypBQtvDAxB24QwDNkfiPe8DRj1awAW3g/E7yMMcHZOc6yVl4K9ehWoU4c3u7JHkjSsmFJA+fJ8PTFR21iElQgNRb8yBnTsyInj6FFgqL8Bzf62/rPvdIWH8wJEnQ69egEle+jwzsMwDKgVjq2JOgwtGQbvzwNBQaPwqHUg4n8Is+pS0Hfu8I6F//0HdOumdTRmYszAhy1d7GUgPK2RI3nA05pnXgjLiPmFq9g2dtJTlSpEjRQPIttLpdjYWKLy5YmKFiWaOZMHw7e9wcUNRyOI2rcnLpFihW7d4llsrq5EmzdrHU3mQQbC7Ye3Nw8G/vGH1pEILUVHA8W76TC4RBh+TA5E91Oj8EvOQLisse6z78zIk4cbHs7OvGBxUisDqvw5G6c7B2GI+2zcXGPAgAEZ7zOuFSKgTRseX1qzBmjWTOuIzMiYzGJLF3tsaTx4QFSoEFHLllpHIrQ0ejSfebu4EK2vwWff9wYHaR2WWTx6RER6PSV7edH7r+mpYEGiaz/pKTYXr+H46CPr2IMirW3biDZs0DqKrIO0NOxHrlxc4GzjRi4PIRzPw4dcmLBUKeDNZAOan58NBAXBfelsHiC2MzlzAsn7w7G6Yxg6zNQhIQF4d5oOOX8Jw9A3wzF3LnDokNZR8v7rS5bw9caNgZYttY3HIozJLLZ0sceWBhFRdDRRrlxE77+vdSRCC/PmcSujsZOebjpbSflzM7t2jcjTk8jfn2j5cn7/Q4bw2N6RI6nHaTXW9+ef/KMvVIjozh1tYjAl2EJLQynVXCl1Uil1Rin1RTqP91JKRSulDqdc+mgRpzXw8gKWLQOCgrSORGhh1iygaFGgenI4AikMqpGOp6HqNCx/bmaFC3MRw4gI4ORJ4KOPgEmTgPXreSEjAPzyC9CwIXD9umVjW76cf/T58wN79gAeHpZ9fU0Zk1nMcQHgDOAsgFcB5ADwN4CKzxzTC8DMzDyvvbY0hGM7c4Zrkfn4EOXIQXTpktYRWc5773Epjr/+Inr9daICBYguXODHwsK4Bf7KK0QHDlgmng8+4FZP/frcA2AvYAMtjZoAzhDROSKKB/ATgDYaxmMTIiKATp2AR4+0jkRY0oYNQGwsEBUF9OnDM+ocxdSp3NIeMIAbVUlJvMYvPh7o2JFLjQNA3brAt9/C7NVxy5QB/vc/QK/nuByNlkmjBIBLaW5Hpdz3rPZKqX+UUquVUq9YJjTrFRPD/zgrVmgdiTCb0NAng9uHDnFlgN1jDBjnEQoiYOhQjeOzsIIFeZe7KVO4dMqiRcD+/bxxEwBUr84nU40b84ZOv/9u2te/cYM3tQoL49uffw6MGWM/taQyS8ukkd7eXM/Ovv4NgA8RVQGwDcCSdJ9IqX5KqQilVER0dLSJw7QujRoBVarwGRVZ2Vx1YSIBAXwqbTBg5kzAfb8Bs24E4o1BAdi+HfDx0TpAy2vRAqhfn6+3aQP8Wi8Uf08zYM0avq9wYeC3wQac6ReK5s35vpUreSwkq27d4vxdpgywYAHPlBLQdEyjDoAtaW6PADDiBcc7A4h52fM6wpjGokXcp/r771pHIsxGr6ckTy8a5xxE0cqLupfQW+1KaEsKCiJq2pQobrOebrl40du59XT
qFD03i+zhQ555pRRR+/ZEa9fyanNjhYYSubnx/1mzZkSRkeZ5P9YENjCmEQ6gjFLKVymVA0BnAL+mPUApVSzNzdYAjlswPqvVpQuXwJ42TetIhNnodNhbrT++TBqD76g/drvq8MsvWgelveLFuftpzS0d4peFYfHDQOjrjQKlFD18vDLezQ04dgz48kvee7xtW8DTE1i8mJ/n2jXgp5+AH37g/6OgIG7NXLjAj/v4AD168ArvzZuBihW1eLdWypjMYq4LgJYAToFnUY1MuS8EQOuU618DiATPrDIAKP+y53SElgYR0YwZRBMmSD0qe5W4VU83nLxoUm7emKgh9DZZz8jUEhN53Ubx4kR37xKd7sIr49dWCcrwfyEujhsgn39OdPgw37d+PbciHl+UIqpYkWdoOSrIzn1C2KiU8hlL/09PTk5EbfLxgr7k7fa3gC8r9u7lT67ZgdwltaMBJ9ZfBxv/83nwgOjoUaJTp4hu3LDeIoiWZGzSkDIiNiw+HvjxR+DuXa0jESYVHg4VFoYzr+iQnAysu6tD+NAwqAj7W8CXFbVqAaEtDOgQFojY78NQTx+Cb/zDUPvbQJyYbVxJlVy5gEqVeJDb05MLJArjKE4w9sPf358iIiK0DsMiwsOBmjWB6dOBTz7ROhphKmfOcD/8qFGp9128yPWYBLsfHIo7ZQJQojuPYdy8CQyoZEDlR+Hoe2oYChfWOEAbpJQ6QET+Lz1OkoZtq1OH/2FOnOBNeYSNCg3lqbY6HQYP5hOB+kkGjGwSjqs9hqF7d60DtF43b3Jr4dAh3iu9Zk1g2zbeQ10Yz9ikIR8zNm7QIOD0aWDTJq0jEdmSsjYjfosBS5YAb+c2YI1zIBoND5CE8QKffsrdVXFxwOuvA/PnAzt3AkOGaB2Z/ZKkYePat+dpiDNnah2JyJaUwoPJHQMx6NYoLIgNxITXw3C1gn1srmQuLVsCZ89yQUcA6N6dE8n06bxyXJieJA0b5+oKfPABcOkS8OCB1tGIbNHp8KNHf4zCGMxV/fFNhA7nzmkdlHVr2pT35A4J4XIfAPf0NW7M/xeP61IJ05GkYQeGDweOHAFy59Y6EpEdDzca8O7V2RjnFIQPMRt9XjOm0DB2AAAgAElEQVSgbl2to7J+kyZxMceQEL7t6gqsWsUL9Nq2TV2wJ0xDkoYdyJkTUAq4d493eBM2yGBArp6B2P1JGP6XHIKOFIbp1wKhdtjfrnymVqkS0K8fsHo1cP8+31egAPDbb0BiIvDOO5xUhGlI0rATFy9yuewl6ZZ0FNYu4c9w3F0QhuAdOuTPDxzy0EHZ6eZK5jB+PM8gdHdPva9cOa4scvw4139MSNAuPnsiU26NRAT89x/Pob9yBYiO5suDB0ByMl9y5OCpf56eQIkSXK/mlVcsMxWWCPD351kkR45wy0PYjqVL+Ww5Lg6oXZv3hpg0SeuobE9SEk/BTbtOY/58/tn26QPMmyf/Gxkxdsqtg1aEf7HERC5UFh4OHDzIlxMnUpu+jynFK0udnTkxPHzIq7TTcnfnUuaNGwNvvcUfCOZYpKUUb1LTuzfwxx+8BaawHQsW8N+FszOwZQuQN6/WEdmmt97i/4Xt21OTQ9++wL//AuPGAaVK8QZKIuukpZHi9m3ej/iPP3jP38d9oAUK8CYvlSvzBjBlynDroVAh3hwmbfkBIm553LzJ3UXHjvFl3z5OQElJ/GHQsSPQqxdvrmPKs56HD7mLSqfj/l1hG06eBMqX50193n2XB3FF1syYAQwcyJVpmzVLvZ8I6NkTWLYMWLgQ+L//0y5Ga2VsS0PzAoOmvmS1YOHt20ROTlzp8sMPiVasIDp3znRVZO/cIVq3jqhXLyJ3dy64Vro00dy5RI8emeY1iIiGDuX9lK9dM91zCvMaNoz/9h5XXF2/XuuIbFdcHJGvL1HVqkRJSc8/1qQJ/6zXrNEmPmsGqXKbebduZflbM+XePaKlS4kCAvg34O3Npc7j47P/3FF
RRBER2X8eYRkJCUQheSZSK3c9eXgQ5c/PFVhJryeaOFHr8GzSDz/w/9WKFc8/du8eUZ06RDlyyCZmzzI2acjsqTQKFLDM67i7A++9x91WW7bwfPJPPgGqVQN27Mjec5coAdSoYYoohSW4uAANhwZg4f1A+Mca0KMHkGuvgaf7BARoHZ5N6tIF8PPjCtDPcncHNmzg7sB33+WuaJE5kjQ0pBSvaN21i+eUP3jA4xHdu/O4SFbducN9trLTm21YdUOHLk5h+DE5ECPjR3HCSLMLncgcJydODGvXpv94gQK8+5+3N68ml8SROZI0rESrVkBkJM/sCAvjVsfOnVl7rrx5ucUi28Fat6gooEMHXltz0EOH30r0R+E5Y4D+/SVhZNMrr/AkldjY52c0ArxdssHAdduaNeMTN2EcSRpWJHduYMwY4K+/eI9jnQ4IDuZZV5nh7Ax8+CEnjshIs4Qqsis0FDuCDVizhjfR+m2wAT1jpvHc7Nmz+RNNZEtUFPDqqzydOT3Fi/P/iLc37w+e3a5hh2HMwIctXexlu9e7d4l69OABvbffJoqNzdz3R0cT5cxJ9NFH5olPZE/ydj3ddPKiVu566lJUT8n58hF5ePAAuJ63MSW9bO+aHcnJRPXrExUrljK5IAP//UdUoQL/v/z8s+XiszaQgXDbljcvd1t89x3vlVG/Pp85GcvLi7vGly3jmlTCuux21aF9chgW3g/Ex3fGISlJcSe8TvekTLqUEMkepYCxY7mSw+zZGR9XtCh3T1Wrxt2F8+dbLkabZExmsaWLvbQ00tq0iShvXqLixYn++cf479u3j+jTT7nVIaxLr1487XM0gogAutgrSOuQ7FbjxkSFC/N02xe5d4+oRQtu3QcHm26Nlq2AtDTsx+MZHkrxSejhw8Z9X82awJQp3OoQ1qV0aaCJiwEfqdmY5RkE7/UyjmEuo0cD168Dv/764uPc3YF167haw+jRQKdOskdNeiRp2Ag/Py5xkjs30KgRcOCAcd9HxHV4ZEDcurRwM2DRg0B0pDDcHxbCFW0DAyVxmEHdurx/eJcuLz/W1ZXLjHzzDZfiqVePNzgTqSRp2JDXXuPE4eHBk2yMKbH14AHQrh0wYYL54xPG2b0bOLEsHN1dw7DLWYf33oOMY5hZtWr8NS7u5ccqxXuM//YbV7WuXp3HFQV7YdJQSpW0VCDCOL6+nDgKFOBpgqdPv/h4d3egRw/+PIqOtkyMImOnT/Okhp6Rw5D8pg5DhwLFiqU8qNMBw4ZpGp89mzuXuwWNnRjy9tvA/v08NbdlS/7VyJ4cL29pPFlTrJRaY+ZYhJFKluTyIwAvTLp69cXHf/ghL3BatMj8sYkXW7yYz2QTE7lU99dfax2R46halWcgfved8d9Tvjywdy+vt/zmG+CNN4CjR80Xoy14WdJIW7j7VXMGIjKnbFlg40Ye4GvRgheIZaRSJaBBA2DOHN4sSmgjKYmnUXt4cOJ/Vf6jLKp2bT7JmjTp+b1xXiRXLk40q1fzvhzVq/MiXEdtdbwsaVAG14UVCAgAfv6Zz3wCA1+8cvyjj3i/jXPnLBefeNr27cDly1wb7MoVICRE64gcT3Awd9POnZv5723fnieUdOgAjBrFyWP7dtPHaO1eljSqKqXuKqViAVRJuX5XKRWrlHrBua1xlFLNlVInlVJnlFJfpPN4TqXUypTH9ymlfLL7mvamaVM+C9qyBfjiuZ9gqvbteWOo0qUtF5t42rp1XB4G4O6pXr00Dcch1anDu/tNnsy/g8wqVAhYsYJ/l/fu8XO9+y4PmDsKzXbuU0o5AzgFoAmAKADhALoQ0bE0x3wEoAoRfaiU6gygLRF1etHzmmuPcGs3YAAwaxavAO/ePePjEhN5Bom7u+ViEywhgescxcdzQb2//5b9qrUQGQnkyMG7cGbHo0e8Dmr8eL7eowfw5Zc8y9EWGbtzn5ZTbmsCOENE54goHsBPANo8c0wbAEtSrq8G0Fgp+TdLz5Q
pvC94nz4Zz9p8+JBbGjL4qg29nsegHpeul79kbVSqlJowsnPO7OYGjBgBnDrF3b8rVgDlyvFeOfZ83qpl0igBIO2ymaiU+9I9hogSAcQA8LRIdDbG1ZX3li5alPcgv337+WNy5eJFggsWpF8uWphP69b8AZMrF/+uunbVOiLHdv8+0KZN5mZSZaRYMd6G4Nw53p987Voeb6xViyc+2FvtNy2TRnrnWc/mfWOOgVKqn1IqQikVEe3AixG8vHg9xpUrfCab3llU//7AtWuyQZMlHTvGC8X++Qfo3ZtXGBcponVUji13buDWLV70asyCP2MUKwZ8+y1Pdpg+nWc09uoFFC7ME1V+/tk+EoiWSSMKwCtpbnsDuJLRMUopFwAeAG49+0RENI+I/InIv1ChQmYK1zbUrAlMnMgDdTNmPP94s2a8QNAUZ1jCOIsX825ySUn8ISIJQ3tK8YZnUVHA0qWmfW4PD96++dgxXv3fuzfv1dG+PVCwIFdzCA3lfXNMlbAsScuBcBfwQHhjAJfBA+FdiSgyzTEfA/BLMxDejogCX/S8jjoQnhYRN703bwb+/BPwf2Zoa+JEnml17BhQoYI2MTqKxEQe9I6L4zUy7dpxbSOhPSLuQrpxAzh5krsNzSUxkXfi3LyZL0eO8P05cvDU3SpVeKylUiUeSPf25v3jLcnYgXDNkgYAKKVaApgKwBnAQiIap5QKAZfo/VUp5QZgGYDXwS2MzkT0wpUGkjTYrVtcbydnTq6Km3a21M2bPFjetCmfAQvzWb8eeOcdvu7iAvTrx7PchHX47Tceb1q8GOjZ03Kve/UqtzT++gvYt4+TSNpxSCcnoEQJHqMsVIi7nvPl4/9jd3dONs7O/DdFxK3YpCTuIuvRI2sx2UTSMAdJGql27OCKuP37yweVVv75h7ukDh3i2/v2cReisA5E3I3brRvgqeEUGyJOJMeOAefP88rzf//l2XY3bvCCxNhYHsB/0SSWmjX5bywrJGkIAMDnn/Pg3ObNPJ7xWHw8r2qtVImnCArzSEoCfHz4n71IEf5QkKm2IjsSEviSlMTdXkpxq+NxyyNHjqw9ry2s0xAWMG4cULEiD8bdSjOFwNUV2LqVB+Ts7LzBauzcyVuHRkVx10PPnpIwrJVezydPtlCbzdWVZ3/lzcvVrvPn5+u5c2c9YWSGJA075+YG/PADN3MHDEi9XylekHT0KM/wEKY3aBCvEPbw4Jk6L1qpL7R15Qr/n/z2m9aRWD9JGg7g9deBoCDgxx+f/qfo0oU/0GS8w/QOH+ZLbCz3l48ZwzNihHXq3JmrDo8dKy3vl5Gk4SC++IJXg3/4IRATw/flzs2LANes4TMtYTqLFnH/cmIiUKqU45bRthUuLrxiPyKCu21FxiRpOIgcOYDvv+cZGmk3h/v4Yy71bIuLjKxVXBx3dXh48GXsWEkatqBHD24Njh2rdSTWzcLLR4SWAgKAwYN5E5rOnXl30dKludtKmM4//3DV0wcPOFl3786tOmHdcuTgsiJ37/KAuKxhSp9MuXUwDx7wtpdEvKAoVy6+/+hRrotTu7a28dmLIUN4zwaA93Rv0EDbeIR4GZlyK9KVOzdv+3r2LJ9VAZxA2rcHPv1U29jsQUICj2P89BMvFvP1BerV0zoqkRlxccDs2cDBg1pHYp0kaTigxo15Rs+ECbwXgFI8trFvX8Z7cQjjjB/PeypcvszdHT16SDeHrYmPB0aOBEaP1joS6yR/zg5q8mTumvroI25p9OoF5MmTfmVcYZykJC5GeP8+tzLOnXvxFrzCOuXNy63uX3/l8SnxNEkaDqpIEd7Bb/t2HgjPl48Tx8qVwH//aR2dbdq+nfdhv3ED6NSJF1Y+3hNc2JZPPuHkMX681pFYH0kaDqxfPy5wNngwr90YNIir4h44oHVktmnBAh4zSkriKbd792odkciqAgW4yzYsjMumi1Qy5dYG/fEHr+w+e5b7zv/7j2vw79jBj2/YwN0jAQFcxCw
jzs68GVNAAPfffvstr+OQ6aGZd+0ab/NZoAB3+z18yMUghe367DMuXW4Pu+2ZkrQ0bMTRo6nX58zh0h+nT/OHVOPGT8/QGTQIqFOH6/D368eD3RmpUQNYXTMU/0wzIDKSEwYRcGuNgasZCqN4eHDijY7mMY0OHbh7Q9iuwoX5RKxGDa0jsS6SNKwYEbBlC8/x9/NL3ZNh8mTeSOnoUX588eKnV7Hu3cvjFK1a8VaW5cu/+PO/8RcB+IkC8X13A4iAya0MoMBAJFUPMOv7sydubtzacHbmhX29emkdkTCVGzd4+2TBJGlYqStXeGvQ5s15U5Zp03gqJwAUL/7iLiQvL17xvXQpb+QyYkTq4rKYGO46ScvjXR32DAzDiMOBONZxFD7ZHYgOyWH4JUZnnjdn60JDAYPhyc0//wR+eN+A/PNDUagQ15p6800N4xMm9dVXQGAgdwULAERkV5caNWqQrYuLI/L2JnJzI5o4kW+bSrduRGXLEu3Z8/T9iYlEc4sEEQH0aFgQvfYaUa1aRMnJpnttu6HXE3l58VciCm6gp2h4UUPoado0onXrNI5PmNS5c0TOzkSDBmkdiXmBt9l+6Wes5h/ypr7YctKIj0+9vnYt0alTpn+NrVuJfHyIXFyI5s5N84BeT/H5vWg0guhebi/6ZZCeAKJt20wfg11ISRyxnwXRdXhRYCE9+foSJSVpHZgwh549iXLlIrp6VetIzEeSho25eJGoRg2i7783/2vFxBC1aMG//UGDiBK3pp45d+lC1NRVTwkFvKijl57atTN/PDYriFtmoxFEANGbbxIdPap1UMIcTp4kUopo2DCtIzEfY5OGjGlYgfBwwN+fZzkVKmT+18uXj6fsfvYZsHw5EGsI5wnpOh0mTgR2uegwrkoYpnUPx/Ll5o/HJhkMoNmzMc0jCB87zYYOBvzxh6xxsVdly/I4oSx8hbQ0tLZrF1HevNxlFBlp+dd/3NxOTiZKSODrX33FrZAdO/h2XJyMbTwlpWvq1ho9NWlC1MpdTzecvKhlLj3du6d1cMJcHv9/2CtIS8P6XbnCs6OKFQN27QIqVrR8DEWK8Ncvv+RKt/HxwNChwCuvcP2dyEg+y9qyxfKxWa1wbpkVaKfDgAHA+vs6dHUOQ2+/cLi7ax2cMBeXlKXQJ0+m7n7piCRpaKh4cS4QuHOn9vtHe3tzgbZu3XjNwTff8B7Xu3bx46NGyd7JTwwbhstldbh0CZg3jxf2/Z6gQ4lpw17+vcKmXbzIK/2nT9c6Eu1I0tDAvn18AXiP7sdn+1r6+GNOFKtXp85Lr1sXCA4GPv/8ycm1SDF+PC+a3LCB63fVqQPUqqV1VMLcSpYEWrYEpkwBYmO1jkYbkjQs7Ngx/qPr14+3lLQmn3/OSWzMGOCXX4CpU4Hr14FLl4Bq1Xg3uvv3tY5SezExwJIlXO/LyYkLFe7Zw/uSCPsXFATcvs2lfByRJA0LungRaNqUN+dZu9b6NudRigsYNmzIO9D5+wM9e/Jq9BEjgKgort7q6JYs4eQZFQU0asTjP5IwHEdAANCsGZfzccRihlb2sWW/YmO5FtS9ezyo/OqrWkeUPjc3QK/n7imAu2FcXLhras8ebiE5sqQk7s8uU4bPNiMieO8F4ViCg/l/2hHL32uSNJRSBZVSW5VSp1O+FsjguCSl1OGUy6+WjtOUZszgrqlVq4AqVbSO5sUenzXPnctN8BEjgDVruPWhFBdLdFQHDnA9rxw5uArqnTtAixZaRyUsrU4drkX11ltaR2J5WrU0vgCwnYjKANiecjs9D4moWsqlteXCM73hw7nMcpMmWkdivKNHeXe/N97gLpjH+wv4+HD3miOqWZNbipGRQP78POuseXOtoxJa8PTkr4624E+rpNEGwJKU60sAvKtRHGa3cSOvx3B2fnrPC1vw9dc8W6R/fy69fugQcOQId818+CGXjHYkcXH8dd067rI7fRro3fvFG10J+zZsGE8SefBA60gsR6u
kUYSI/gOAlK+FMzjOTSkVoZTaq5SyucTyzz+8Gc/nn2sdSdbkyQPMn8/lTSIjgZklQ7HlCwNmzeL+/AEDwCXCHWSzpo4deaxn8WKgQgW+r3dvTUMSGmvdmmcYzpmjdSQWZMyy8axcAGwDcDSdSxsAd5459nYGz1E85eurAC4AeC2D4/oBiAAQUbJkSRMvrs+a27eJSpcmKlbM9itjvv8+l4beFaKn6/Ci+V31NHYsUUPo6VHe1BLh9iwykkurPC70uGuXQ7xtYYTGjYmKFCG6f1/rSLIH1lzlFsBJAMVSrhcDcNKI71kMoMPLjrOG2lPJyURt2nD58d27tY4m+27eJDIY+Pr4Jpw4bnwcRLdcvGheF8f45Ozdm0tj+/oS1a6tdTTCmuzaxZ+kkyZpHUn2GJs0tOqe+hVAz5TrPQE8t5miUqqAUipnynUvAHUBHLNYhNnw3Xfc7z1pEq+qtnUFC/LaDQDoOl+HBS794TlrDPIM6Y++K+x/d7/z53kXxFmlQlHqvAH58gGzZ6c86EDdcyJ99erxBJdFixyj1I5WSWMCgCZKqdMAmqTchlLKXym1IOWYCgAilFJ/AzAAmEBENpE0unYFvv0WGDhQ60hM6+uvgYnNDRiYYzZCEASaPRswGBARwf389mryZB7sPuwagNUqEIlbDYiKAieMwEBe7SUc2oIFvGbDERZ5KrKz1Ojv708RERGavHZsLJAzJ8/ht0fb/2dAlXGBuFG/HUJOdYa7OzD/biBCKoVh925g0Ufh8J5uf0X7YmKAn37iGWODqhgw8p9AuA7sj/wrZj/Zh0QIAEhM5Iubm9aRZJ5S6gAR+b/sOFkRbiJEXLfpzTd51bA90uUNR1DZMAQf74xFDwJx9hzwW/cwDC/1E36iQHyxJsDuirgRcRXbgwf5hGDFfzpsL9Mf+aeP4bnIkjBEirt3gcqVuWVq14wZ+LCli1YD4XPm8GDYxImavLzFGAz8PsP66+m2qxd9kyuIkjy96PAUPTk5EXXubD8bNp06RVS1Ku+r7uZGpNPxjLG4fF681auXY8wcE8Z75x2iAgV49qStgTXPnjLnRYukcfw4z6xp0oQoKcniL29xjRsTFS9OdLkP75G9MSCIiIjGj+e/qFWrNA7QRN57j3+vAwfy/tDbRurpbk4vStqWkij0ekkc4imHDvH/QFCQ1pFkniQNC4mLI6pencjTk+jKFYu+tGYiI4milvEH5kb/ILoOLzo9X09JSURLlhAlJmodYfYdPMiJYuBAIg8Pog4diJuRzyYIvd7+m5ciUzp2JMqTh+j6da0jyRxjk4aMaWTTtWu8ReqCBbxtqyOoeM2AEp8FAmFhqLUlBH3zhcGrfyDUDgN69OCZRlevAtHRWkeaNURcZ8vTky8xMUDt2sDFzsOeH8PQ6biWhBApQkK4rMiCBS8/1hbJ7CkTSExM3T/YIYSGIr5qANrP1KFePcDLC/ihjwGTO4Wj+k/DEB/Pu9qVKgX8/jvg6qp1wJmzeTNXrp0+nUvDlyvHJdDbt+e9NIR4mT//5J0cbakumcyeMrOYGGDoUJ4x4VAJAwCGDUOOZjokJ/O6tvbtgfsBOrz9xzDcvctTjkNCuKrvZ59pHWzmNWkCLF/Oye7qVa4zdf8+MGiQ1pEJW/HGG5ww4uO1jsT0JGlk0cCBvE/wyZNaR6Kd0aOBW7d4z41Zs7irLiiIH+venbeHnTWLix7aiqQk/mcPDOSpkzVqAJs2AQ0aANWrax2dsCXbtnGV6DNntI7EtCRpZMGaNVxWYuRIx14M7O8PvPMOf7iWLcvLFmbO5K4cAJgwgfea+Phj29jh7OJF4JtCodj7tQFLl/I/e5MmgO+/BswqJaVCROZUrswLfv/3P60jMS1JGpn033/ABx/wB6a9/TFkxVdfcZn0mTO5/79wYf75JCbyGfuPP/Kix8elxK0VEdC3L7DzYQD8vwnE7yMMqFmTB/3XOAeiQg8HPjsQWVK0KDB4MLByJe/4aDe
MmWJlSxdzT7nt1IkXeh0/btaXsSnLlnElXCKilSt5IvfUqc8fd/8+UXS0ZWMz1vffc9wzZhCtHciVfM924wV8cZtlHYbImpgYno7/1ltaR/JykHUa5nH+PNHPP5v1JWxacjLvOZEnD9HFi0/f36gRUUAA0b172sWXnqgoXovRoAFRbCxR0aJEi0vywkWbXKUlrMqUKfynFBGhdSQvZmzSkO4pI8XEcBeGjw/Qtq3W0VifvXt5yUJMDA9+JydzgT9KmdGtFM8+OnCAdzN8vHWqNVi1ime5fP89MHcuUP6qAZ1vz8Y4pyA8/JYr+QqRVf37A7t28aQKeyBJwwhJSTzg27Wr1pFYr1y5eIrt5MmAry+XUd+48el1Da1b84fy5s1Aly487mENPv0UOHaM15vsCjFgrWsgFjQNQ7AKQcz8MJ5KJYlDZFHOnLznBmBdJ0tZJUnDCFOm8JlCixZaR2K9qlblz9YpU3jP5AEDgPr1+QP58uXU4/r0AaZNA9au5UFCLf3xB/D333zdx4cH9cvFhuP8hDAM26RD165A0S46Ln8eHq5lqMIOTJsGVKoEPHyodSTZZEwfli1dTD2mceQIUY4cRO++az/VW83lxAkiJyeizz7j26dPc8G/li2f/9nNmsVVZLVy/jwPUAYEcGyRkbwP+gcfEI0axX3QR49qF5+wP48rRI8bp3Uk6YMMhGdfXByXxi5c2PaKj2mld2+inDlTB8GnTuW/su+/T//45GROIPfvWy7Gu3e5yKSHByeu5GSuUJw/P9HVq0SlShG1bm25eITjaN2aJ4lcvap1JM+TpGECx49zCfBffjHZU9q9CxeI5s4lio/n20lJRA0bErm7E508+fzxe/dyNdl69Yhu3TJ/fHfv8ms5OxOtX8/3/fIL/ydMm8a3b93i9yGEqZ04QeTiwi1aa2Ns0pCChS/x4AGQO7fJns4hRUXxmEepUsBff/HAYFphYVx25NVXgZ9/BipWNF8so0cDY8bwosOOHfn36+fH23OGh/NXJxnpE2Y0cCBXwL14kSdfWAspWJgN9+7xLKCEBEkYWbV4MU+5BQBvb2DRIuDQIWDEiOePDQwEtm7lleU1a3KZFnP58kueCNWxY+rtc+d4RfvcuTwt8s4d872+EMHBwOHD1pUwMkOSRjoGD+YKtna19N/CLl3iD+Fdu/h269Y8o2rKFGD9+uePf/NNTir+/rwntynt28cFB2/e5Mq19evz/X/8wTNaPv6Ya4h9/TXvn5E/v2lfX4i0PD25VhvABT9tjjF9WLZ0ye6Yxrp13L89bFi2nsbh3b9PVKIEkb9/6ha4Dx8SVavGA9AnTqT/fWlnWY0bR7R4cdZnrSUmEk2axH3IPj5Pz4aKjSXy9SV67TVeoT52LP/e9+7N2msJkVlDhvDf4IMHWkfCIAPhmXf1KlGhQvzB9uhRlp9GpFi6lP/Cli1Lve/CBf4Zly374oHvxESi+vX5+/39+Tky8ztZvpyoXDn+/nffff61PvyQB+B37uTHPDyI3nknc+9PiOzQ6/nvc8wYrSNhkjSyoF07ni4q8/NNIymJP/C9vbmV8diuXUSurkTNmhElJLz4+xcu5AQDcLL54w9+LDHx6RZITAzRli2p97VvT+TnR7Rq1fMtlbAwfr7H60m+/ppvHz6c/fcsRGa0b89rmdLWadOKJI0sOHiQ6IcfsvztIh379hFt2vT8/fPn81/fxx+/vPspKYlo61ZuMRw4wPf9+CNR7tx8yZmTWw0ALygkIrp9O7VbLK1Dh/h73ngjteUSH88JRwhLO3+ek0a7dlpHYnzSkCm3wmKSk5+ezjp0KDBpEjB8OA9CK2X8c+3Zw7OsnJx43448eYA6dbjGj5tb+t8THc0D7cnJPL22aFGuBfTsFGAhLGn8eODbb4EjR4BixbSLw9gpt462u7XQyNdf82ylTZtSk0NoKE9vnjiRpzaPGmX889WtyxdjxcVxdd3r14Hduzlh7NnDU2/Xr5etXIV
2hgzhjcs8PbWOxDgy5VZYRMGCwJYtwLJlqfcpxWXUe/bkuevjxqWWUjelB8gKt3QAAAkxSURBVA94yu/OnVz+vEYNrlw8YAC3UsqVM/1rCmGsHDk4YSQl8fRwa6dJ0lBKdVRKRSqlkpVSGTaHlFLNlVInlVJnlFJfWDJGYVp9+3L30eDBwI0bqfc7OfEHebduvH1u796mLR8dGwu8/TYvHvz++9Ty9vPn8wKrSZMAd3fTvZ4QWRUSwmuITpzQOpKXMGbgw9QXABUAlAOwA4B/Bsc4AzgL4FUAOQD8DaDiy57b3Dv3iaz75x9eM9G16/OPJScTBQfzYHb9+qYpEHntGlGdOlxnasWKp+8vWJBrYknlYmEtrl0jKlCAa6OlN4nD3GDNO/cR0XEiOvmSw2oCOENE54goHsBPANqYPzphLn5+PG6xahVw/PjTjynF+1n8+CMPUletCqxcmfXuqrVrgcqVgYMHubZVly6pjy1bxmMpM2dmbvBdCHMqXJgrJuzeDcyZo3U0GbPmMY0SAC6luR2Vcp+wYV9+yeVCKlRI//HOnYE//+RZJJ07A82bAydfdnqRRlQU0KsX0K4d17w6cICvpzV4MG++VKlSlt+GEGbRowfQtCnPKLx4Ueto0me2pKGU2qaUOprOxdjWQnrngOmedyql+imlIpRSEdHR0VkPWpids3Pqh/X27VwU8lmvvw7s3w9Mn857j5cvz/uPL1nCYxTPevSIn6t9e96Bb9kyHh/Zu/fpxPDvv8CpU9y6KF/eLG9PiGxRimu2lS3LtdKskabrNJRSOwAMIaLnFlYopeoA+IqImqXcHgEARPT1i55T1mnYhogILhI4fDgwYULGx129ymWkFy8Gzp7l+4oU4TLrefLwfRcvcjeWpyfw/vs8ffHVV59+nqQkoFEjThrnz2e8lkMIa0Bk+a5Te1inEQ6gjFLKF8BlAJ0BdNU2JGEq/v5Av368RqN8ee5SSk/RotxqGDmS11Xs2MEthn//5VZH3br8vZUrA61aZZwMhg/nKbeLFknCENZPKZ4qPmoU779RsqTWEaVhzGi5qS8A2oLHKOIAXAOwJeX+4gA2pjmuJYBT4FlUI415bpk9ZTvi44neeotnVG3fbr7XmTOHZ2V98on5XkMIUzt3jreGbdTIMrOpIGVEhC2IieHWwuXLwOnTpt+YZu9eLi3SrBmwbh3gYs1tayGesWABr3GaOhUYNMi8ryU79wmb4OEBbNgAzJhhnp3Mqlfn7q2ffpKEIWzP++9zt+sXXzw/TV0r0tIQVmXbNp5hpdNl73k2bQKqVAFKyCRtYeOuXuU1TuXK8U6Y5hogl5aGsDlEPPDXvDmwfHnWF/bNnctnZyNHmjY+IbRQtCgviF240DoWo0rSEFZDKe6qqlUL6N4daNMmcwuc/v0X6NQJ+PBDoEULLoYohD1o2JDXbhDxlHEtSdIQVqVAAUCvByZP5gV7FSsCFy68/Pt27OCpu7/9BoweDfzyixQiFPYnJIQXv2qZOGRMQ1itixe5/tTQoXz7iy/4TMvdndda3LrFi/j69eM57UOH8noMq5rTLoQJnT/PSaN0aR7fyJXLdM9tD4v7hIMrWTI1YRDxXgO7dwOJiXyfiwtPRwR4EyfpjhL2ztcXWLqUu2779+fFqpYe55CkIWyCUoDBwNcTE3nPDWdnWd0tHE/r1rxp2ejRvKHYJ59Y9vUlaQib4+Iiay6EYxs1iruqypSx/GvLv54QQtgYJyeu+vzYo0eWa3XL7CkhhLBhc+fypmVpt1E2J0kaQghhw/z8eI3Su+9yi8PcJGkIIYQNe+MN3njszz95nZK5yZiGEELYuI4deSGsJbYwlpaGEELYAUvteS9JQwghhNEkaQghhDCaJA0hhBBGk6QhhBDCaJI0hBBCGE2ShhBCCKNJ0hBCCGE0SRpCCCGMZnc79ymlogH8q3UcWeAFwEIlx6yGvGfHIO/ZNpQiokIvO8jukoatUkpFGLPVoj2R9+w
Y5D3bF+meEkIIYTRJGkIIIYwmScN6zNM6AA3Ie3YM8p7tiIxpCCGEMJq0NIQQQhhNkoYVUkoNUUqRUspL61jMTSn1jVLqhFLqH6XUWqVUfq1jMgelVHOl1Eml1Bml1Bdax2NuSqlXlFIGpdRxpVSkUmqQ1jFZilLKWSl1SCm1XutYzEGShpVRSr0CoAmAi1rHYiFbAVQmoioATgEYoXE8JqeUcgYwC0ALABUBdFFKVdQ2KrNLBPA5EVUAUBvAxw7wnh8bBOC41kGYiyQN6zMFwDAADjHYRES/E1Fiys29ALy1jMdMagI4Q0TniCgewE8A2mgck1kR0X9EdDDleiz4Q7SEtlGZn1LKG8DbABZoHYu5SNKwIkqp1gAuE9HfWseikd4ANmkdhBmUAHApze0oOMAH6GNKKR8ArwPYp20kFjEVfNKXrHUg5uKidQCORim1DUDRdB4aCeBLAE0tG5H5veg9E9G6lGNGgrs0llsyNgtR6dznEC1JpVQeAGsAfEpEd7WOx5yUUq0AXCeiA0qphlrHYy6SNCyMiN5K736llB8AXwB/K6UA7qY5qJSqSURXLRiiyWX0nh9TSvUE0ApAY7LPOeBRAF5Jc9sbwBWNYrEYpZQrOGEsJ6KftY7HAuoCaK2UagnADUA+pdQPRNRd47hMStZpWCml1AUA/kRka0XPMkUp1RzAtwDeJKJoreMxB6WUC3iQvzGAywDCAXQlokhNAzMjxWc+SwDcIqJPtY7H0lJaGkOIqJXWsZiajGkIrc0EkBfAVqXUYaXUHK0DMrWUgf4BALaAB4TD7DlhpKgL4D0AjVJ+r4dTzsCFjZOWhhBCCKNJS0MIIYTRJGkIIYQwmiQNIYQQRpOkIYQQwmiSNIQQQhhNkoYQZpZS8fW8Uqpgyu0CKbdLaR2bEJklSUMIMyOiSwBmA5iQctcEAPOI6F/tohIia2SdhhAWkFJS4wCAhQD6Ang9peKtEDZFak8JYQFElKCUGgpgM4CmkjCErZLuKSEspwWA/wBU1joQIbJKkoYQFqCUqgbekbE2gM+UUsU0DkmILJGkIYSZpVR8nQ3eU+IigG8ATNI2KiGyRpKGEObXF8BFItqacvs7AOWVUm9qGJMQWSKzp4QQQhhNWhpCCCGMJklDCCGE0SRpCCGEMJokDSGEEEaTpCGEEMJokjSEEEIYTZKGEEIIo0nSEEIIYbT/ByixNxS6Up4ZAAAAAElFTkSuQmCC\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAY0AAAEKCAYAAADuEgmxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3Xd8zff3B/DXO4MQBImdkrQ2QUmMGnWpWaVWzOKnaLVKq0bVN1IxSkrtmjVLK6hqzeJeNVokRkvsVUIRK2Jlnt8fJyRIuEnuvZ87zvPxuI/c8cm952bc83mv81ZEBCGEEMIYTloHIIQQwnZI0hBCCGE0SRpCCCGMJklDCCGE0SRpCCGEMJokDSGEEEaTpCGEEMJokjSEEEIYTZKGEEIIo7loHYCpeXl5kY+Pj9ZhCCGETTlw4MANIir0suPsLmn4+PggIiJC6zCEEMKmKKX+NeY4TbunlFILlVLXlVJHM3i8oVIqRil1OOUyytIxCiGESKV1S2MxgJkAlr7gmF1E1Moy4QghhHgRTVsaRLQTwC0tYxBCCGE8rVsaxqijlPobwBUAQ4go8tkDlFL9APQDgJIlS1o4PCFEdiUkJCAqKgqPHj3SOhS75+bmBm9vb7i6umbp+609aRwEUIqI7imlWgL4BUCZZw8ionkA5gGAv7+/bBAihI2JiopC3rx54ePjA6WU1uHYLSLCzZs3ERUVBV9f3yw9h1Wv0yCiu0R0L+X6RgCuSikvjcMSQpjYo0eP4OnpKQnDzJRS8PT0zFaLzqqThlKqqEr5K1JK1QTHe1PbqIQQ5iAJwzKy+3PWtHtKKfUjgIYAvJRSUQCCAbgCABHNAdABQH+lVCKAhwA6kxn3p01OBm7cAAoXNtcrCCGEbdM0aRBRl5c8PhM8JdcivvwSWLEC2LoVKFfOUq8qhBC2w6q7pyytSxcgLg6oXx84fFjraIQQwvpI0kijalVg1y7AzQ1o2BDYu1friIQQlnLhwgWUL18effr0QeXKldGtWzds27YNdevWRZkyZbB//37cv38fvXv3RkBAAF5//XWsW7fuyffWr18f1atXR/Xq1fHnn38CAHbs2IGGDRuiQ4cOKF++PLp16wYz9rBbhLVPubW4smWB3buBxo2BNm2Ac+cAd3etoxLCcXz6qelb+tWqAVOnvvy4M2fOYNWqVZg3bx4CAgKwYsUK7N69G7/++ivGjx+PihUrolGjRli4cCHu3LmDmjVr4q233kLhwoWxdetWuLm54fTp0+jSpcuTGniHDh1CZGQkihcvjrp162LPnj2oV6+ead+gBUnSSEfJksC2bcDp05IwhHAkvr6+8PPzAwBUqlQJjRs3hlIKfn5+uHDhAqKiovDrr79i0qRJAHiq8MWLF1G8eHEMGDAAhw8fhrOzM06dOvXkOWvWrAlvb28AQLVq1XDhwgVJGvaoVCm+AMCqVUCNGsCrr2obkxCOwJgWgbnkzJnzyXUnJ6cnt52cnJCYmAhnZ2esWbMG5Z6ZKfPVV1+hSJEi+Pvvv5GcnAw3N7d0n9PZ2RmJiYlmfhfmJWMaL3H3LvDRR0CLFsAtqZIlhENr1qwZZsyY8WRc4tChQwCAmJgYFCtWDE5OTli2bBmSkpK0DNOsJGm8RL58wNq1wIULQLt2QHy81hEJIbQSFBSEhIQEVKlSBZUrV0ZQUBAA4KOPPsKSJUtQu3ZtnDp1Cu523K+tbH0k/1n+/v5kjk2YVqwAunUDevQAFi8GZPGqEKZz/PhxVKhQQeswHEZ6P2+l1AEi8n/Z90pLw0hduwKjRwNLlwKbN2sdjRBCaEMGwjMhKIgX/ul0WkcihBDakJZGJiiVmjD++Qf416gddYUQwn5I0siCR4+A5s2B9u35uhBCOApJGlng5gbMng0cOMCrV4UQwlFI0siiNm2AoUOBuXN5Sq4QQjgCSRrZMHYsrxTv0we4fFnraIQQWXXp0iXodDpUqFABlSpVwrR
p00z6/IcPH8bGjRszfNzHxwc3btww6WuaiySNbMiRg9dv9OsHeMkmtELYLBcXF0yePBnHjx/H3r17MWvWLBw7dsxkz/+ypGFLJGlkU9mywNdfAzlzAnZcOUAIu1asWDFUr14dAJA3b15UqFABl9PpPmjTpg2WLl0KAJg7dy66dev23DGrVq1C5cqVUbVqVTRo0ADx8fEYNWoUVq5ciWrVqmHlypW4efMmmjZtitdffx0ffPCBTZVLl3UaJnLoENC5M/Dzz0ClSlpHI4Rta9jw+fsCA7kO3IMHQMuWzz/eqxdfbtwAOnR4+rEdO4x/7QsXLuDQoUOoVavWc4/NmzcPdevWha+vLyZPnoy96Wy6ExISgi1btqBEiRK4c+cOcuTIgZCQEERERGDmTN6IdODAgahXrx5GjRqFDRs2YN68ecYHqDFpaZiItzcXNPy//wNsvIilEA7r3r17aN++PaZOnYp8+fI993iRIkUQEhICnU6HyZMno2DBgs8dU7duXfTq1Qvz58/PsHDhzp070b17dwDA22+/jQIFCpj2jZiRtDRMpFAhYNYsoFMnYPJkYPhwrSMSwna9qGWQO/eLH/fyylzL4rGEhAS0b98e3bp1Q7t27TI87siRI/D09MSVK1fSfXzOnDnYt28fNmzYgGrVquFwBjtKKRstYCctDRPq2JEX/AUHAydOaB2NEMJYRIT3338fFSpUwODBgzM8bv/+/di0aRMOHTqESZMm4fz5888dc/bsWdSqVQshISHw8vLCpUuXkDdvXsTGxj45pkGDBli+fDkAYNOmTbh9+7bp35SZSNIwIaW4tZEnD/Ddd1pHI4Qw1p49e7Bs2TLo9XpUq1YN1apVe262U1xcHPr27YuFCxeiePHimDx5Mnr37v3cIPbQoUPh5+eHypUro0GDBqhatSp0Oh2OHTv2ZCA8ODgYO3fuRPXq1fH777+jZMmS2Yo/KYnLGiUkZOtpjCKl0c3g7FnA1xdwkpQshFGkNHrWEQHnzgG3bwNlygAeHi//HimNbmVee40TxtWrPJNDCCHM5coVThje3sYljOySpGEm9+8DVaoAL+geFUKIbMufHyhWDChSxDKvJ0nDTNzdgQ8+AJYtAwwGraMRQtibx1P73d2BEiUst5uoJA0z+vJL4NVXgf79ZW9xIYTpxMcDkZHcBW5pkjTMKFcuYPp04ORJIGUhqBBCZEtyMnDmDM+YssQYxrM0XdynlFoIoBWA60RUOZ3HFYBpAFoCeACgFxEdtGyU2fP221zy4MwZrSMRInMiI4H9+wFPT6BoUe4CKVFC66gcGxFw4QKXUildmk9MLU3rlsZiAM1f8HgLAGVSLv0AzLZATCa3dq2s2xDWLT4eWLMGeOcd4Pp1vu/334HevXnvmFq1eHZO+fLAf/8BCA19frDOYOD77cBXX32FSZMmvfCYX375xaSVcNNz5coVdEhTSOv6dS5XVLw4D4A/Nn78eLPGkZamSYOIdgK49YJD2gBYSmwvgPxKqWKWic50cuTgr0eOAGb+GxMiUxITgW+/5RZEhw7AwYPAqVP8WK9ePP8/IgJYvx6YOhUIyhmKoscNQEAAHrYOxN6vDSC9gWd9BAYCAQHmD9pKEpYlkkbx4sWxevXqJ7ddXICCBXm2VFqWTBogIk0vAHwAHM3gsfUA6qW5vR2AfzrH9QMQASCiZMmSZI3i4oiKFiWqX58oOVnraIQgevCAyN+fCCBq2pRo40aixMSXfJNeT+TlRUnb9NTbV0+3kY8eOOWmBPd8/FgWHTt2zPiDU2J48nrP3s6isWPHUtmyZalx48bUuXNn+uabb4iIaN68eeTv709VqlShdu3a0f3792nPnj1UoEAB8vHxoapVq9KZM2fSPe5ZwcHB1L17d9LpdFS6dGmaN28eERElJyfTkCFDqFKlSlS5cmX66aefiIjo/PnzVKlSJUpOJlq0aBG1bduWmjVrRqVLl6ahQ4cSEdHw4cPJycmJqlatSl27dqV
79+5Ry5YtqUqVKlSpUqUnz5VWej9vABFkzGe2MQeZ8/KSpLEhnaRR40XPV6NGjef/GqzEnDn8E1+7VutIhGDrG0wkfZCekpP5ZGb3bqK5nfW0uOJEKleOyN2dqFgxosqViRo3JgoJIToxR0/JXl6U+GUQxefITQRQCILoiy+IHj3KWhyZShpEqYkiKMgkCSMiIoIqV65M9+/fp5iYGHrttdeeJI0bN248OW7kyJE0ffp0IiLq2bMnrVq16sljGR2XVnBwMFWpUoUePHhA0dHR5O3tTZcvX6bVq1fTW2+9RYmJiXT16lV65ZVX6MqVK0+SxokTRDNmLCJfX1+6c+cOPXz4kEqWLEkXL14kIiJ3d/cnr7F69Wrq06fPk9t37tx5Lo7sJA2txzReJgrAK2luewNIv7SkDXj/fe4T/uILKZ8utJGUBAwbxt1QAPD2VwF487tArPrIgIoVgf/VM6DtT4HYcT8AlSoBffvyRI7SpXnVcXAwUP5DHaY87A/n8WPg4kyIGxaET91mIzzUgMhIC70RnY7nso8Zw191umw93a5du9C2bVvkzp0b+fLlQ+vWrZ88dvToUdSvXx9+fn5Yvnw5IjN4k8Ye16ZNG+TKlQteXl7Q6XTYv38/du/ejS5dusDZ2RlFihTBm2++ifDwcABcTyo2ltdhNG7cGB4eHnBzc0PFihXx77//Pvf8fn5+2LZtG4YPH45du3bBw8RTrKw9afwKoIditQHEENF/WgeVVS4uwIQJPAX3+++1jkY4mkePgC5dgG++4TEKAFh7R4dOCINuTiCGxo7CxjyByLMhDIsu6LBmDTBlCrBgAU/mOHCA1wVs+cKAD+Km4z5y4e7DHFgWpYMKC8Nmj0BUj+GxhrNnzfxmDAZg9mwgKIi/mmAFbUalynv16oWZM2fiyJEjCA4OxqNHj7J13LOvo5TKcOe+27f5BLNIEV7ElzNnziePOTs7IzGds8+yZcviwIED8PPzw4gRIxASEpLuc2eVpklDKfUjgL8AlFNKRSml3ldKfaiU+jDlkI0AzgE4A2A+gI80CtVkWrcGmjThMwchLCUxkQe6V60CJk3iFkTbtkC7dsBpbx3ievdH78tjkOuz/nhUR4c9e4B583iB6sWL/Bw3bgCJWw1ouiAQ7r074b/5GzCt4Vq0WRGI3r2BiKFhQHg4tmwBypXj5GSWeqgGAw+6h4UBISH8NTAwW4mjQYMGWLt2LR4+fIjY2Fj89ttvTx6LjY1FsWLFkJCQ8KScOYDnyp1ndNyz1q1bh0ePHuHmzZvYsWMHAgIC0KBBA6xcuRJJSUmIjo7Gzp07UblyTVy+zHXsvL1fHL+rqysSUkrcXrlyBblz50b37t0xZMgQHDxo2lUKmq7TIKIuL3mcAHxsoXAsQilgyxbLLfkXgggYOBDYsIGnfjdowFNoo6OBiROBwa8b4NJ1Nq72DYLbhNloN0aHHeDuHmdn3lisZEnghx+AK5+F43KxMORVOvR9HRjVBzgxOwy1RoejzpfD0KuXDuPf42Q0bBjPvpo1y8QVn8PDOVE87pLS6fh2eHiWu6mqV6+OTp06oVq1aihVqhTq16//5LExY8agVq1aKFWqFPz8/J4kis6dO6Nv376YPn06Vq9eneFxz6pZsybefvttXLx4EUFBQShevDjatm2Lv/76C1WrVoVSCqGhociduyicnC4gR46Xf17069cPVapUQfXq1dGjRw8MHToUTk5OcHV1xezZJl6pYMzAhy1drHkgPK3kZJ6tcu2a1pEIexcXR/TOO0TDhhEZDEQeHjy4fegQUeJWPSV58kDymTNE3Yrr6V5uL/prvJ7OnydKSkp9nvPniWbMIGrdmihPntRZV/HxPAD+5ZdEzs5E5coRnThBNHw4H9Or18tnZWV6INxGBQcHPxlgN0ZCgnnisOeBcLv1779Aq1Y8xiGEOeXIwWMS/v5As2a8JmPvXl7QN6NHOIaWDAM11OG114BlUTq4rw9Dbedw+Pg
83ULw8QEGDADWrQOioriV4usLuLoCOXMCI0cCej1w8yZQuzbQqBH3Hi1eDDyzn5F4geho4N49vu5ijRtyG5NZbOliKy0NIqL/+z+inDmJLl3SOhJhj86dI2rShL8aDEQ5chC98QbRzZtEwcFEShEVL060Zk321w4dPswzX+fNIzp7lsjPj8jJiWjuXKI9e17+/Y7S0niZu3eJwsOJzpwx7+tIS8NGjRrFxcfGjdM6EmFvEhOB7t2Bfft4tt677/KubmvWAIMGAaNHA++9xxUK2rXL/hhbvnyAnx/Qrx8wZAiP2zVvzgvF9+/nY8LDeZyDMhgcp4wecBAJCTwGlDMnUKqU+V4nuz9na2z8OAwfH57FMm8eMHQol1EXwhTGjQP+/JOrLPfpw9M1N23irqr9+4GxY3lmlKkmZPj6Atu2camRoUOB06d5plbu3MBnn/GmZAkJPKPKxQV4tuqFm5sbbt68CU9PzwynvtozStmyNSmJk7u5uqWICDdv3oSbm1uWn0P2CNfYlSs8m2XuXKBxY62jEfbgzz+B+vV51tPJk1xhecsWLgvl7MwVUnPnNt/rb90K7G4TilcDA9BtgQ7/938882pRDwPynghHh/3D8O23nEweS0hIQFRUVIZrG+xdbCwXIvT0BPLkMe9rubm5wdvbG66urk/db+we4dLS0Fjx4lwgzqRTEoVjCg0FAgIQMlGHkiWBAgWAfAcN2NU9HIP/NwyFCwPLl5s3YQC8DqnKwgAU/iQQalcY5s/XoXK0AW8vDcR/tdthVH0DPv9cB19f7jaDwQDX8HD4Dhtm3sCsWFISTxawRL3H7JKkYQWcnHgmy5492a6GIBxZQAAQGIh1y8Kw9JIOK/oZ8KtbIKZEhWH7DmDJEsutDyrSWQcUCUNSh0DMS+6PT5NnY3z9MOzcBWzME4jocmH48Ucd3vVIs1DPAZ09C7i58Yy2d97ROhojGTNabksXW5o9ldZXX/Fsk1OntI5E2KqrV4kebdJTYkEv+iZXEN109qIZ7fQEEIWGahPTgyFBRACNUUG0bBkXPWzspKeHeb0oaaRpig3aqthYoooViSpVeno9jFZgK1VuTX2x1aRx9SqRmxtRz55aRyJsUXIyUbNmRFWrEi3z5Q/qfc2CCCD6+GONyvGnVKJ9NCyIbrt4UWMnPa1aRRQQQDTehWO8+2kQ9exJdO+eBvFpKDmZKDCQTxS3btU6GiZJwwZ9+imvqDX3HG1hf1at4v/m8U30dB1etK95EMV5eNHIN/QUF6dBQM/scXHvNz3dcvGipq562vu1nm44edG3eYLoYV4vaqT01LGjY+0zM2kS/74mTNA6klSSNGzQlSvc2ujdW+tIhC15+JDolVeIevvqKVp5Uf/yei7bYaLNibJk4sTnXvf2z3raUa4fJXl60bmFesqXj+j9V7lsSUPoadw4y4ephT/+4BZG+/bWlSiNTRoyZ8eKFCvGi6P++YfntAthjJkzgUuXgHo5w9FZheF+TR2CgwFqmKaQn6UNG/bcrI78bXV4s/drcFoVhqKddZg7F1j8rw5j/cLQr1o4/vc/IE1xWbtVrRqXY1m0yDYLl8o6DSvz8CHPprDFPyahjVatuN7T3r1A167Ajz8CH37IFW2tVYsWvEapZ0/g8885x2zfzutI9u61z7//e/f4/eXKpXUk6TN2nYYkDSt15w6vEi1QQOtIhLVLSACqVuUPpTx5+GtkJJA3r9aRZWzrVk4cTZtyC3vhQl7g2rkzlySxN8nJXK7l6lVg927rLERobNKQ7ikrFBPDZRkmTtQ6EmHNrl3jy9KlwPHjwBtv8NfvvrPuhAHwAsDvvgMqbwqF300DatcGBg/m6s9xmw3Y2SoUSUlaR2k6I0dydeCuXa0zYWSKMQMftnSx5YHwtDp1Isqbl+j2ba0jEdaqd2+iggWJihThaax58vA0TlsyqwPP9lr1kZ6KFSPqWkxPD/LwwPiIEVpHZxrz5/OUow8+sK6B72dBZk/
ZtkOH+LczdqzWkQhrdOYMT8+uU4f/TvbsITpyhNf72JKEBKLJrfSUUMCLLvYKouvwoi/r6KlvX35fP/ygdYTZs3Ur/56aNzffhkqmYmzSkO4pK1WtGvf5Tp3KBeaESOvrr3nzo7//5vITb7wBVK4MFCmidWSZ4+ICDP5NB5cB/fHK4jE481Z/jP+La2e9+Sbw/vvAX39pHWXW+fryXuwrV9pBt1QKSRpWbMQI4MYNYPNmrSMR1uTCBa4jVbo0EBfHiWP4cK2jygaDAZg9G+tfD0KFP2bjqzcNCA4GZpQMRQdPA3r3Rur4hsHAhRmtXHQ0D36/9hqXiLenwX1JGlasXj0e2GzXTutIhDX5/Xcucnn8OG+revEib61qkwypBQtvDAxB24QwDNkfiPe8DRj1awAW3g/E7yMMcHZOc6yVl4K9ehWoU4c3u7JHkjSsmFJA+fJ8PTFR21iElQgNRb8yBnTsyInj6FFgqL8Bzf62/rPvdIWH8wJEnQ69egEle+jwzsMwDKgVjq2JOgwtGQbvzwNBQaPwqHUg4n8Is+pS0Hfu8I6F//0HdOumdTRmYszAhy1d7GUgPK2RI3nA05pnXgjLiPmFq9g2dtJTlSpEjRQPIttLpdjYWKLy5YmKFiWaOZMHw7e9wcUNRyOI2rcnLpFihW7d4llsrq5EmzdrHU3mQQbC7Ye3Nw8G/vGH1pEILUVHA8W76TC4RBh+TA5E91Oj8EvOQLisse6z78zIk4cbHs7OvGBxUisDqvw5G6c7B2GI+2zcXGPAgAEZ7zOuFSKgTRseX1qzBmjWTOuIzMiYzGJLF3tsaTx4QFSoEFHLllpHIrQ0ejSfebu4EK2vwWff9wYHaR2WWTx6RER6PSV7edH7r+mpYEGiaz/pKTYXr+H46CPr2IMirW3biDZs0DqKrIO0NOxHrlxc4GzjRi4PIRzPw4dcmLBUKeDNZAOan58NBAXBfelsHiC2MzlzAsn7w7G6Yxg6zNQhIQF4d5oOOX8Jw9A3wzF3LnDokNZR8v7rS5bw9caNgZYttY3HIozJLLZ0sceWBhFRdDRRrlxE77+vdSRCC/PmcSujsZOebjpbSflzM7t2jcjTk8jfn2j5cn7/Q4bw2N6RI6nHaTXW9+ef/KMvVIjozh1tYjAl2EJLQynVXCl1Uil1Rin1RTqP91JKRSulDqdc+mgRpzXw8gKWLQOCgrSORGhh1iygaFGgenI4AikMqpGOp6HqNCx/bmaFC3MRw4gI4ORJ4KOPgEmTgPXreSEjAPzyC9CwIXD9umVjW76cf/T58wN79gAeHpZ9fU0Zk1nMcQHgDOAsgFcB5ADwN4CKzxzTC8DMzDyvvbY0hGM7c4Zrkfn4EOXIQXTpktYRWc5773Epjr/+Inr9daICBYguXODHwsK4Bf7KK0QHDlgmng8+4FZP/frcA2AvYAMtjZoAzhDROSKKB/ATgDYaxmMTIiKATp2AR4+0jkRY0oYNQGwsEBUF9OnDM+ocxdSp3NIeMIAbVUlJvMYvPh7o2JFLjQNA3brAt9/C7NVxy5QB/vc/QK/nuByNlkmjBIBLaW5Hpdz3rPZKqX+UUquVUq9YJjTrFRPD/zgrVmgdiTCb0NAng9uHDnFlgN1jDBjnEQoiYOhQjeOzsIIFeZe7KVO4dMqiRcD+/bxxEwBUr84nU40b84ZOv/9u2te/cYM3tQoL49uffw6MGWM/taQyS8ukkd7eXM/Ovv4NgA8RVQGwDcCSdJ9IqX5KqQilVER0dLSJw7QujRoBVarwGRVZ2Vx1YSIBAXwqbTBg5kzAfb8Bs24E4o1BAdi+HfDx0TpAy2vRAqhfn6+3aQP8Wi8Uf08zYM0avq9wYeC3wQac6ReK5s35vpUreSwkq27d4vxdpgywYAHPlBLQdEyjDoAtaW6PADDiBcc7A4h52fM6wpjGokXcp/r771pHIsxGr6ckTy8a5xxE0cqLupfQW+1KaEsKCiJq2pQobrOebrl40du59XT
qFD03i+zhQ555pRRR+/ZEa9fyanNjhYYSubnx/1mzZkSRkeZ5P9YENjCmEQ6gjFLKVymVA0BnAL+mPUApVSzNzdYAjlswPqvVpQuXwJ42TetIhNnodNhbrT++TBqD76g/drvq8MsvWgelveLFuftpzS0d4peFYfHDQOjrjQKlFD18vDLezQ04dgz48kvee7xtW8DTE1i8mJ/n2jXgp5+AH37g/6OgIG7NXLjAj/v4AD168ArvzZuBihW1eLdWypjMYq4LgJYAToFnUY1MuS8EQOuU618DiATPrDIAKP+y53SElgYR0YwZRBMmSD0qe5W4VU83nLxoUm7emKgh9DZZz8jUEhN53Ubx4kR37xKd7sIr49dWCcrwfyEujhsgn39OdPgw37d+PbciHl+UIqpYkWdoOSrIzn1C2KiU8hlL/09PTk5EbfLxgr7k7fa3gC8r9u7lT67ZgdwltaMBJ9ZfBxv/83nwgOjoUaJTp4hu3LDeIoiWZGzSkDIiNiw+HvjxR+DuXa0jESYVHg4VFoYzr+iQnAysu6tD+NAwqAj7W8CXFbVqAaEtDOgQFojY78NQTx+Cb/zDUPvbQJyYbVxJlVy5gEqVeJDb05MLJArjKE4w9sPf358iIiK0DsMiwsOBmjWB6dOBTz7ROhphKmfOcD/8qFGp9128yPWYBLsfHIo7ZQJQojuPYdy8CQyoZEDlR+Hoe2oYChfWOEAbpJQ6QET+Lz1OkoZtq1OH/2FOnOBNeYSNCg3lqbY6HQYP5hOB+kkGjGwSjqs9hqF7d60DtF43b3Jr4dAh3iu9Zk1g2zbeQ10Yz9ikIR8zNm7QIOD0aWDTJq0jEdmSsjYjfosBS5YAb+c2YI1zIBoND5CE8QKffsrdVXFxwOuvA/PnAzt3AkOGaB2Z/ZKkYePat+dpiDNnah2JyJaUwoPJHQMx6NYoLIgNxITXw3C1gn1srmQuLVsCZ89yQUcA6N6dE8n06bxyXJieJA0b5+oKfPABcOkS8OCB1tGIbNHp8KNHf4zCGMxV/fFNhA7nzmkdlHVr2pT35A4J4XIfAPf0NW7M/xeP61IJ05GkYQeGDweOHAFy59Y6EpEdDzca8O7V2RjnFIQPMRt9XjOm0DB2AAAgAElEQVSgbl2to7J+kyZxMceQEL7t6gqsWsUL9Nq2TV2wJ0xDkoYdyJkTUAq4d493eBM2yGBArp6B2P1JGP6XHIKOFIbp1wKhdtjfrnymVqkS0K8fsHo1cP8+31egAPDbb0BiIvDOO5xUhGlI0rATFy9yuewl6ZZ0FNYu4c9w3F0QhuAdOuTPDxzy0EHZ6eZK5jB+PM8gdHdPva9cOa4scvw4139MSNAuPnsiU26NRAT89x/Pob9yBYiO5suDB0ByMl9y5OCpf56eQIkSXK/mlVcsMxWWCPD351kkR45wy0PYjqVL+Ww5Lg6oXZv3hpg0SeuobE9SEk/BTbtOY/58/tn26QPMmyf/Gxkxdsqtg1aEf7HERC5UFh4OHDzIlxMnUpu+jynFK0udnTkxPHzIq7TTcnfnUuaNGwNvvcUfCOZYpKUUb1LTuzfwxx+8BaawHQsW8N+FszOwZQuQN6/WEdmmt97i/4Xt21OTQ9++wL//AuPGAaVK8QZKIuukpZHi9m3ej/iPP3jP38d9oAUK8CYvlSvzBjBlynDroVAh3hwmbfkBIm553LzJ3UXHjvFl3z5OQElJ/GHQsSPQqxdvrmPKs56HD7mLSqfj/l1hG06eBMqX50193n2XB3FF1syYAQwcyJVpmzVLvZ8I6NkTWLYMWLgQ+L//0y5Ga2VsS0PzAoOmvmS1YOHt20ROTlzp8sMPiVasIDp3znRVZO/cIVq3jqhXLyJ3dy64Vro00dy5RI8emeY1iIiGDuX9lK9dM91zCvMaNoz/9h5XXF2/XuuIbFdcHJGvL1HVqkRJSc8/1qQJ/6zXrNEmPmsGqXKbebduZflbM+XePaKlS4kCAvg34O3Npc7j47P/3FF
RRBER2X8eYRkJCUQheSZSK3c9eXgQ5c/PFVhJryeaOFHr8GzSDz/w/9WKFc8/du8eUZ06RDlyyCZmzzI2acjsqTQKFLDM67i7A++9x91WW7bwfPJPPgGqVQN27Mjec5coAdSoYYoohSW4uAANhwZg4f1A+Mca0KMHkGuvgaf7BARoHZ5N6tIF8PPjCtDPcncHNmzg7sB33+WuaJE5kjQ0pBSvaN21i+eUP3jA4xHdu/O4SFbducN9trLTm21YdUOHLk5h+DE5ECPjR3HCSLMLncgcJydODGvXpv94gQK8+5+3N68ml8SROZI0rESrVkBkJM/sCAvjVsfOnVl7rrx5ucUi28Fat6gooEMHXltz0EOH30r0R+E5Y4D+/SVhZNMrr/AkldjY52c0ArxdssHAdduaNeMTN2EcSRpWJHduYMwY4K+/eI9jnQ4IDuZZV5nh7Ax8+CEnjshIs4Qqsis0FDuCDVizhjfR+m2wAT1jpvHc7Nmz+RNNZEtUFPDqqzydOT3Fi/P/iLc37w+e3a5hh2HMwIctXexlu9e7d4l69OABvbffJoqNzdz3R0cT5cxJ9NFH5olPZE/ydj3ddPKiVu566lJUT8n58hF5ePAAuJ63MSW9bO+aHcnJRPXrExUrljK5IAP//UdUoQL/v/z8s+XiszaQgXDbljcvd1t89x3vlVG/Pp85GcvLi7vGly3jmlTCuux21aF9chgW3g/Ex3fGISlJcSe8TvekTLqUEMkepYCxY7mSw+zZGR9XtCh3T1Wrxt2F8+dbLkabZExmsaWLvbQ00tq0iShvXqLixYn++cf479u3j+jTT7nVIaxLr1487XM0gogAutgrSOuQ7FbjxkSFC/N02xe5d4+oRQtu3QcHm26Nlq2AtDTsx+MZHkrxSejhw8Z9X82awJQp3OoQ1qV0aaCJiwEfqdmY5RkE7/UyjmEuo0cD168Dv/764uPc3YF167haw+jRQKdOskdNeiRp2Ag/Py5xkjs30KgRcOCAcd9HxHV4ZEDcurRwM2DRg0B0pDDcHxbCFW0DAyVxmEHdurx/eJcuLz/W1ZXLjHzzDZfiqVePNzgTqSRp2JDXXuPE4eHBk2yMKbH14AHQrh0wYYL54xPG2b0bOLEsHN1dw7DLWYf33oOMY5hZtWr8NS7u5ccqxXuM//YbV7WuXp3HFQV7YdJQSpW0VCDCOL6+nDgKFOBpgqdPv/h4d3egRw/+PIqOtkyMImOnT/Okhp6Rw5D8pg5DhwLFiqU8qNMBw4ZpGp89mzuXuwWNnRjy9tvA/v08NbdlS/7VyJ4cL29pPFlTrJRaY+ZYhJFKluTyIwAvTLp69cXHf/ghL3BatMj8sYkXW7yYz2QTE7lU99dfax2R46halWcgfved8d9Tvjywdy+vt/zmG+CNN4CjR80Xoy14WdJIW7j7VXMGIjKnbFlg40Ye4GvRgheIZaRSJaBBA2DOHN4sSmgjKYmnUXt4cOJ/Vf6jLKp2bT7JmjTp+b1xXiRXLk40q1fzvhzVq/MiXEdtdbwsaVAG14UVCAgAfv6Zz3wCA1+8cvyjj3i/jXPnLBefeNr27cDly1wb7MoVICRE64gcT3Awd9POnZv5723fnieUdOgAjBrFyWP7dtPHaO1eljSqKqXuKqViAVRJuX5XKRWrlHrBua1xlFLNlVInlVJnlFJfpPN4TqXUypTH9ymlfLL7mvamaVM+C9qyBfjiuZ9gqvbteWOo0qUtF5t42rp1XB4G4O6pXr00Dcch1anDu/tNnsy/g8wqVAhYsYJ/l/fu8XO9+y4PmDsKzXbuU0o5AzgFoAmAKADhALoQ0bE0x3wEoAoRfaiU6gygLRF1etHzmmuPcGs3YAAwaxavAO/ePePjEhN5Bom7u+ViEywhgescxcdzQb2//5b9qrUQGQnkyMG7cGbHo0e8Dmr8eL7eowfw5Zc8y9EWGbtzn5ZTbmsCOENE54goHsBPANo8c0wbAEtSrq8G0Fgp+TdLz5Q
pvC94nz4Zz9p8+JBbGjL4qg29nsegHpeul79kbVSqlJowsnPO7OYGjBgBnDrF3b8rVgDlyvFeOfZ83qpl0igBIO2ymaiU+9I9hogSAcQA8LRIdDbG1ZX3li5alPcgv337+WNy5eJFggsWpF8uWphP69b8AZMrF/+uunbVOiLHdv8+0KZN5mZSZaRYMd6G4Nw53p987Voeb6xViyc+2FvtNy2TRnrnWc/mfWOOgVKqn1IqQikVEe3AixG8vHg9xpUrfCab3llU//7AtWuyQZMlHTvGC8X++Qfo3ZtXGBcponVUji13buDWLV70asyCP2MUKwZ8+y1Pdpg+nWc09uoFFC7ME1V+/tk+EoiWSSMKwCtpbnsDuJLRMUopFwAeAG49+0RENI+I/InIv1ChQmYK1zbUrAlMnMgDdTNmPP94s2a8QNAUZ1jCOIsX825ySUn8ISIJQ3tK8YZnUVHA0qWmfW4PD96++dgxXv3fuzfv1dG+PVCwIFdzCA3lfXNMlbAsScuBcBfwQHhjAJfBA+FdiSgyzTEfA/BLMxDejogCX/S8jjoQnhYRN703bwb+/BPwf2Zoa+JEnml17BhQoYI2MTqKxEQe9I6L4zUy7dpxbSOhPSLuQrpxAzh5krsNzSUxkXfi3LyZL0eO8P05cvDU3SpVeKylUiUeSPf25v3jLcnYgXDNkgYAKKVaApgKwBnAQiIap5QKAZfo/VUp5QZgGYDXwS2MzkT0wpUGkjTYrVtcbydnTq6Km3a21M2bPFjetCmfAQvzWb8eeOcdvu7iAvTrx7PchHX47Tceb1q8GOjZ03Kve/UqtzT++gvYt4+TSNpxSCcnoEQJHqMsVIi7nvPl4/9jd3dONs7O/DdFxK3YpCTuIuvRI2sx2UTSMAdJGql27OCKuP37yweVVv75h7ukDh3i2/v2cReisA5E3I3brRvgqeEUGyJOJMeOAefP88rzf//l2XY3bvCCxNhYHsB/0SSWmjX5bywrJGkIAMDnn/Pg3ObNPJ7xWHw8r2qtVImnCArzSEoCfHz4n71IEf5QkKm2IjsSEviSlMTdXkpxq+NxyyNHjqw9ry2s0xAWMG4cULEiD8bdSjOFwNUV2LqVB+Ts7LzBauzcyVuHRkVx10PPnpIwrJVezydPtlCbzdWVZ3/lzcvVrvPn5+u5c2c9YWSGJA075+YG/PADN3MHDEi9XylekHT0KM/wEKY3aBCvEPbw4Jk6L1qpL7R15Qr/n/z2m9aRWD9JGg7g9deBoCDgxx+f/qfo0oU/0GS8w/QOH+ZLbCz3l48ZwzNihHXq3JmrDo8dKy3vl5Gk4SC++IJXg3/4IRATw/flzs2LANes4TMtYTqLFnH/cmIiUKqU45bRthUuLrxiPyKCu21FxiRpOIgcOYDvv+cZGmk3h/v4Yy71bIuLjKxVXBx3dXh48GXsWEkatqBHD24Njh2rdSTWzcLLR4SWAgKAwYN5E5rOnXl30dKludtKmM4//3DV0wcPOFl3786tOmHdcuTgsiJ37/KAuKxhSp9MuXUwDx7wtpdEvKAoVy6+/+hRrotTu7a28dmLIUN4zwaA93Rv0EDbeIR4GZlyK9KVOzdv+3r2LJ9VAZxA2rcHPv1U29jsQUICj2P89BMvFvP1BerV0zoqkRlxccDs2cDBg1pHYp0kaTigxo15Rs+ECbwXgFI8trFvX8Z7cQjjjB/PeypcvszdHT16SDeHrYmPB0aOBEaP1joS6yR/zg5q8mTumvroI25p9OoF5MmTfmVcYZykJC5GeP8+tzLOnXvxFrzCOuXNy63uX3/l8SnxNEkaDqpIEd7Bb/t2HgjPl48Tx8qVwH//aR2dbdq+nfdhv3ED6NSJF1Y+3hNc2JZPPuHkMX681pFYH0kaDqxfPy5wNngwr90YNIir4h44oHVktmnBAh4zSkriKbd792odkciqAgW4yzYsjMumi1Qy5dYG/fEHr+w+e5b7zv/7j2vw79jBj2/YwN0jAQFcxCw
jzs68GVNAAPfffvstr+OQ6aGZd+0ab/NZoAB3+z18yMUghe367DMuXW4Pu+2ZkrQ0bMTRo6nX58zh0h+nT/OHVOPGT8/QGTQIqFOH6/D368eD3RmpUQNYXTMU/0wzIDKSEwYRcGuNgasZCqN4eHDijY7mMY0OHbh7Q9iuwoX5RKxGDa0jsS6SNKwYEbBlC8/x9/NL3ZNh8mTeSOnoUX588eKnV7Hu3cvjFK1a8VaW5cu/+PO/8RcB+IkC8X13A4iAya0MoMBAJFUPMOv7sydubtzacHbmhX29emkdkTCVGzd4+2TBJGlYqStXeGvQ5s15U5Zp03gqJwAUL/7iLiQvL17xvXQpb+QyYkTq4rKYGO46ScvjXR32DAzDiMOBONZxFD7ZHYgOyWH4JUZnnjdn60JDAYPhyc0//wR+eN+A/PNDUagQ15p6800N4xMm9dVXQGAgdwULAERkV5caNWqQrYuLI/L2JnJzI5o4kW+bSrduRGXLEu3Z8/T9iYlEc4sEEQH0aFgQvfYaUa1aRMnJpnttu6HXE3l58VciCm6gp2h4UUPoado0onXrNI5PmNS5c0TOzkSDBmkdiXmBt9l+6Wes5h/ypr7YctKIj0+9vnYt0alTpn+NrVuJfHyIXFyI5s5N84BeT/H5vWg0guhebi/6ZZCeAKJt20wfg11ISRyxnwXRdXhRYCE9+foSJSVpHZgwh549iXLlIrp6VetIzEeSho25eJGoRg2i7783/2vFxBC1aMG//UGDiBK3pp45d+lC1NRVTwkFvKijl57atTN/PDYriFtmoxFEANGbbxIdPap1UMIcTp4kUopo2DCtIzEfY5OGjGlYgfBwwN+fZzkVKmT+18uXj6fsfvYZsHw5EGsI5wnpOh0mTgR2uegwrkoYpnUPx/Ll5o/HJhkMoNmzMc0jCB87zYYOBvzxh6xxsVdly/I4oSx8hbQ0tLZrF1HevNxlFBlp+dd/3NxOTiZKSODrX33FrZAdO/h2XJyMbTwlpWvq1ho9NWlC1MpdTzecvKhlLj3du6d1cMJcHv9/2CtIS8P6XbnCs6OKFQN27QIqVrR8DEWK8Ncvv+RKt/HxwNChwCuvcP2dyEg+y9qyxfKxWa1wbpkVaKfDgAHA+vs6dHUOQ2+/cLi7ax2cMBeXlKXQJ0+m7n7piCRpaKh4cS4QuHOn9vtHe3tzgbZu3XjNwTff8B7Xu3bx46NGyd7JTwwbhstldbh0CZg3jxf2/Z6gQ4lpw17+vcKmXbzIK/2nT9c6Eu1I0tDAvn18AXiP7sdn+1r6+GNOFKtXp85Lr1sXCA4GPv/8ycm1SDF+PC+a3LCB63fVqQPUqqV1VMLcSpYEWrYEpkwBYmO1jkYbkjQs7Ngx/qPr14+3lLQmn3/OSWzMGOCXX4CpU4Hr14FLl4Bq1Xg3uvv3tY5SezExwJIlXO/LyYkLFe7Zw/uSCPsXFATcvs2lfByRJA0LungRaNqUN+dZu9b6NudRigsYNmzIO9D5+wM9e/Jq9BEjgKgort7q6JYs4eQZFQU0asTjP5IwHEdAANCsGZfzccRihlb2sWW/YmO5FtS9ezyo/OqrWkeUPjc3QK/n7imAu2FcXLhras8ebiE5sqQk7s8uU4bPNiMieO8F4ViCg/l/2hHL32uSNJRSBZVSW5VSp1O+FsjguCSl1OGUy6+WjtOUZszgrqlVq4AqVbSO5sUenzXPnctN8BEjgDVruPWhFBdLdFQHDnA9rxw5uArqnTtAixZaRyUsrU4drkX11ltaR2J5WrU0vgCwnYjKANiecjs9D4moWsqlteXCM73hw7nMcpMmWkdivKNHeXe/N97gLpjH+wv4+HD3miOqWZNbipGRQP78POuseXOtoxJa8PTkr4624E+rpNEGwJKU60sAvKtRHGa3cSOvx3B2fnrPC1vw9dc8W6R/fy69fugQcOQId818+CGXjHYkcXH8dd067rI7fRro3fvFG10J+zZsGE8SefBA60gsR6u
kUYSI/gOAlK+FMzjOTSkVoZTaq5SyucTyzz+8Gc/nn2sdSdbkyQPMn8/lTSIjgZklQ7HlCwNmzeL+/AEDwCXCHWSzpo4deaxn8WKgQgW+r3dvTUMSGmvdmmcYzpmjdSQWZMyy8axcAGwDcDSdSxsAd5459nYGz1E85eurAC4AeC2D4/oBiAAQUbJkSRMvrs+a27eJSpcmKlbM9itjvv8+l4beFaKn6/Ci+V31NHYsUUPo6VHe1BLh9iwykkurPC70uGuXQ7xtYYTGjYmKFCG6f1/rSLIH1lzlFsBJAMVSrhcDcNKI71kMoMPLjrOG2lPJyURt2nD58d27tY4m+27eJDIY+Pr4Jpw4bnwcRLdcvGheF8f45Ozdm0tj+/oS1a6tdTTCmuzaxZ+kkyZpHUn2GJs0tOqe+hVAz5TrPQE8t5miUqqAUipnynUvAHUBHLNYhNnw3Xfc7z1pEq+qtnUFC/LaDQDoOl+HBS794TlrDPIM6Y++K+x/d7/z53kXxFmlQlHqvAH58gGzZ6c86EDdcyJ99erxBJdFixyj1I5WSWMCgCZKqdMAmqTchlLKXym1IOWYCgAilFJ/AzAAmEBENpE0unYFvv0WGDhQ60hM6+uvgYnNDRiYYzZCEASaPRswGBARwf389mryZB7sPuwagNUqEIlbDYiKAieMwEBe7SUc2oIFvGbDERZ5KrKz1Ojv708RERGavHZsLJAzJ8/ht0fb/2dAlXGBuFG/HUJOdYa7OzD/biBCKoVh925g0Ufh8J5uf0X7YmKAn37iGWODqhgw8p9AuA7sj/wrZj/Zh0QIAEhM5Iubm9aRZJ5S6gAR+b/sOFkRbiJEXLfpzTd51bA90uUNR1DZMAQf74xFDwJx9hzwW/cwDC/1E36iQHyxJsDuirgRcRXbgwf5hGDFfzpsL9Mf+aeP4bnIkjBEirt3gcqVuWVq14wZ+LCli1YD4XPm8GDYxImavLzFGAz8PsP66+m2qxd9kyuIkjy96PAUPTk5EXXubD8bNp06RVS1Ku+r7uZGpNPxjLG4fF681auXY8wcE8Z75x2iAgV49qStgTXPnjLnRYukcfw4z6xp0oQoKcniL29xjRsTFS9OdLkP75G9MSCIiIjGj+e/qFWrNA7QRN57j3+vAwfy/tDbRurpbk4vStqWkij0ekkc4imHDvH/QFCQ1pFkniQNC4mLI6pencjTk+jKFYu+tGYiI4milvEH5kb/ILoOLzo9X09JSURLlhAlJmodYfYdPMiJYuBAIg8Pog4diJuRzyYIvd7+m5ciUzp2JMqTh+j6da0jyRxjk4aMaWTTtWu8ReqCBbxtqyOoeM2AEp8FAmFhqLUlBH3zhcGrfyDUDgN69OCZRlevAtHRWkeaNURcZ8vTky8xMUDt2sDFzsOeH8PQ6biWhBApQkK4rMiCBS8/1hbJ7CkTSExM3T/YIYSGIr5qANrP1KFePcDLC/ihjwGTO4Wj+k/DEB/Pu9qVKgX8/jvg6qp1wJmzeTNXrp0+nUvDlyvHJdDbt+e9NIR4mT//5J0cbakumcyeMrOYGGDoUJ4x4VAJAwCGDUOOZjokJ/O6tvbtgfsBOrz9xzDcvctTjkNCuKrvZ59pHWzmNWkCLF/Oye7qVa4zdf8+MGiQ1pEJW/HGG5ww4uO1jsT0JGlk0cCBvE/wyZNaR6Kd0aOBW7d4z41Zs7irLiiIH+venbeHnTWLix7aiqQk/mcPDOSpkzVqAJs2AQ0aANWrax2dsCXbtnGV6DNntI7EtCRpZMGaNVxWYuRIx14M7O8PvPMOf7iWLcvLFmbO5K4cAJgwgfea+Phj29jh7OJF4JtCodj7tQFLl/I/e5MmgO+/BswqJaVCROZUrswLfv/3P60jMS1JGpn033/ABx/wB6a9/TFkxVdfcZn0mTO5/79wYf75JCbyGfuPP/Kix8elxK0VEdC3L7DzYQD8vwnE7yMMqFmTB/3XOAeiQg8HPjsQWVK0KDB4MLByJe/4aDe
MmWJlSxdzT7nt1IkXeh0/btaXsSnLlnElXCKilSt5IvfUqc8fd/8+UXS0ZWMz1vffc9wzZhCtHciVfM924wV8cZtlHYbImpgYno7/1ltaR/JykHUa5nH+PNHPP5v1JWxacjLvOZEnD9HFi0/f36gRUUAA0b172sWXnqgoXovRoAFRbCxR0aJEi0vywkWbXKUlrMqUKfynFBGhdSQvZmzSkO4pI8XEcBeGjw/Qtq3W0VifvXt5yUJMDA9+JydzgT9KmdGtFM8+OnCAdzN8vHWqNVi1ime5fP89MHcuUP6qAZ1vz8Y4pyA8/JYr+QqRVf37A7t28aQKeyBJwwhJSTzg27Wr1pFYr1y5eIrt5MmAry+XUd+48el1Da1b84fy5s1Aly487mENPv0UOHaM15vsCjFgrWsgFjQNQ7AKQcz8MJ5KJYlDZFHOnLznBmBdJ0tZJUnDCFOm8JlCixZaR2K9qlblz9YpU3jP5AEDgPr1+QP58uXU4/r0AaZNA9au5UFCLf3xB/D333zdx4cH9cvFhuP8hDAM26RD165A0S46Ln8eHq5lqMIOTJsGVKoEPHyodSTZZEwfli1dTD2mceQIUY4cRO++az/VW83lxAkiJyeizz7j26dPc8G/li2f/9nNmsVVZLVy/jwPUAYEcGyRkbwP+gcfEI0axX3QR49qF5+wP48rRI8bp3Uk6YMMhGdfXByXxi5c2PaKj2mld2+inDlTB8GnTuW/su+/T//45GROIPfvWy7Gu3e5yKSHByeu5GSuUJw/P9HVq0SlShG1bm25eITjaN2aJ4lcvap1JM+TpGECx49zCfBffjHZU9q9CxeI5s4lio/n20lJRA0bErm7E508+fzxe/dyNdl69Yhu3TJ/fHfv8ms5OxOtX8/3/fIL/ydMm8a3b93i9yGEqZ04QeTiwi1aa2Ns0pCChS/x4AGQO7fJns4hRUXxmEepUsBff/HAYFphYVx25NVXgZ9/BipWNF8so0cDY8bwosOOHfn36+fH23OGh/NXJxnpE2Y0cCBXwL14kSdfWAspWJgN9+7xLKCEBEkYWbV4MU+5BQBvb2DRIuDQIWDEiOePDQwEtm7lleU1a3KZFnP58kueCNWxY+rtc+d4RfvcuTwt8s4d872+EMHBwOHD1pUwMkOSRjoGD+YKtna19N/CLl3iD+Fdu/h269Y8o2rKFGD9+uePf/NNTir+/rwntynt28cFB2/e5Mq19evz/X/8wTNaPv6Ya4h9/TXvn5E/v2lfX4i0PD25VhvABT9tjjF9WLZ0ye6Yxrp13L89bFi2nsbh3b9PVKIEkb9/6ha4Dx8SVavGA9AnTqT/fWlnWY0bR7R4cdZnrSUmEk2axH3IPj5Pz4aKjSXy9SV67TVeoT52LP/e9+7N2msJkVlDhvDf4IMHWkfCIAPhmXf1KlGhQvzB9uhRlp9GpFi6lP/Cli1Lve/CBf4Zly374oHvxESi+vX5+/39+Tky8ztZvpyoXDn+/nffff61PvyQB+B37uTHPDyI3nknc+9PiOzQ6/nvc8wYrSNhkjSyoF07ni4q8/NNIymJP/C9vbmV8diuXUSurkTNmhElJLz4+xcu5AQDcLL54w9+LDHx6RZITAzRli2p97VvT+TnR7Rq1fMtlbAwfr7H60m+/ppvHz6c/fcsRGa0b89rmdLWadOKJI0sOHiQ6IcfsvztIh379hFt2vT8/fPn81/fxx+/vPspKYlo61ZuMRw4wPf9+CNR7tx8yZmTWw0ALygkIrp9O7VbLK1Dh/h73ngjteUSH88JRwhLO3+ek0a7dlpHYnzSkCm3wmKSk5+ezjp0KDBpEjB8OA9CK2X8c+3Zw7OsnJx43448eYA6dbjGj5tb+t8THc0D7cnJPL22aFGuBfTsFGAhLGn8eODbb4EjR4BixbSLw9gpt462u7XQyNdf82ylTZtSk0NoKE9vnjiRpzaPGmX889WtyxdjxcVxdd3r14Hduzlh7NnDU2/Xr5etXIV
2hgzhjcs8PbWOxDgy5VZYRMGCwJYtwLJlqfcpxWXUe/bkuevjxqWWUjelB8gKt3QAAAkxSURBVA94yu/OnVz+vEYNrlw8YAC3UsqVM/1rCmGsHDk4YSQl8fRwa6dJ0lBKdVRKRSqlkpVSGTaHlFLNlVInlVJnlFJfWDJGYVp9+3L30eDBwI0bqfc7OfEHebduvH1u796mLR8dGwu8/TYvHvz++9Ty9vPn8wKrSZMAd3fTvZ4QWRUSwmuITpzQOpKXMGbgw9QXABUAlAOwA4B/Bsc4AzgL4FUAOQD8DaDiy57b3Dv3iaz75x9eM9G16/OPJScTBQfzYHb9+qYpEHntGlGdOlxnasWKp+8vWJBrYknlYmEtrl0jKlCAa6OlN4nD3GDNO/cR0XEiOvmSw2oCOENE54goHsBPANqYPzphLn5+PG6xahVw/PjTjynF+1n8+CMPUletCqxcmfXuqrVrgcqVgYMHubZVly6pjy1bxmMpM2dmbvBdCHMqXJgrJuzeDcyZo3U0GbPmMY0SAC6luR2Vcp+wYV9+yeVCKlRI//HOnYE//+RZJJ07A82bAydfdnqRRlQU0KsX0K4d17w6cICvpzV4MG++VKlSlt+GEGbRowfQtCnPKLx4Ueto0me2pKGU2qaUOprOxdjWQnrngOmedyql+imlIpRSEdHR0VkPWpids3Pqh/X27VwU8lmvvw7s3w9Mn857j5cvz/uPL1nCYxTPevSIn6t9e96Bb9kyHh/Zu/fpxPDvv8CpU9y6KF/eLG9PiGxRimu2lS3LtdKskabrNJRSOwAMIaLnFlYopeoA+IqImqXcHgEARPT1i55T1mnYhogILhI4fDgwYULGx129ymWkFy8Gzp7l+4oU4TLrefLwfRcvcjeWpyfw/vs8ffHVV59+nqQkoFEjThrnz2e8lkMIa0Bk+a5Te1inEQ6gjFLKF8BlAJ0BdNU2JGEq/v5Av368RqN8ee5SSk/RotxqGDmS11Xs2MEthn//5VZH3br8vZUrA61aZZwMhg/nKbeLFknCENZPKZ4qPmoU779RsqTWEaVhzGi5qS8A2oLHKOIAXAOwJeX+4gA2pjmuJYBT4FlUI415bpk9ZTvi44neeotnVG3fbr7XmTOHZ2V98on5XkMIUzt3jreGbdTIMrOpIGVEhC2IieHWwuXLwOnTpt+YZu9eLi3SrBmwbh3gYs1tayGesWABr3GaOhUYNMi8ryU79wmb4OEBbNgAzJhhnp3Mqlfn7q2ffpKEIWzP++9zt+sXXzw/TV0r0tIQVmXbNp5hpdNl73k2bQKqVAFKyCRtYeOuXuU1TuXK8U6Y5hogl5aGsDlEPPDXvDmwfHnWF/bNnctnZyNHmjY+IbRQtCgviF240DoWo0rSEFZDKe6qqlUL6N4daNMmcwuc/v0X6NQJ+PBDoEULLoYohD1o2JDXbhDxlHEtSdIQVqVAAUCvByZP5gV7FSsCFy68/Pt27OCpu7/9BoweDfzyixQiFPYnJIQXv2qZOGRMQ1itixe5/tTQoXz7iy/4TMvdndda3LrFi/j69eM57UOH8noMq5rTLoQJnT/PSaN0aR7fyJXLdM9tD4v7hIMrWTI1YRDxXgO7dwOJiXyfiwtPRwR4EyfpjhL2ztcXWLqUu2779+fFqpYe55CkIWyCUoDBwNcTE3nPDWdnWd0tHE/r1rxp2ejRvKHYJ59Y9vUlaQib4+Iiay6EYxs1iruqypSx/GvLv54QQtgYJyeu+vzYo0eWa3XL7CkhhLBhc+fypmVpt1E2J0kaQghhw/z8eI3Su+9yi8PcJGkIIYQNe+MN3njszz95nZK5yZiGEELYuI4deSGsJbYwlpaGEELYAUvteS9JQwghhNEkaQghhDCaJA0hhBBGk6QhhBDCaJI0hBBCGE2ShhBCCKNJ0hBCCGE0SRpCCCGMZnc79ymlogH8q3UcWeAFwEIlx6yGvGfHIO/ZNpQiokIvO8jukoatUkpFGLPVoj2R9+w
Y5D3bF+meEkIIYTRJGkIIIYwmScN6zNM6AA3Ie3YM8p7tiIxpCCGEMJq0NIQQQhhNkoYVUkoNUUqRUspL61jMTSn1jVLqhFLqH6XUWqVUfq1jMgelVHOl1Eml1Bml1Bdax2NuSqlXlFIGpdRxpVSkUmqQ1jFZilLKWSl1SCm1XutYzEGShpVRSr0CoAmAi1rHYiFbAVQmoioATgEYoXE8JqeUcgYwC0ALABUBdFFKVdQ2KrNLBPA5EVUAUBvAxw7wnh8bBOC41kGYiyQN6zMFwDAADjHYRES/E1Fiys29ALy1jMdMagI4Q0TniCgewE8A2mgck1kR0X9EdDDleiz4Q7SEtlGZn1LKG8DbABZoHYu5SNKwIkqp1gAuE9HfWseikd4ANmkdhBmUAHApze0oOMAH6GNKKR8ArwPYp20kFjEVfNKXrHUg5uKidQCORim1DUDRdB4aCeBLAE0tG5H5veg9E9G6lGNGgrs0llsyNgtR6dznEC1JpVQeAGsAfEpEd7WOx5yUUq0AXCeiA0qphlrHYy6SNCyMiN5K736llB8AXwB/K6UA7qY5qJSqSURXLRiiyWX0nh9TSvUE0ApAY7LPOeBRAF5Jc9sbwBWNYrEYpZQrOGEsJ6KftY7HAuoCaK2UagnADUA+pdQPRNRd47hMStZpWCml1AUA/kRka0XPMkUp1RzAtwDeJKJoreMxB6WUC3iQvzGAywDCAXQlokhNAzMjxWc+SwDcIqJPtY7H0lJaGkOIqJXWsZiajGkIrc0EkBfAVqXUYaXUHK0DMrWUgf4BALaAB4TD7DlhpKgL4D0AjVJ+r4dTzsCFjZOWhhBCCKNJS0MIIYTRJGkIIYQwmiQNIYQQRpOkIYQQwmiSNIQQQhhNkoYQZpZS8fW8Uqpgyu0CKbdLaR2bEJklSUMIMyOiSwBmA5iQctcEAPOI6F/tohIia2SdhhAWkFJS4wCAhQD6Ang9peKtEDZFak8JYQFElKCUGgpgM4CmkjCErZLuKSEspwWA/wBU1joQIbJKkoYQFqCUqgbekbE2gM+UUsU0DkmILJGkIYSZpVR8nQ3eU+IigG8ATNI2KiGyRpKGEObXF8BFItqacvs7AOWVUm9qGJMQWSKzp4QQQhhNWhpCCCGMJklDCCGE0SRpCCGEMJokDSGEEEaTpCGEEMJokjSEEEIYTZKGEEIIo0nSEEIIYbT/ByixNxS6Up4ZAAAAAElFTkSuQmCC\n", "text/plain": [ "
" ] @@ -386,7 +386,7 @@ "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAX8AAAD8CAYAAACfF6SlAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4wLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvqOYd8AAAIABJREFUeJzs3XWYlFX7wPHvM7Xd3cEmu0t3SXeDKAICdteLor7mT3197UBCRBQFpaQbFOmOpWNhu7t3Z2fm+f2x+EosEjs7s3E+18Ulu3P2OffI7D3PnLiPJMsygiAIQtOiMHcAgiAIgumJ5C8IgtAEieQvCILQBInkLwiC0ASJ5C8IgtAEieQvCILQBInkLwiC0ASJ5C8IgtAEieQvCILQBKnMHcDNuLq6yoGBgeYOQxAEoUE5cuRIjizLbrdqV2+Tf2BgIIcPHzZ3GIIgCA2KJEmJt9NODPsIgiA0QSL5C4IgNEFGSf6SJM2XJClLkqRTN3lckiTpa0mS4iRJOiFJUhtj9CsIgiDcHWPd+f8IDPyHxwcBoVf+PAbMNlK/giAIwl0wSvKXZXknkPcPTUYAP8nV9gOOkiR5GaNvQRAE4c6ZaszfB0i+6uuUK98TBEEQzMBUyV+q4Xs3HCEmSdJjkiQdliTpcHZ2tgnCEgRBaJpMtc4/BfC76mtfIO36RrIszwXmArRr106cLykItaDTG7iUXcLOhNOczD6JlUaBg5UFbrY2DArpiK+9+PDdlJkq+a8BnpEkaTHQESiUZTndRH0LQpMSl1XCJ9u3cyJlLU6KfCz1tlhV2VJomUOedRrFFvl8fVLGUeFBV78ePN56EkEOQeYOWzAxoyR/SZJ+BXoCrpIkpQBvA2oAWZbnABuAwUAcUAZMNUa/giD8LSGnlPc3HCA7aTMxhc2YWHx/je2UhjJsFHs4GfgHW3TL2JCwnEGBI3ix/dN42niaOGrBXCRZrp+jK+3atZNFeQdBuD3bzmTyxfJf6ZTvinO5N2ptHn4pO7ErScVGXYmtvxclsjVFOhtyZFcyHWJAAveiY5S4LuOrNuXICjXPt3meKdGTkaSapumEhkCSpCOyLLe7Vbt6W9tHEIRb0xtkvthyjtjdGxiU0wKLylyaXf4JP+ss3B5/GOuOHVH7+FyTzGWDgdyDpzi+7jxxcguk8uZ8sXQRh2IO87n8KQfTD/NRz/9gr7E34zMT6pq48xeEBspgkHnp1yOoYg8SVBSBa84JWpZsxmfav7Dt3RtJcevFfAWZJWz+Yh85BUq80vfhWrWI6cMVWDt5MavfN4Q7h5vgmQjGdLt3/qK2jyA0UB+sPY3T4eMEFUUQFL+ebkEJhK1aiV3fvreV+AEcPWwZ+0Ef2g7wI92rE0kOL/LtfAV+F9KZvGEyZ3LP1PGzEMxFJH9BaIC+3RFH2Y6duJYHExa3iK6PtMPnky9RWFre8bWUSgWdRoUy8LEYip2acTzyZV5cakH//UVM2TCVk9kn6+AZCOYmkr8gNDCrj6dycs0mfMsi8EvaSJd3JuBw7/haX7dZG3cGPdmCMjtfjnd+jdE77Rizs5iHNj3MiewTRohcqE9E8heEBiQhp5QVv6wnsrQ57pkH6P5Me2y69Tba9QNjXBn2bCsqLF052Xkag/dqmLS1hMc3P05ycfKtLyDUmrZCR1Wlvs77EclfEBqIKr2B1xbsomOBD/aFcXS51w6nweOM3o9PuBMDH4+hWOnC2Xteps8hiXGbC3l00xMUaYuM3p/wN73OwKa5p1j79XEMhrpdjCOSvyA0EDO2XaDLuSQUBonw8JP4PPBMnfUVEOVCzwkRZBu8uNTzWQYckmn9ZwLPbnuBKkNVnfXblMmyzPafz5F8Jo+ILl4oFHW710Ikf0FoAA4n5JG9Zg0KZQhuZWtp/9ZXdd5n867etBs
cSLIcRkq7sUz6XY961wE+2PdhnffdFO1fdYnzBzLoMCyI5l2967w/kfwFoZ6r0hv47Met+GtjsC06w6BPX7rtpZy11WFYECHt3Imz60VxcDQvrNFzavtSNlzeYJL+m4oT21M4ujmJqO7Vb7im0KR2+FYZqriQd4GjWUc5nnWcIm0RKoUKlUKFr60v3X27086jHRqlxtyhCsL/LNibQO/4PPRqG6KHVmLtHWqyviVJotfECHKSSzht/SztC15h+ooiXnN6k+YTmxPoEGiyWBqrxFO57F56gcAWrvQYH26y0hpNYodvsbaYxecWs/DsQvIqqg8c87H1wdXKFb1BT5WhivjCeLQGLVYqK/r69+WZ1s/gbVv3H70E4Z/kllTyxbSP8KIb9tI6Js3+3DxxpJWw/L+HcXWqIuK357jkLjHvsTCWjl2MperO9xYI1XLTSvjt4yM4uFkxelpb1BbKWl9T1PYBtHot807OY+GZhRRXFdPNpxsjmo2gtXtrPGw8rmlbrivnUMYhdiTvYM2lNWxO2MzE5hN5JOYR7DR2ZnoGQlP35ZpjBJUHoVdkMeSDJ8wWh4u3LT0fCGfbj2dxGvYcoSu/pufaC3zg8xHvdX/bbHE1ZOXFWjbMOoFao2Twky2MkvjvRKMd87+Qf4Hx68czO3Y2nbw7sXToUmb3nc3AoIE3JH4AK5UVPXx78GbnN1k7ai0Dgwbyw6kfGLl6pNjiLpjFmbQifNYvo8LKB/vQkzh7h5k1nvBOXkR28eJsQTiVHVoy6IhM7pql7E7Zbda4GiK9zsDGb09SWqhl8JMtsHM2/aenRpf8DbKBn8/8zPh148kpz2Fmn5l83vNzIl0ib/sanjaefNDtA34Z8gsKScHkjZPZlritDqMWhBvN/2EdSk1XlFXxjJn2hrnDAaDbuFDsXa044f0UkqeaRzcZ+Gjla2L9/x3aszyO9LhCek+KwCPIPNVTG13yTypK4osjX9DFpwsrhq+gh2+Pu75WtGs0vw75lTDnMF7880W+P/m9ESMVhJs7HJ9L26PH0Fo4EjigCrXGytwhAaCxVNFvanNKivQkDngfCww8tDqHN7a/Z+7QGoyze9M5+WcKrfr6EdbBfIfnNLrkH+gQyOKhi/m619e4WLnU+nquVq7MHzCfQUGD+PLolyw6u8gIUQrCP9v03UKKHO8B6RQD7n3c3OFcwzPYgXaDA4lLtKWi7yAiUsB+5QY2x4tPx7eSmVDEjl/O4xPuROdRzcwaS6NL/gBhTmFGXS5lobTgw24f0se/Dx8d/IjNCZuNdm1BuN6xxDzCz1xGp7YmeoxvvTxVq92gADyC7DkqjUIRbMV9Ow18t+xNMfzzD8pLtGz69iTW9hoGPBqFQnmT9HthC5xcXufxNMrkXxeUCiX/7f5fWrm34rVdr3Eo45C5QxIaqd9nzafA6R70iovc03e0ucOpkUKpoM/kSHRVMnFd3kVhoefRNQW89bvY/VsTg0Fm6/wzlBdXMfDxaKxsb7KX6MRSWDwe9s8GQ90WdxPJ/w5YqiyZ0XsGfnZ+PL/9eVJLUs0dktDInEzMI/TUebQWjoQPrf2wZV1y8rSh4/BgElJsqOo7EP8ccFyxhgNpR8wdWr1zZGMCyWfy6H5fKO4BN5ngPfAtrHgU/DvDpJWgqNulnyL53yEHCwdm9pmJLMtM3zkdnUFn7pCERuTPGXPIc+mNTplCv4H1867/ai37+OERZM9h3WikYAvG7DHw6ZLpovjbVZLP5HFwXTzhnTxp3u0mG0d3fAIbX4GIoTBhOVjW/Qogkfzvgq+dL292epPY7FjmxM4xdzhCIxGfWUSzE2cos/YgqI81ChPV76kNhUK6MvwDlzq8jqQ2MGFdKh/unGnu0OqF0oJKtsw/jbOXDffcrHTD/jmw/X1oOR7uXQBq06z5r/+vrnpqcPBgRjQbwdwTc8X4v2AUO75bSK5rL3SKHIaMqP93/X9x8rShw/AgEjJc0XftSHgqlCyfz+X
8JHOHZlbV4/yn0Wn1DHwsuuYdvCeWwabp1Xf8w78BpemKLojkXwuvd3wdf3t/Xt31KsXaYnOHIzRgJZU63HZuocg+CLf2WpQ3WwlST7Xq44ebvx1HLKdg8FFx/84q/r3s3+YOy6wOb0gg9UIB94wPx8nT5sYGF7fBqicgoBuM+d6kiR9E8q8Va7U1H3b7kOyybGYeFx9zhbu3ZckmKq3aYaCS4fc2nLv+vyiUCno/GEllhURKp+ew0Mv03nSExSe3mjs0s0i9kM/h9fGEd/QkorPXjQ2yzsKyyeAWCeN/MdlQz9VE8q+lGLcYxoWP49dzv3Iu75y5wxEaIINBRr14Nhke7VAGpGJra23ukO6Kq68tbQYGEJfbDG2rMLqdkVn32zuUV1WaOzSTKi/RsnX+GezdrOgxvoZ6TGV58Ot4UFvDA0vA0sH0QSKSv1E82/pZHC0ceX//+xhkg7nDERqYvbtiUWsDkBUa+o3pa+5waqXdoECcvGw44fEUlfZqJm/NY9p685ShNgdZlvl9wVnKS7QMeCQajeV1Qzl6HSx/CApT4L6F4OBjnkARyd8oHCwceLHti8Rmx7I6brW5wxEamJzZH5Hm3R2tbRqhYYHmDqdWlGoFvSdFUFKuIbvDOHxzwWHTLxxPTzB3aCYR+3syiSdz6TomFDf/GkrB//4OXN4OQz4D/44mj+9qIvkbyfBmw2nt3prPj3xOYWWhucMRGoiU1Gycksspt/Ygul+QucMxCs9gB1r29uOCrgtFIQGM3avj7RWNv+Z/VmIR+1ZeIqilKzE9a7ijP7sW9s6Adg9D28mmD/A6jS75a8t17F5+kYKsMpP2q5AUvN7xdQorC/nh1A8m7VtouI7PnEOmZw90ilJ69epk7nCMpuOIYOzdrLgQ/Dgqg4p+Ow8xZ3/jLfymLdex+btTWDto6P1g5I3r+fMTYPXT4N0aBtaPEhiNLvlXafWc2Z3G7qUXMfURlRHOEQwOHsyis4vIKssyad9Cw6PXG7DfvYVs1xgcW2hRaUx7klNdUmuU9J4YQZHWgbQ2g+kVK7Nx24cUVWjNHZrRybLM9kXnKM6rpP9DUVjaqK9toNPCsqkgyzD2B1BZmCfQ6zS65G/jYEGHoUEknsol4USOyft/utXT6Aw6vo391uR9Cw3LofV/Uq6JAUlJvyE9zR2O0fmEOxHVw4fL1v0pcA1g0s40nl8719xhGd3ZPenEHc6i4/AgvEIcb2yw7R1IOwojvgHn+jO01+iSP0BML1+cvW3YtfQiOm3dVsa7np+dH2PDxvLbxd9ILEo0ad9Cw1K64CvSvDpRZZ+Nt1/9LuJ2t7qMboaNkwWXoiYTkaJGOvwDBxPTzB2W0eSmlbBryQV8I5xo0z/gxgYXtsD+mdDhMWg+wvQB/oNGmfyVSgU97g+jOLeCI5tNn4Afb/k4GqWGmcfExi+hZvkZudgkl1Jm40t4Vz9zh1NnNJYqej/YnGLJg8sRg5m0o4zp6z9BbzDtkGxdqNLq2TLvNGpLJX2nNkdSXDfOX5INq58C9yjoV/9OOmuUyR/AJ8yJ0PYeHNucRGG2aSd/Xa1cmRg5kY0JGzmfd96kfQsNw7FvZpLl1hEDOnr2bmfucOqUX6QzUd29SXHvi5UhiI6ntjJr9wFzh1VrOxdfIC+9lL5Tm2PjcN04vixXT/BWFMGYeWbZwXsrjTb5A3QdE4JCKbFz8QWTT/5OiZ6CjdpGnPsr3ECWZWy3rSbVqz3qgBKs7G5ysEcj0mVMCDYOas5GT2DkfgWLDn1BVnGFucO6a2f3pnFubzrtBgXi37yGIbtD8+DiZuj/Hng0N32At6FRJ38bRws6jggm6XQeFw9nmrRve40948LHsTlxM0lFTbu6oXCt83/spVIRhEFlT+c+Lc0djkloLFX0mRpDmcaLDJ9RjI49yb/WrDR3WHclN7WEnb9ewCfcifZDa5jAzT4PW96AkL7VY/31VKNO/gAxPX1xD7Bj99KLVJSa9oC
JB5s/iEpSMf/UfJP2K9Rv2T/OIM2rEzp1OdFt6s/qj7rmG+FMq16epPrcQ4fLzUlJmceOC6a9KastbYWOTXNPobFW0f/hKBTXj/PrtNWncWlsYMQsqIfnL/+l0Sd/hUKi16QIKkp17F0RZ9K+Xa1cGRU6ijWX1pBZ2rBe5ELdMFRWYn36AjmuMbi10KBUNfpfwWt0Gh2Bs6OWC2ETmXCgmOmbf6CiyrQr8u6WbJD5/cezFGaXM+CRKKztaxiu2/FfSI+FYV+DnYfpg7wDRnnlSZI0UJKk85IkxUmS9GoNj0+RJClbkqTjV/48Yox+b5errx2t+/lxdk86qefzTdk1U6KmYJAN/HTmJ5P2K9RPJ5esoNAuBiQ13Xu1Nnc4JqdUK+j/TFf0aitslRPwyl/OjO2nzB3WbTmyOZHLx7PpMroZ3qFONzZI2g+7v4DWEyFyqOkDvEO1Tv6SJCmBmcAgoDkwXpKkmmY4lsiy3OrKn3m17fdOtRsShL2rJX8sPEdVpenuNHztfBkUNIhlF5ZRUFFgsn6F+qli2XxSvdqityrFt1njXNt/Ky6+dnTuZ0+uSwyjTnRk/snvic8pNXdY/yjxdC4H1lwmtL0HLfvUsDS3shhWPg4OfjDwv6YP8C4Y486/AxAny/JlWZa1wGKgfu1m4Mp28wcjKcouZ//qSybt+6HohyjXlbPswjKT9ivUL5W5eVgk5lPoEIlPa8eaz3NtIlqO6YSvRQIlLiPpk5nIq6t3mHxF3u0qzC5j6/encfG2pdekiJr/3Ta+CgVJMOpbsKihmmc9ZIzk7wMkX/V1ypXvXW+MJEknJElaLkmSWXa1+IQ5EdPTlxPbU0i7aLq78FCnUDp7dWbx+cVUGUw76SzUH6fnzSXXpSWSpKRbjxbmDsesJEliwJsjsdTmEZ07mcv5y1gTW/92/laUVrHumxMgwaAnYlDXVH/pzGo4vhC6vQQBnU0f5F0yRvKv6fbl+rfwtUCgLMstgG3AghovJEmPSZJ0WJKkw9nZ2XcfUeYZMNR8qErnUc2wd7Hk95/OUmXC0g8TIieQVZbF74m/m6xPoZ7ZsIJEnzYYbMtxD7A3dzRmZ+nqTO+22cgKa+693J7/27yewrL6c3OkrzKwcc5JinLLGfxECxzcrG5sVJQGa5+vrtbZ84bpznrNGMk/Bbj6Tt4XuOYtXJblXFmW/zrL7TugbU0XkmV5rizL7WRZbufm5nZ30WRfgLn3VB+aUAO1xd/DP/tWmm74p7tvd/zs/Fh4dqHJ+hTqj+JLl1HmyZTZhhPUzq1JD/lcLeDpp2meswobQwRdCvP5cMNpc4cEVG/E+2PhWdIuFtBnciTeoTUUbDMYYNWToKuE0fNAqb6xTT1mjOR/CAiVJClIkiQNcD+w5uoGkiRdfYLxcOCsEfqtmWsotHkQ9nwFh2reXesT5kSL3r6c3J5C0uncOgvlagpJwQMRDxCbHcupnIaxukEwnri535Dl1hoJBZ26RZo7nHpDkiTaPT8K/6RtROS1J/7IQQ4n5Jk1JlmW2b/qEhcOZNJxeDBh7T1rbrjvG7j8Jwz4D7iGmDRGY6h18pdlWQc8A2ymOqkvlWX5tCRJ/ydJ0vArzZ6TJOm0JEmxwHPAlNr2e1OSBAM/gtABsGFadVW9GnQe1Qxnb5vq8zaLTVNjfGTISGzUNiw6u8gk/Qn1h7T7TxJ824JjBS4+tuYOp16x6TmAFg6ncM49Svf8YL766RBanfnOwj6yMYGjm5OI6uFD20E1VOoESD0Cv78LkcOg7RSTxmcsRlnnL8vyBlmWw2RZbibL8gdXvveWLMtrrvz9NVmWo2RZbinLci9Zls8Zo9+bUqpg7HzwiIZlU6o3XVxHpVbS76EoKsqq+OPncyZZaWCrsWVkyEg2JWwiu6wWcxpCg1J8/jwUq6m0bkZoOy8x5FMDj3c+Ifr0TxgMibRLl/h2mXmGf45tTeLAmnjCO3lyz/1hNf9bVRRVH8Ju5wXDZ9T
rXbz/pPFuL7SwhQeWgpUTLBpXvQzrOq6+tnQe2YyEEzmc3mWalQbjI8ajM+hYfnG5SfoTzC9+3gyyXVsgoaBtl1Bzh1MvWYaH4zKoF90OzqZMk41+RyYH96eaNIbj25LY+1sczdq403tSxI0lmqG6Wue6F6Egubpap1UNm70aiMab/AHsvWDicqgqh4VjofzG3b0te/vhF+nE7mUXyU0tqfOQAuwD6OTViRUXV6A3NIxt7ULtyLt3k+DbChwqcfayMXc49Zbbv6Zjpa/AJe1Lyi2z2b/gPIkmmJOTDTK7l11kz/I4glu70e+h5iiUN0mNR36EU8uh12vg37DPXG7cyR/APRLuXwT58bB4QvXM/FUkhUTfqVForFRs/u4U2gpdnYd0b9i9ZJRmsCdtT533JZhXyelTSMVKKq3DCG7tLoZ8/oHa2xvnSQ/S7WQJBzy/okRVxLpZJ+q0Iq+uSs/meaeJ/T2ZFr18GfBo9M3rLSUfgg0vQ7M+1Wv6G7jGn/wBgrrDyNmQuKd6C/Z1ewCs7TX0e6g5+Zll7Fp8oc7D6eXfCxdLF7HjtwlI+n4G2a7RSChp00kM+dyK62OPobSzZcrOQnZFfUiWQsuWeafZuyIOg5FP/yrILGPFJ0e5dDSLrmND6DYu9MYqnX8pyYKlD4K9d/Vwj6KGzV4NTNNI/gAxY6uPUju9Eja/Vj12dxW/CGfaDQ7k3P4Mzu5Nr9NQ1Ao1I0NGsjNlJxmlGXXal2A+siyj37ufy36tkG20uAc0jG3/5qR0dMT1iScIvwwOmSXkB79PjoeaY1uSWDfjOKWFlbe+yC3IsszZveks+c+h6g1cT8bQqq//zT+V6atg2dTqYeP7F4G1c61jqA+aTvIH6PIsdHoaDsyBPV/e8HD7IUH4hDmy89fz5KQU12koY8LGYJANrLzYMA+0EG6t/PQJKJaosIkkoIWzGPK5TU4TJ6Ly9uKZHWr2OOZRKC3ApZcXaRcLWfT2fo5tTUKvv7uloLmpJWyYdYI/fjqLR4Ad97/RgaCW/7ChVJarl4wn7oZhX4FnzF0+q/qnaSV/SYL+70P0GNj2Dhz/5ZqHFQqJ/o9EY2GtYuOck3V6+IufnR9dvLvw28XfxMRvI5X0w0xynSNRoKF1p4a3CchcFBYWuD//PC5plXQ/J1HguYM951czaFprvEMc2ftbHEveO8jZvem3NUcnyzI5KcVs+f40i98/SFpcIZ1HN2P4C62xdbrF2bq7Pque5O32IrS8zzhPsJ6Q6mslvXbt2smHDx+um4vrKuGXcRC/C+77GSKGXPNwxuVCVn52FL9IZ4Y81aLmJV9GsDVxKy/9+RLf9P6Ge/zuqZM+BPM52bUVhwLHU+bWmqc+63vzFSTCDWSDgfjRYyjOy2DK5GJezSsmwe1zpk8ZQ8LJHPb+Fkd+RhkqtYKgVm54hzri4G6Fg2t1/Z2yIi2lBZWkXSwg/kQOxbkVqCyUtOzlS6t+/lja3EYphtjF1XOEMeNg9NwGs55fkqQjsiy3u1U7lSmCqXdUFnDfQvhpRPVY3sTlENTjfw97BjvQ7d5Qdi6+wKH18XQYFlwnYfT064mLpQsr41aK5N/IVF6+gCJPT2lMNL7R9iLx3yFJocD95WlUPvwID531YkZz+DbxFXYcCeOetjEERLuQGV/E+f0ZXDycycVDNa8IUqoV+EU40XZgAMGt3LCyq+H0rZpc3Aarn67OCyNmNpjEfyeaZvKH6prbE5bDD4Ph1/EweQ34/F1vLvoeH7ISiji0PgEXX1uatXY3eghqhZqhwUNZdHYReRV5OFs2jokkAVIXzCbfKQwFVrTq0Mzc4TRItl27YtO9O313HGNJqJKfXSSeWDuOfN/NOHn44xnsgGewAz3uD6OkoJLCrDIKs8tRKCWs7S2wttfg6GGN2uIOV+acW19dGcA9svomUXWbbxgNTNO+HbF2hkkrwdoFFo6pLgV9hSRJ3DMhHI8ge7b9cKbOJoBHhIxAJ+t
Yf3l9nVxfMI/ynTuJ941GVurxixRv6nfL/eVpUFrGG+cjWW9nxWXLYrTzBiEX/r37V1JI2Dlb4hvhTFR3HyK7eBMQ7YKbv92dJ/5Tv1Uv6fSMgclrwdLByM+o/mjayR+qdwE/uApUltXDQLl/l3lWqZUMeiIGC2s162edoKzI+AXgQp1CiXKJYnXcaqNfWzCPqvRUpPQy8p1a4BxqgUrd8NeEm4tlWBiOY0YTsOU0bap8eMs7AENVHqVzB9ZYsuWuyTIc/A5+ewR8O8CkVQ26dMPtEMkfwDkYHlwNsh4WDL/mRWXjYMHgJ2MoL65i07cn0VcZv9rgyJCRnM8/z9ncuqt0LZhO1qLZlNj6opScaNG+buaLmhLXZ59FUqt55YgXxZTwkHd35NJsDLO7w/lNte+gorB6mGfDNAjpVz0HaNn4D9sRyf8vbuHV7/ba4uo3gKs+VroH2NPnwUjSLxWyfaHxK4AOChqEWqFmVdwqo15XMI+irVtJ8G2BjExwjPHnipoatbs7Lg8/jGL7fl7SDCHF4ixDLR4hyeACv94HW96o3oh1N5IPwrf3wNm10PcdGL8YNOatv3Q29yy7U3fXeT8i+V/NqwVMXAllubBgaPURbVeEtvegw7Agzh/I4MjGBKN262DhQG//3qyPX49Wb5qzBYS6oc/LRU4qJMM9BmsfCWv7xjlZaGouD01F5eFB92XniXQMp9JvBwPKXuKUz72wdwbMaAtHFoDuNn9/ss5V1/r6vl/10u8p66vX8ivMlxJ1Bh3fxn7LA+sf4LPDn2GQ6/ZMA5H8r+fbFiaugJJs+PHaN4B2gwMJ6+jBgTXxN11adrdGhoyksLKQHSk7jHpdwbTyls+jUuOEUuFP87b+5g6n0VBYW+M+bRqVp8/wfwW90MqleIZvYmT8KC4PWFC9aGPtczCjDfzxAVzeUV3N92p58XBoHvxyP8zuXN2m17/hmUNmP3g9vjCeBzc+yDfHv6FfQD9+HPgjCqlu03PT3OR1O5IPws+jwNajetbfwQeoPtR59VfHyEooZvgLrfAOqeFsz7ugN+jpv7w/zV2bM6P3DKNcUzC9C6Pv4XxpGElB4xn/VkecvUUJZ2ORZZnECRPRJiRw9OuH+OD0l1gUjsC2sg/rn+2GTfIO2P05JO0D2QBKDVg5g15b/Ud7pWS7gz9EjYRxJm3zAAAgAElEQVSuL4CNi3mfFLD+8nre3fcuGqWGNzq9wcDAgbW6ntjkVVt+HaqXgS4cAz8Mqn4DcApAqVYw6IkYfvv4CBtmn2DMy21x8qz9L7hSoWRw8GAWnllIfkU+TpaNe6VBYyRXVqK7mElih7GoHPQ4eVmbO6RGRZIkPN/4N/FjxtJzSxb7OvVmh7SelHhf3ljtxOfj+iCF9q2ewE3aX13Ftzy/+k1AqQFHfwjpCy4h9WLTllav5eNDH7Pk/BLauLfhk3s+wd3adHNEYtjnn/h1qF4FVFFYvRnsyjJQK1sNw55tiUIhse6bWKMtAR0aPBSdrGNLQs3nDgv1W8nmpegNlsjqMEJaeYpCbnXAsnlzHMeNI/+XX3jTcyoe1u64N1vOytg4Fh9KvtLIAcIGQL//qz5mcchnMPBD6PQkuIbWi8SfWZrJlE1TWHJ+CVOjpjJvwDyTJn4Qyf/WfNrAlHWgK6/+BJBVvRzTwc2aIU+1pKxQy/qZsVRV1r44W5hTGCGOIay7vK7W1xJML3vVErJdwlGgJqKNr7nDabTcXngehY0Npf/9go97fES5IRef0JW8veYkp1ILzR3eLcVmx3L/+vu5VHCJL3t+yUvtXkKtuI1aQ0Ymkv/t8IyBKRsAqfoNIPUoAB5B9vR/JIrspGI2f3cKw12Wmf2LJEkMDR7K8ezjJBcnGyFwwVRkWaby5CXi/GOQ1Xq8QhrvzlBzUzk54f7SS5QdPEjgvkRe7fAqRYoT2Hht5OlfjlJUUXfVeGtrVdwqpm6aipXKikWDF9EnoI/ZYhHJ/3a5R8BDm6p
rAi0YDgnVRzAGtXSjx/hwEk/l8uei87XeAzAkuLrCqCj30LBUHv4DuRjKbaPwjLBDKQq51SnHe8di1bIlmR99zFivgUyInIDOdgcZhu28tCTW6Kd+1ZbOoOOjgx/x5p43aevRll+H/EqIk3nLfItX6J1wDoKHNleXhFg4Gi5uBSC6hw/tBgdydm86B9fF16oLTxtP2nu2Z/3l9UbfTCbUndxlCyix9UOFA1FtAswdTqMnKRR4vvsO+sJCsj7/gmntptHNpxsWnqvZnrSbL7bV/XGst6uwspAntz3JwrMLmRg5kdl9Z+NgYf5PhiL534ZyrZ7UgnISckop0rghT9kArmHV1UBPV5/E1WFYEBFdvDi8PoFTO1NvccV/NjR4KAlFCZzOPW2M8AUTKDlwjAsB0cjIBEabf/lgU2AZEYHzxIkULF1K1YlTfNLjE0Icm2Hnv5BZ+7awNjbt1hepYxfyLzB+/XiOZB7h/7r8H9M7TEelqB+LLOtHFPVIYVkVey/lcDAhj4PxeVzKLqHiuno+GpWCMIdXmWH5XwKXPURxQS72XR+h54Rwyou07Pz1PNb2GoJb/cPxcP+gb0BfPtj/AesuryPaNdoYT0uoQ7qEs+gzq8iJiMbBR3H7NeOFWnN99lmKNm8m7Y03CPrtN+b2n8vUTQ+RGLCAV9apCHJ9gGgf89xlr7u8jnf3voudxo75A+bTyr3Vbf1c4dp1yDodjqNG1ml8YpMX1ZN1x5ILWLQ/iXUn0qjUGbBUK2jj70SUtz3ONhY426hRKRTklWrJKa3kcnYpsZfT+Fj/KT2VsSx1egy3AS/TOcCZtV8dJze1hBHPt8LrLjeBvbj9RY5nH2fb2G0oFaIqZH2W++lLpCzYza6u/6H90CA6DhX1+02pZNdukh99FJdHH8H9X/8iqyyLyRunklKUiUXOE6x65AF8nUy350Kr1/LZ4c/45dwvtHFvw2c9P8PVyvW2frb81GkSJ0zAqkUL/Bf8iHQX5SbEJq/bdDQpn/fWneFYUgE2GiVj2/oyuo0PMT6OaFT//D9eb5A5l9KZ86ufYFzuXGYuzOBtu6m8cE8zbDdWsX7WCUa/3BZnrzvfBDYoaBDbkrZxKPMQnbw63e3TE0ygYMcukryjkVAQ3EIUcjM12+7dcBg7htzv52PXty/uLVvyw8Dvmbh+MpnM4v5FJax76Ckcrev+E9mlgktM3zmd8/nnmRg58Y6Wcepyc0l59lmULs74fPXlXSX+O9Fkx/zTC8t5YfExRs/aS2p+Oe+NiOLAv/vywagY2gY43zLxAygVElH+boQ/vRR9m8k8rVrDdMNcpq0+wTonPQYJ1s2IpbSw8o7j6+HbAxu1DRvjN97N0xNMRC4tpDKhmESfKBQ2Blz9bM0dUpPkMX06Kg8P0l57HUNlJZ42niwetohmDmEU2n3PyEXvUq699WHvd8sgG1hybgn3rbuPrLIsvun9DdM7TL/txC9XVZH6/Avo8/LwnTEDlXPdHwDUJJP/xpPp9P98JxtOZfBMrxC2T+vJpM6B2Frc5QchhRLlsK+g6wsMqdzIn81+IamshO+lEooKK1k7IxZt+Z298CxVlvT2683WxK2i0mc9Vrb+R2SdCr1FJEEt3MSuXjNR2tnh9d57aC9fJvvzLwBwtXJlyYgFtHbqQ65mDQN+fZiMkmyj93069zSTNk7i/QPv086jHStGrLjjM7kzP/yQssOH8Xr/PayiooweY02aVPKv1Ol5Z81pnlx0lGB3W7a9eA/TBoRjc7dJ/2qSBP3ehT5v45+6gZ2BPzCkqwe/WVaQnVLCqpmx6HV3tglsUNAgirXF7EndU/v4hDqRt2kdeU7BqGQrwlp6mzucJs22W1ecHhhP3oIFFP/5JwAWSgsWDPuCnq5TyTPEMui3Yay4sMooy6gzSzN5d9+7jF83npTiFN7r+h6z+s667fH9v+T99DP5v/yK80MP4TBsWK3jul1NJvnnlFQy7tv9/Lg3gUe6BbH
s8c74u9TBJFD3l2Dwp6gubuKtwrd5fVIoO+x1ZMcVsnh27B296Dp5d8LRwlEM/dRXBgNlp5Kql3gqDPhGiGJ85uY+fToW4eGkv/Y6VZnVZdclSWLGkJd40P9rKspceXvfm0zZNIXtSdvRG+68LEtycTLv7nuXQSsGsfLiSiZETmDdqHWMDBl5x2WYi//4g8wPP8SuX1/cp/3rjmOpjSYx4ZuSX8aD3x8krbCcORPbMjDas2477PBo9U7gVU/ST/80kS/9zCffnCHsdD4/zjvB1Edb3tZl1Ao1/QP6s/byWsqqyrBWiyqR9Yn20AYMRQqKHKLwCLJEY9kkfp3qNYWFBT5ffE78mLGkvfwK/j/MR1JWr5Z7pU8P7JRefL7/J06zk+eynsPX1pcxYWPo5NWJCOeIGtfgy7JMSkkKfyb/yR9Jf3A06yhKScmokFFMjZ6Kr93d1XEqP3Wa1H9NwzI6Gu+PP67zCd7rNfqlnhczi5n0/UHKtDrmT2lPu8C6n0j5n9MrYfnD4NOW4rGL+eyTU7jl6qnq4MzzU1ve1vjw4YzDTN08lY+6f8Tg4MEmCFq4XTn/nkDS+gT2dXqPLmOb0bqv2NlbXxSsWEn666/j+tSTuD333DWPzdwexyebz9AqIhkbt32cyIkFwEZtQ7hTOPYae2w0NsiyTHJxMolFiRRpiwAIcQyht39v7gu/r1ZVOLVJSSRMmICkVhO0ZAkqt7vbE1QTsdQTiMsqYdy3+1ApFSx5vDORXiY+lDlqFEhKWD4Vu2X38uory5j54WlUB3P5j/I4rz/Y6pZvAG082uBh7cHG+I0i+dczhQdiifftAkBQjPF+eYXacxg1krJDh8iZNRtNs2Y4DBnyv8ee7hWCg5WaN1craVfRmtXjgrhQFMvhjMPEFcSRUZZBaWEpBtmAr50vg4IGEeQQRHef7vjb1/50tqrMTJKmPgRVOvx/+MGoif9ONNrkn1lUweT5B1EqJJY+3pkgVzOdqNR8OIz7GZY+iOXScTz18lK++88p1Pvz+NzhDP8a+c8z+wpJQf/A/vx67leKtEXYa0z8BibUyJB6lso0HaldorBwlHFwtzJ3SMJVJEnC89130CYnkf7a66i9vbFu3fp/j0/sFICTtYYXlhzjiQUX+O7B7rU+Qet26PLzSXr4YfQFBfj/+CMWIeYr7tYoJ3wLy6uYPP8gBWVafpzawXyJ/y8Rg2HcAkg/jtWqB5j4fCRWSgVFW9KZtfXiLX98UOAgdAYdfyT9YYJghdtRumY+BtSgCqdZCw+xxLMeUmg01WvmPT1JefoZtCkp1zw+pIUXC6Z2IKu4kmEzdvPn+aw6jUdfUEDyo49RlZSM76xZWMWYt3RLo0v+FVV6HvvpMJeyS/h2Ujuz1fW4QcQQGPM9pBzC+fepDHskDDeDgvNrEvj1QOI//mi0azQ+tj5sSthkomCFWyn4cydZLqEo0RDawsvc4Qg3oXJywm/OHGSdjuSHH6Eq89oE3yXElbXPdMPb0YqpPx5i5vY49HVQDlqXnU3ipAepvHABn6+/wqZjB6P3cacaXfLPLq4kJb+cT+9tSbfQO1tvW+eiRsLouZC0l8BTz9NlVBBhVUo2/Hqe/Zdzb/pjkiQxIHAAB9IOUFBRYMKAhZrI5YWUxuUTFxCFrDTgE3Z39ZsE07AIDsJvzhx02dkkTZlCVda1bwB+ztaseKoLw1p488nm84yfu5/kvDKj9V+VmkrCxIloU1Px+3YOdj17Gu3atdHokr+fszXbXrqHEa18zB1KzWLGwtAvIG4rrYs/ILi9O50qVPz3u6P/+IIbGDgQnaxjW9I2EwYr1ET752LkUiWldlG4hVij0ojCe/WddZvW+H03t3qydcpUdNnX7vS11qj46v5WfHZvS86mFzHwy538ciCp1ofClJ8+TcIDE9DnF+D//TxsOneu1fWMySjJX5KkgZIknZckKU6SpFdreNxCkqQlVx4/IElSoDH6vRmr+v7L2HYK9Hk
L6dQy+rkvxMHHhnsKFLww9xAllTWXgYhwjiDAPkAM/dQDRZtXU2blhkZ2J7KVn7nDEW6Tddu2+H87h6r0dBImTqTy0qVrHpckiTFtfdn0Yg9a+jny+sqTjJy1h0MJeXfVX+G69SQ+MAEUCgJ+WnDNhHN9UOvkL0mSEpgJDAKaA+MlSWp+XbOHgXxZlkOAL4CPattvg9ftJej8DKojsxnR/gDWVmpaJVXx2uKadwH/NfRzKOMQOeU5ZghYAMCgp/h4HHH+1au0AqLr2dCi8I+s27fH//vvMZSUkjDuPoq3b7+hjY+jFQsf7sgX97Ukq6iSe+fs4+lFRzmbXnRbfchaLVmffUbatOoNXEHLl2EZEWHsp1Jrxrjz7wDEybJ8WZZlLbAYGHFdmxHAgit/Xw70kZr68ghJgn7vQfRY7Pa/ybCBhTjLChSH8/jlQFKNPzIwcCAG2cC2RDH0Yy7687uoyFSQ4R6F2lnGwU0s8WxorNu0Jmj5MjQBAaQ89TTZM77BoL22eKJCITGqtS9/TLuH5/uEsv18FoO+2sWUHw6y91LOTcu0lMfGEj9mLLnfzcNx3DgCfpiPyqV+nuxmjOTvAyRf9XXKle/V2EaWZR1QCNzwf0SSpMckSTosSdLh7GzjV9+rdxQKGDETfDvgffAROve2JKxKyapl5zifUXxD81CnUJo5NBO1fsyodMMiDJIGhTKUsJZilU9DpfbyImDRQuyHDSVn5kzihw2nZOfOG9pZa1S82C+Mva/2Zlr/ME6mFPLAdwfo/vF2Ptl8jnMZRciyjC47m4z33ifh/vHoi4vxnTUTr/97F0lzZ2cIVOkNnEot5MA/LAAxllqXd5Ak6V5ggCzLj1z5ehLQQZblZ69qc/pKm5QrX1+60uamz9CUJ3mZXUk2zOuNXFXJKsUCki+WsdNfyc/Tut8wfzH7+Gxmx85m273barW9XLg7KaNbcD43jHORTzH8uVb4NTdhuRChTpTs2kXmB/9Bm5CATdfqyqC2PXogqW+sxV9RpWf9iXRWx6axJy4Hz8JMJiTupnv8IZQGHRWDR+H6wvP4+LihVNx8cKNMqyOjsILE3DIuZZdwOaeU02lFnE0vQqszEOVtz/rnut/V8zFleYcU4OpZL1/g+pOT/2qTIkmSCnAA7m4WpTGydYMHliJ9359B9u/wk+2/aZ2i5YNVp3h/3LVF4PoH9mdW7Cy2Jm5lQuQEMwXcNMn5iZQkVHAxKhpZZcA7VCzxbAxsu3fHZs1q8n5eSO6PP5Dy9DMoXVywHzAAi4hwLIKDUXt7Y6ioRC4tpX9hFl0z9lN0fC/6y5fQqdTsCenIT35dSVO7wcxDaJQKHK3V2FupsbNUYZChskpPRZWe3BItxdct7HC0VhPuYceULoHE+DjQyq/uX1vGSP6HgFBJkoKAVOB+4IHr2qwBJgP7gLHAH3J9rShnLu6RMOpbLBePZ1jUVn472Iv0XZnsaZNN15C/a380c2xGiGMIWxK2iORvYpW/L8JQpqTcNgrfUBuU6ka3UrrJkjQaXB5+COfJD1KyaxeFK1ZSsGIFckXFTdtbt2uLzZhROIwYQYyrK2OKK7mcXUJ8TikJuWUUlGkpqqiiuEKHQpKwsLPAUq3E2UaDh70lHvYW+DtbE+xmi7NN3R8xeb1aJ39ZlnWSJD0DbAaUwHxZlk9LkvR/wGFZltcA3wM/S5IUR/Ud//217bdRihgMPV7Ba+fHtG/VEumYM3MWnKT1mz2w1vz9TzUgcACzjs8iszQTDxsPMwbctBRv20SZtQca2YUIscSzUZJUKux69cKuVy9kg4GqtHS08ZepyshAYWmFwsYGpaMDls2bo7C0vOZn3ewscLOzoGNw/ZzgvZ5RCrvJsrwB2HDd99666u8VwL3G6KvR6/kqpB2j/aUnueixmNaZ8PnKM7xxX4v/Nekf2J+Zx2eyLWmbuPs3lcoSSs6kcdG/uvhXQHTD+AUX7p6kUKDx9UHjW083jNaS+Nxa3yi
UMHouCgcvhtu8g0YlUbYzk8Pxf8+NBzsEE+oUyuaEzWYMtGnRn9xIebaKLPfmqF3B3kUs8RQaNpH86yNrZxg7H/uKM/QN3YGPXsmP805Qpf/7DOABAQM4lnWMzNJMMwbadJRuXIpeYYlCEUK4WOIpNAIi+ddXvu2g1+tE5H+Jh3chYbkG5q+78L+H+wf2B2Br4lZzRdh0GAwUHzxBhls4ClQ0a1HHx4AKggmI5F+fdX0BgnowlFdAJZO+NYW0K8XfghyCCHcKF7V+TEBOO0pxiswl/yhktQGvZvWkTLgg1IJI/vWZQgmj5mJpoWOAzxLcdArmzj3+v4f7B/YnNjuWjNIMMwbZ+FX+sRhDuZIKmyg8w21RqsSvjdDwiVdxfWfvBUO/ILxyKU4u6TgnlPP7vupqGv0DxNCPKZT8uZ1SG2/UshPNW9f+DFdBqA9E8m8IokZC1ChGqf+NQWFg/+KLaLU6Ah0CCXcKZ0vCFnNH2HgVpVF8MZ+LAdVH7gVEiSWeQuMgkn9DMfhTrKyV9PZcimMl/LzwNFA99HM8+7gY+qkj+mOrKc/RkOUahcYdbBwtzB2SIBiFSP4NhY0rDP2cGP1S1NYZFB/KIT29WAz91LHSravQKa1RKYKIbNM4N/sITZNRdvgK1zJotVSeO0fFuXPoCwsxFBVjKCur3hru4IDSyQnLyAgswsKQlHdw6ljzEUjRo7jv1Pv8WPY1y+ac4Nl3uvxv6GdS80l196SaIm0ZJccuku7eGQklIWKJp9CIiORvJNqEBIo2baJk+59UnDmDXFX194NqNQprawylpaD7u5qfwsYGq1atsOvXD/shg1Ha2d26o4H/xSGuA82dt3Ehsz+Hd6XSP7A/M47NIKM0A08bkaCMRb60naJUFZdbRiNb6HEPtDd3SIJgNCL514JBq6Vw1SryFy+m8sxZAKxatsTpwUlYtWiJZVRzVC4uSJaWSJKELMsYSkvRZWdTceoUZUePUnbgIBnvvEPmhx9i178/zpMnYxUddfNO7Tyh79v0WTeNk6pO7F1+kcH/7ssMZrA1cau4+zeiiu3LMFSo0Fo1xz/SAcU/1GcXhIZGJP+7YCgtJX/xYvJ+XIAuOxuL5pF4vPYqdgMGoPa8+Z23JEkobW1R2tpiERSEw7BhyLJMxanTFK5cQeHadRStXYv90KG4vfA8Gl/fmi/UdiqK2F8Zbfia9dlvcH5zBeFO4WxO2CySv7EYDJTs2UehXRAq7IhqI5Z4Co2LmPC9A7LBQOHq1VwaOIisTz7FIjQE/x/mE/TbbzhPnvyPif9mJEnCKiYaz7feImT7H7g88TjF27ZxedBgsr/++trho78oFDD0SwJUx7G0PE3Cvgz62w8VG76MKSOW4kQdcQHRyMj4NxdLPIXGRST/21Rx5gyJ4x8gbfqrqDw9CfjlF/znz8emc2eMdRa90tYW9xdeoNnmTdgNGkjOrNkkTJiINjHxxsae0Uidn2K8/Sfo0KPa0wxkxJp/I9EdWUVFjpo8l2isfCQsbW880k8QGjKR/G9B1unImfMt8ePuQ5uaitd//kPgksVYt2ldZ32qPTzw+fhjfL78Am1iIpdHjaZwzZobG/Z4BUtbDeG2aylPrqK7dogo82wkpX9sRKtxRCX5E9MuwNzhCILRieT/D7TJySROepDsL7/Erl9fmq1bi+PoUUgK0/xvsx84kODVq7CKiiLtlelkfz2Da06/tLRH0fdtBlj/TIWymIhzvTiTeZbUklSTxNdoFaVRciaTFM/qifdmLcRpaULjI5L/TZTs2kX8mLFUxsXh/ckn+Hz+OUpH0x/Yrfb0xP/7eTiMHk3OrFmkvTIdg1b7d4NWE8CrBQMdZqIsU9MivRdbE8SGr9qQz22gKMOSRJ8YZFsdzt425g5JEIxOJP/ryLJMztzvSH7scdReXgStXIHDsKFGG9e/G5JGg9cH7+P2wgsUrV1L8uOPY/jrYGmFAsXgj4jSHMCgSaBNan+2n91ltlg
bg4rtq9BXWaC3CCe4hatZ/+0Foa6I5H8Vg1ZL2rSXyf78c+wHDSLw119uvtzSxCRJwvWJx/H674eU7T9AyrPP/f0JwL8T+uajuc/+MxSyEocTIaQUp5g34IZKW0rxoZPkOYWixIKotmK8X2icRPK/Ql9cTPKjj1G0fj1uL72E92eforC2NndYN3AcORKv9/6P0l27SH3xpf8tBVX2ewcXdSZK68OEZ3dk08E/zRtoQ3XpD4pT1Vzyi8Gg1OMTZvqhPkEwBZH8garMLBInTqLsyBG8P/4I18cerdcf9R3HjsXjzTco+f130qa/imwwgFMAcodHmWrzNRWqUrK2SddODgu3pergKirz1RQ5RuMQrEalvoPaS4LQgDT55K9NSSXxgQeoSk7G79s5OAwfbu6QbovzhAm4vfQSRRs2kP311wCo7nkZhUZBpfNmHPK9OLjvrJmjbGAMekp27KDUxhsVzrTu0MzcEQlCnWnSyV+blETipEnoS0rwX7AA265dzR3SHXF59BEcxo4hd863FKxaBdbOKLr/iymqxeRbZXB4TRIGvcHcYTYcKYcoSdCT6B0DQHCMu5kDEoS602STf+XleBInTkIuLyfgxx+wiok2d0h3TJIkvN56C+uOHUl/8y3KDh9G3eUJ7NUuJHmuhQINZ/akmzvMBsNwYg3FmRake8aAa5U4uEVo1Jpk8tcmJ5M0eTKyXo//TwuwjIw0d0h3TdJo8P36KzQ+PqQ8+xxVuYWo+v6bHqqdpNtdYt+ai2grdLe+kEDZ9g1olQ4olEFEt68fq7wEoa40ueRflZlJ0pSpyFotAT/+gGVYmLlDqjWlgwO+s2ZiqKwk9V/TsIi5l/YGF/b7r0ZbYiD292Rzh1j/5Vyk+Hwe6e4tAIgWJR2ERq5JJX9dXh5JDz2MvqAAv3nzsAgNNXdIRmMRHIzXu+9SfuQI2d/MxKfPm/ipz5PidJqjW5IoK9Le+iJNmHxuPUVpliT6tqDKplLs6hUavSaT/A2lpSQ/+hhVKSn4zZndIMf4b8Vh2FAc77uP3HnfIxe40LnKll0BK9Fp9RzdVENlUOF/KnetRltpTZVlGAExTvV6qa8gGEOTSP6yTkfKiy9Sce4cPl99iXX79uYOqc54vP4aFpGRpL32Or3DnqTYMosC17Oc3JlCcV6FucOrn4ozKT56kTzn5ihQ06FLuLkjEoQ61+iTvyzLZLz7LqU7d+H59lvY9exp7pDqlMLCAp/PP0OurESz7BAxlSoOeP0CMhxaH2/u8Oqn8+spTrUg3rcFVWotXs0czB2RINS5Rp/8c+fMoWDZclyeeByncePMHY5JWAQF4f7yNEp37+b+9BbE2xShcIvj3N508jNKzR1evVN1cAXl+ZYU20djF6JEoWz0vxaC0LiTf9GGDWR/9TUOI4bj9vzz5g7HpJweeACbLl0IWXUS31yZDPs5KDUKDq4Vd//XKM+nZN8xChxCUUjWdOnacJf9CsKdaLTJv/zkSdJeex2rtm3xfO+9JjeBJ0kSXh/+B0mjYdpGC3bZleHinkjckSyyk4rNHV79cWELxSkakrxboFfoCInxMndEgmASjTL5V2VmkvLU06hcXPCd8TUKjcbcIZmF2sMDzzffxDu5jDaxCmz5BAtrJQfXibv/v+iPr6Q405Ic15bgo0VtIQq5CU1Do0v+hvJyUp56GkNpKb6zZ6NydjZ3SGZlP3QIlt26Mn6HgROU4umVQMKJHDLiC80dmvlpyyjdvYdi20AUCic6dWs8+z4E4VYaXfLX5+djKC/H+9NPsQxv+Lt3a0uSJHzffRelQon/HitaFf8HSxsVB9dcNndo5nfpd4qTFKR4tkEv6WkpqngKTUijS/5qb2+CV6/Crncvc4dSb6h9fKh4aDQtL8ukZJbh759A8tl80i7mmzs0s5JPraEw3YpMj1ZoPUqwsFKZOyRBMJlaJX9JkpwlSdoqSdLFK/91ukk7vSRJx6/8WVObPm8rLrW6rrtocFo
9+Trx3krUB22JyfoQa3s1+1dfbroHvugqKdu1lWKNPyhdaNVZ1PIRmpba3vm/Cvwuy3Io8PuVr2tSLsv/396dR0dZ5/kef39TVVkJCWRfISEBWcIakKVJUJYB3MD6L+EAABXeSURBVBC0tV0accEZdWbU6VanOfY4ffW21+5rq2jrYE8r2vQA2iCIIE0U2bcQSCTsJBASsofsVLb63T9Ie+wre0ieJPV9nZNTVcmT+n1+cM4nv3rqqecxw1u/usbVUroZT09vjs6fjJcTmjLrSIg/TeHxKvIPuenq/8TX1OS2UBQ2EhcuJozvfqf7UOpS2lr+dwCLW+8vBma18flUO5ow6QG+HCXUHPcj/sgr9Ojlxa7P3XP1b75dSVWBLwXhw6kNrsTH3z2PCFPuq63lH2aMKQRovb3YpY+8RSRdRHaKiP6BsMiosFFsnh5JjZ+Nxl1OBiYWUJxbzakD5VZH61hNTpzb11Mt0RhHKImj9Ipdyv1ctvxFJE1EDlzg646rGCfWGJMM3Ae8ISIXPKxCROa3/pFILy0tvYqnV1fCQzyYPOg2Ft8MzgpPwrf8ip7B3uz+PNe9Vv8nvqb6hIui0BG4cPEPNydbnUipDnfZ8jfGTDHGDLnA1yqgWEQiAFpvSy7yHGdab3OAb4ARF9lukTEm2RiTHBISco1TUpdyW/xtbBkM+TE9ce5tIik+j9K8GnL3l1kdrcOY7JVU5vuSHzmK6l7l+Af4Wh1JqQ7X1t0+q4G5rffnAqv+/w1EpJeIeLXeDwYmAAfbOK66RvGB8QwKHsyy2cG0NHnQc/UrBIb6nN/373KD1X+TE+e29VRJH1yOMGJGXvAANaW6vbaW/6vAVBE5BkxtfYyIJIvIH1q3GQiki0gmsBF41Rij5W+h2+JvY5dvHseGxNFwpImhUSeoOFPH8YwLvnDrXk58RXWOoTAsmRZp5o5p46xOpJQl2lT+xphyY8xkY0xi621F6/fTjTGPtt7fboxJMsYMa7397+sRXF27GXEzsImNbfeNAgf4LPs/9I7wZffnubhaXFbHa1fmwAqq8v0oiEimPKiYgAB/qyMpZYlu9wlfdXlBPkGMjxxPRvNOskaOpOlMM0P8MqksrufonmKr47Wfxjqc29dTZu+PsQfSZ4R7n/dJuTctfzd1W7/bKKovIvf+OXgEuPBctpDgKD/2rMmlpbuu/g+vpSYXikKTafJo4K7pKVYnUsoyWv5u6qaYm/B3+FPhuYfNyeNx1bQwsGEz1WVOjuwosjpeuzCZSzlb0IuisBGUhBTQ0093+Sj3peXvprzt3syIm8HXp9Oouf0JHFEtOFZ9QGi0D3vW5tLS1M1W/7UlnNu9lWLHQIzNj9gRQVYnUspSWv5u7M7EO2loaSAo8gRrhk6EFhcJRWuprWgge+sZq+NdXwdWUJ3rRWH4GJz2Wu6Zpmd9Ve5Ny9+NDQ4aTEJgAn/N+5zalH/CJ7EJr7RPCY9ykL7uJE0NLVZHvG7MvqWUFYZQFpxEYWgevXwDrY6klKW0/N2YiDArYRZZZVncNNKHTwfchN2rhfhD/8O56kayNp62OuL1UXaM2oyDFAaOBnGQODbc6kRKWU7L383dEn8LNrGxq3Q9Z4bOw29wE94ZG4kKN+z7ax4N9U1WR2y7rOVUnfQlP3I8pb6nuX/SdKsTKWU5LX83F+wTTEp0CqtPrObBlBv4c5/JePZsInbXIhrqm9m3Ic/qiG3jctGyZylnqvpT7xdNWXQxPTx7WJ1KKctp+StmJcyi3FnOWTI53Pcn9BjRiE9uFrG968j8Op/66karI167k5upyS7lTNg4WmhiwqRBVidSqlPQ8ldMjJ5IiE8Inxz9hAcnJfFx8DT8IpxEffM2LU0tpK89aXXEa5fxERWngygMH0NO0AHuHDrF6kRKdQpa/gqHh4PZibPZVrCNhMhGdoTcjd/wZnzO5tHXu4jszQVUltRbHfPq1VfQuPsLTrWMwmXz4VxcHd52b6tTKdU
paPkrAOYkzkFEWHFsBfNuTuKPvtMJ7FdH5NcL8bDBrlU5Vke8elnLqDzm4EzkOGodZdye+iOrEynVaWj5KwAiekQwMWoiK46tYMqgYL4JvBPvJIOPqSau8VuO7y2h+GS11TGvnDGYPYvJLxlAZeAADoVnMK3fRKtTKdVpaPmr7/x4wI8pd5azOf8b5k5K4n2P6QQPPEvE1g/w8oIdK493ncs9FuylZn8up4JScdGI90BPHB4Oq1Mp1Wlo+avvTIicQIRfBJ8c/YRZI6JY73cH9v42fANaiD+TRsGRyq5zsfeMxZScDKcw/EaOhKTz2PjZVidSqlPR8lffsXnYmJM4h52FOymsP839KYN5q+U2woYUEXZgNT28mtj+l+Od/5TPzioat6/gmC0V4+EgJ/owI8OGWZ1KqU5Fy1/9ndmJs7GLnaWHl3LvmBjWec2kMcYf/7524rP/zNmierI3F1gd89IyPqLiqJ2CyFSK/Y4waegERMTqVEp1Klr+6u+E+IYwre80Vh5fiQsnc1MH8ppzFmEDTxNUmE6Io4Ldn+firOukp31oacZs/y+OVqfQ4N2LjKgtzBt2l9WplOp0tPzVDzww8AHqmupYdWIVD47rw1deUznbO4ygYQ7itr9L47lmdq/JtTrmhR1eQ1VWGXkhKZzzKMcR60OIb4jVqZTqdLT81Q8khSQxPGQ4Sw4twcsuPJySyMv1swmOO0mgZw0xdQc4sCmfijN1Vkf9AbPjHY6duZGqgH7sjd3Iw8PvtjqSUp2Slr+6oPsH3c/pmtNsKdjCT8f1YbvXjzjtl0DoiFpi932M3cPF5mVHO9ehn/l7qdudydHgGTRRy/Gwb5keN9nqVEp1Slr+6oKmxE4h3C+cPx38E35edh5NTeD5mnvpGVJAYLw//XJWUXDkLEd3daLr/e78PUfyR1MZ2J89MX8lNXYKDpse26/UhWj5qwuye9i5d8C97CraxZGKI/x0XF+O+Q4j3WcC4f2PEnHqG3rbKtn66XHO1XaCs35W5FK/ZQ2He95Oi6kjO3IbTyQ/aHUqpTotLX91UXf1vwsfuw8fZn9IDy87T96UwM+rZuPwdxKcGk3CzndoqGti+4oTVkeFzb/l6MmRVAYmsidyI/EBQ4kPiLc6lVKdlpa/uqgArwDu7n8363LXcbrmNA+MjaUpIJ7VnrcQFLSD3kFC3/JtHN5eSMGRs9YFLT+Bc/Nysn1mgauOb2M38uSoudblUaoL0PJXlzR38Fw8xIMPDnyAl93Gs1P788vKmbT49CTiR83EZn+Kn83J1x8fotHZbE3ITa+RlZNKZWAiu0J34ucZxKSYVGuyKNVFaPmrSwr1DWVWwiw+O/4ZxXXFzBoRRURYBK/zU3xd+wienMSA9N9TU+5ky/JjHR+w9ChVX60lK/BehGr2J6zhgUE/weZh6/gsSnUhWv7qsh4e8jAu4+Kjgx9h8xCenzGA96rHUtQrmdDgzYT41RFXsZ3D2ws5kVHSodnMN79mR9E9NHj3ZnX0djzEzn2D9BO9Sl2Olr+6rGj/aGbGzeSTo59w1nmWmwaEMjY+iPmVDyLiJHJmb2IPLCPQVsXGJYepPdvQMcEKMihYn0FO0DQ8HKXkR6UxNXYGAV4BHTO+Ul2Ylr+6Io8kPYKz2cni7MWICP95+xCyG0JJC3kQ39o0Qu+ezA3b36DZ2UTaB9ntf+ZPVwuuz55ma/1j2Ewzf4jajXg0808jH27fcZXqJrT81RXpF9iPGXEzWHJoCSX1JQwI92fuuL48eSoFZ6/+BPuuI6h/GDccW0rB0Uq2tvf+//Q/snNHHOU9B3GudxUNoZtIjZqsh3cqdYW0/NUVe2rEUzSbZt7NfBeAp6cm0tPPlxf4F2g4S1RKIxFnM4mvy+DApgIOtNepn2uKyfufJez3vZ9gKebd4N2IrYF/HvmP7TOeUt2Qlr+6YjH+Mdwz4B5WHltJTlUOPb0dvDBjIJ8V9mbfDf+Go3QjkY9Mok/6B4R
JEVuWHm2X4//PrXqJtMon8GquZc/wIBxB2xgfkcKA3gOu+1hKdVda/uqqPJb0GF42LxZmLARg9ogoRvftxdzsYTjjp+Ff+gHhTzzAgM2/wc9Wz9p3syg8XnndxjcH1/DlV/E4HYHcMMGb1dVpiO0cT43QVb9SV0PLX12VIJ8gHhryEGl5aWSWZuLhIfzmrmE0t8AzzscwvkH0al5C8KxpJG1+GS+PBla/tZ/ThyvaPLar9DgbFqZzxmsUg32O8XKdJ15BWxgTPpakkKTrMDul3IeWv7pqcwfNJcg7iFd3vUqLq4W+wX4suGUg63KaWDf4t0h9OeGRWwkadQPD0l7Ez9HIF29ncTKr7JrHdJ2rZf0rqzhmUuhXvYMTM1M40fg52Gr55xFPXsfZKeUetPzVVfN1+PLc6Oc4UH6A5UeXA3D/jbGk9A/h2W02iqYvQsoOET2+lF43DmPo+l8Q4OXki3ez2LHyOC3NV3cYaFNDM+teWk6OcwSJJRsY/KsH+e2ub/EO3sLMuJkMDx3eHtNUqlvT8lfXZEbcDMZFjOPNjDcpqS9BRHhtzlC87DYe3R5I4y1v4pG/hZhJdfS+aSxJ654nPqCCjPV5/OW1vVQUXtlVwHL3l/Dn59ZxsqovA05/Rur/nc9/bitGgr7A027jmVHPtPNMleqe2lT+InK3iGSLiEtEki+x3XQROSIix0XkhbaMqToHEeHFsS/S7Grm1d2vAhAe4M3rPx5G9plqnjkyGNe0V5Bja4gakk3vmTfT97MXGVX7JdWldSz91S7WvJ1Jzr7SH3wgrNHZzMlvy/jinf2sfe8ApqKK5INvMeHXj/KXEhsbT+3Ao0cWjyY9QrhfuBXTV6rLs7fx9w8As4H/utgGImID3gGmAvnAHhFZbYw52MaxlcViesYwf+h8Fu5byKbTm0iNSWXywDBemH4Dv153mMSwaTx9Tx9kxeNEhBfht+BJbO8sYfTBbZTP/BdOnbKz7kA5dk8PfHt64uPviTFQmleDcRlsrgb65a5lgN9hopd/TKbTk/9Yto2gxHUE+kXy0OCHrP4nUKrLatPK3xhzyBhz5DKbjQGOG2NyjDGNwFLgjraMqzqPeYPnkRCYwC+3/5LS+lIA5qfEM2dkNG+kHePzxlHwyHrEZiMg50XinxhI0NhBRHz6EmPWPMHohq+I71VJL49KpKocU1pIfPk3DN//Jj/a9nNGT2im74r1lPkE8I9/yqB31FbOST4/H/0zvO3eFs9eqa6rrSv/KxEFnP7e43zgxg4YV3UAh83Baymvcd8X9/H8ludZNHURdg87/3v2EPIq6nh62X6a7hrK7Mc3w9bfYd+9iOjwZhqfnU5Vjp3q3Vvw37Hi757TJ6iRnmOi6fn477EPTqG+sZnHP96L0+MEth7ruSXuFqb2mWrRjJXqHi5b/iKSBlxox+oCY8yqKxhDLvA9c5Gx5gPzAWJjY6/gqVVnkNgrkQVjF/Dithd5L/M9nhrxFF52G398aDSPf7yXZ5dnUj5zII9N+18w9gnY/Bs8D64ixLuM4InQ0uiBcQE2HyQyCfv05yBhMgDF1U4eWbyHg0UlxCR9io8jnAU3LrB2wkp1A5ctf2PMlDaOkQ/EfO9xNHDmImMtAhYBJCcnX/APhOqcZiXMYm/xXhZlLWJk6EjGR43H39vBB/NG88yy/byy9hAFled4fvoN+Nz6Otz6OtSVISWHsJ+rgNBB0DsevncRluwzVTzyYTo1ziZSx21lX0Upb978If6e/hbOVKnuoSMO9dwDJIpInIh4AvcCqztgXNXBfnHjL+gX2I9nNz1LZmkmAF52Gwt/MpKHxvflw+0nmfL6Jr48UIQxBvyCIW4iDLoDghO/K/4aZxPvbDzOj9/bgQjMnZFHenkajw97XI/pV+o6EWOufYEtIncCC4EQoBLYb4z5BxGJBP5gjJnZut1M4A3ABvzRGPPK5Z47OTnZpKenX3M2ZY3iumLmrZ/HWedZ3p/2PkOCh3z
3s1055fzH6mwOF9Uwum8vJg0IZWx8bxJC/SmrbaCw0smu3HI+3H6SGmczqf1DmDjqGG/uf5VpfabxWsprenlGpS5DRPYaYy566P1327Wl/NuTln/XVVRXxENfPkR1YzXvT3ufwUGDv/tZc4uLj3acYumePI4W117w96cPDufJmxLIbdjEgq0LSI1O5XeTfofD5uioKSjVZWn5K0udqT3DvC/nUeGs4PkxzzMncQ4if//ef3ltA3tOVnCyvJ6wnl5EBPjQN8iPEH8HH2Z/yFv73mJM+Bjenvw2XjYvi2aiVNei5a8sV1JfwoKtC9hZuJPJsZN5adxLBHoHXvJ38qrzWLB1AftL9zO1z1RenvAyvg7fDkqsVNen5a86BZdxsTh7MW/tewsfmw/T46Zze7/bGRYy7LtXAk2uJvYU7mFD3ga+yPkCu4edBTcuYGbczB+8WlBKXZqWv+pUDlccZnH2YtJOpeFscRLoFYiP3QeHh4OzDWepaazBx+7DzbE388zIZwjzC7M6slJdkpa/6pRqG2vZcGoDWWVZNLU00ehqxNfuS2p0KuMix+kpG5RqIy1/pZRyQ1da/no+f6WUckNa/kop5Ya0/JVSyg1p+SullBvS8ldKKTek5a+UUm5Iy18ppdyQlr9SSrmhTvshLxEpBU5ZneMaBANlVofoYDpn96Bz7hr6GGNCLrdRpy3/rkpE0q/k03Xdic7ZPeicuxfd7aOUUm5Iy18ppdyQlv/1t8jqABbQObsHnXM3ovv8lVLKDenKXyml3JCWfzsSkZ+JiBGRYKuztDcR+Y2IHBaRLBFZKSKXvlhvFyUi00XkiIgcF5EXrM7T3kQkRkQ2isghEckWkX+1OlNHERGbiOwTkTVWZ2kPWv7tRERigKlAntVZOsgGYIgxZihwFPh3i/NcdyJiA94BZgCDgJ+IyCBrU7W7ZuDfjDEDgbHAk24w57/5V+CQ1SHai5Z/+/kd8BzgFm+qGGP+aoxpbn24E4i2Mk87GQMcN8bkGGMagaXAHRZnalfGmEJjTEbr/RrOl2GUtanan4hEA7cAf7A6S3vR8m8HInI7UGCMybQ6i0UeBtZZHaIdRAGnv/c4Hzcowr8Rkb7ACGCXtUk6xBucX7y5rA7SXuxWB+iqRCQNCL/AjxYAvwCmdWyi9nepORtjVrVus4DzuwqWdGS2DiIX+J5bvLITkR7AX4CnjTHVVudpTyJyK1BijNkrIpOsztNetPyvkTFmyoW+LyJJQByQKSJwfvdHhoiMMcYUdWDE6+5ic/4bEZkL3ApMNt3zGOJ8IOZ7j6OBMxZl6TAi4uB88S8xxqywOk8HmADcLiIzAW+gp4j8yRjzgMW5ris9zr+dichJINkY09VODnVVRGQ68DqQaowptTpPexARO+ffzJ4MFAB7gPuMMdmWBmtHcn4FsxioMMY8bXWejta68v+ZMeZWq7Ncb7rPX10vbwP+wAYR2S8i71kd6HprfUP7KWA959/4XN6di7/VBOBB4ObW/9f9rSti1cXpyl8ppdyQrvyVUsoNafkrpZQb0vJXSik3pOWvlFJuSMtfKaXckJa/Ukq5IS1/pZRyQ1r+Sinlhv4fC0rLwvqcHgkAAAAASUVORK5CYII=\n", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAX8AAAD8CAYAAACfF6SlAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzs3XWYlFX7wPHvM7Xd3cEmu0t3SXeDKAICdteLor7mT3197UBCRBQFpaQbFOmOpWNhu7t3Z2fm+f2x+EosEjs7s3E+18Ulu3P2OffI7D3PnLiPJMsygiAIQtOiMHcAgiAIgumJ5C8IgtAEieQvCILQBInkLwiC0ASJ5C8IgtAEieQvCILQBInkLwiC0ASJ5C8IgtAEieQvCILQBKnMHcDNuLq6yoGBgeYOQxAEoUE5cuRIjizLbrdqV2+Tf2BgIIcPHzZ3GIIgCA2KJEmJt9NODPsIgiA0QSL5C4IgNEFGSf6SJM2XJClLkqRTN3lckiTpa0mS4iRJOiFJUhtj9CsIgiDcHWPd+f8IDPyHxwcBoVf+PAbMNlK/giAIwl0wSvKXZXknkPcPTUYAP8nV9gOOkiR5GaNvQRAE4c6ZaszfB0i+6uuUK98TBEEQzMBUyV+q4Xs3HCEmSdJjkiQdliTpcHZ2tgnCEgRBaJpMtc4/BfC76mtfIO36RrIszwXmArRr106cLykItaDTG7iUXcLOhNOczD6JlUaBg5UFbrY2DArpiK+9+PDdlJkq+a8BnpEkaTHQESiUZTndRH0LQpMSl1XCJ9u3cyJlLU6KfCz1tlhV2VJomUOedRrFFvl8fVLGUeFBV78ePN56EkEOQeYOWzAxoyR/SZJ+BXoCrpIkpQBvA2oAWZbnABuAwUAcUAZMNUa/giD8LSGnlPc3HCA7aTMxhc2YWHx/je2UhjJsFHs4GfgHW3TL2JCwnEGBI3ix/dN42niaOGrBXCRZrp+jK+3atZNFeQdBuD3bzmTyxfJf6ZTvinO5N2ptHn4pO7ErScVGXYmtvxclsjVFOhtyZFcyHWJAAveiY5S4LuOrNuXICjXPt3meKdGTkaSapumEhkCSpCOyLLe7Vbt6W9tHEIRb0xtkvthyjtjdGxiU0wKLylyaXf4JP+ss3B5/GOuOHVH7+FyTzGWDgdyDpzi+7jxxcguk8uZ8sXQRh2IO87n8KQfTD/NRz/9gr7E34zMT6pq48xeEBspgkHnp1yOoYg8SVBSBa84JWpZsxmfav7Dt3RtJcevFfAWZJWz+Yh85BUq80vfhWrWI6cMVWDt5MavfN4Q7h5vgmQjGdLt3/qK2jyA0UB+sPY3T4eMEFUUQFL+ebkEJhK1aiV3fvreV+AEcPWwZ+0Ef2g7wI92rE0kOL/LtfAV+F9KZvGEyZ3LP1PGzEMxFJH9BaIC+3RFH2Y6duJYHExa3iK6PtMPnky9RWFre8bWUSgWdRoUy8LEYip2acTzyZV5cakH//UVM2TCVk9kn6+AZCOYmkr8gNDCrj6dycs0mfMsi8EvaSJd3JuBw7/haX7dZG3cGPdmCMjtfjnd+jdE77Rizs5iHNj3MiewTRohcqE9E8heEBiQhp5QVv6wnsrQ57pkH6P5Me2y69Tba9QNjXBn2bCsqLF052Xkag/dqmLS1hMc3P05ycfKtLyDUmrZCR1Wlvs77EclfEBqIKr2B1xbsomOBD/aFcXS51w6nweOM3o9PuBMDH4+hWOnC2Xteps8hiXGbC3l00xMUaYuM3p/wN73OwKa5p1j79XEMhrpdjCOSvyA0EDO2XaDLuSQUBonw8JP4PPBMnfUVEOVCzwkRZBu8uNTzWQYckmn9ZwLPbnuBKkNVnfXblMmyzPafz5F8Jo+ILl4oFHW710Ikf0FoAA4n5JG9Zg0KZQhuZWtp/9ZXdd5n867etBscSLIcRkq7sUz6XY961wE+2PdhnffdFO1fdYnzBzLoMCyI5l2967w/kfwFoZ6r0hv
47Met+GtjsC06w6BPX7rtpZy11WFYECHt3Imz60VxcDQvrNFzavtSNlzeYJL+m4oT21M4ujmJqO7Vb7im0KR2+FYZqriQd4GjWUc5nnWcIm0RKoUKlUKFr60v3X27086jHRqlxtyhCsL/LNibQO/4PPRqG6KHVmLtHWqyviVJotfECHKSSzht/SztC15h+ooiXnN6k+YTmxPoEGiyWBqrxFO57F56gcAWrvQYH26y0hpNYodvsbaYxecWs/DsQvIqqg8c87H1wdXKFb1BT5WhivjCeLQGLVYqK/r69+WZ1s/gbVv3H70E4Z/kllTyxbSP8KIb9tI6Js3+3DxxpJWw/L+HcXWqIuK357jkLjHvsTCWjl2MperO9xYI1XLTSvjt4yM4uFkxelpb1BbKWl9T1PYBtHot807OY+GZhRRXFdPNpxsjmo2gtXtrPGw8rmlbrivnUMYhdiTvYM2lNWxO2MzE5hN5JOYR7DR2ZnoGQlP35ZpjBJUHoVdkMeSDJ8wWh4u3LT0fCGfbj2dxGvYcoSu/pufaC3zg8xHvdX/bbHE1ZOXFWjbMOoFao2Twky2MkvjvRKMd87+Qf4Hx68czO3Y2nbw7sXToUmb3nc3AoIE3JH4AK5UVPXx78GbnN1k7ai0Dgwbyw6kfGLl6pNjiLpjFmbQifNYvo8LKB/vQkzh7h5k1nvBOXkR28eJsQTiVHVoy6IhM7pql7E7Zbda4GiK9zsDGb09SWqhl8JMtsHM2/aenRpf8DbKBn8/8zPh148kpz2Fmn5l83vNzIl0ib/sanjaefNDtA34Z8gsKScHkjZPZlritDqMWhBvN/2EdSk1XlFXxjJn2hrnDAaDbuFDsXa044f0UkqeaRzcZ+Gjla2L9/x3aszyO9LhCek+KwCPIPNVTG13yTypK4osjX9DFpwsrhq+gh2+Pu75WtGs0vw75lTDnMF7880W+P/m9ESMVhJs7HJ9L26PH0Fo4EjigCrXGytwhAaCxVNFvanNKivQkDngfCww8tDqHN7a/Z+7QGoyze9M5+WcKrfr6EdbBfIfnNLrkH+gQyOKhi/m619e4WLnU+nquVq7MHzCfQUGD+PLolyw6u8gIUQrCP9v03UKKHO8B6RQD7n3c3OFcwzPYgXaDA4lLtKWi7yAiUsB+5QY2x4tPx7eSmVDEjl/O4xPuROdRzcwaS6NL/gBhTmFGXS5lobTgw24f0se/Dx8d/IjNCZuNdm1BuN6xxDzCz1xGp7YmeoxvvTxVq92gADyC7DkqjUIRbMV9Ow18t+xNMfzzD8pLtGz69iTW9hoGPBqFQnmT9HthC5xcXufxNMrkXxeUCiX/7f5fWrm34rVdr3Eo45C5QxIaqd9nzafA6R70iovc03e0ucOpkUKpoM/kSHRVMnFd3kVhoefRNQW89bvY/VsTg0Fm6/wzlBdXMfDxaKxsb7KX6MRSWDwe9s8GQ90WdxPJ/w5YqiyZ0XsGfnZ+PL/9eVJLUs0dktDInEzMI/TUebQWjoQPrf2wZV1y8rSh4/BgElJsqOo7EP8ccFyxhgNpR8wdWr1zZGMCyWfy6H5fKO4BN5ngPfAtrHgU/DvDpJWgqNulnyL53yEHCwdm9pmJLMtM3zkdnUFn7pCERuTPGXPIc+mNTplCv4H1867/ai37+OERZM9h3WikYAvG7DHw6ZLpovjbVZLP5HFwXTzhnTxp3u0mG0d3fAIbX4GIoTBhOVjW/Qogkfzvgq+dL292epPY7FjmxM4xdzhCIxGfWUSzE2cos/YgqI81ChPV76kNhUK6MvwDlzq8jqQ2MGFdKh/unGnu0OqF0oJKtsw/jbOXDffcrHTD/jmw/X1oOR7uXQBq06z5r/+vrnpqcPBgRjQbwdwTc8X4v2AUO75bSK5rL3SKHIaMqP93/X9x8rShw/AgEjJc0XftSHgqlCyfz+X8JHOHZlbV4/yn0Wn1DHwsuuYdvCeWwabp1Xf8w78BpemKLojkXwuvd3wdf3t/Xt3
1KsXaYnOHIzRgJZU63HZuocg+CLf2WpQ3WwlST7Xq44ebvx1HLKdg8FFx/84q/r3s3+YOy6wOb0gg9UIB94wPx8nT5sYGF7fBqicgoBuM+d6kiR9E8q8Va7U1H3b7kOyybGYeFx9zhbu3ZckmKq3aYaCS4fc2nLv+vyiUCno/GEllhURKp+ew0Mv03nSExSe3mjs0s0i9kM/h9fGEd/QkorPXjQ2yzsKyyeAWCeN/MdlQz9VE8q+lGLcYxoWP49dzv3Iu75y5wxEaIINBRr14Nhke7VAGpGJra23ukO6Kq68tbQYGEJfbDG2rMLqdkVn32zuUV1WaOzSTKi/RsnX+GezdrOgxvoZ6TGV58Ot4UFvDA0vA0sH0QSKSv1E82/pZHC0ceX//+xhkg7nDERqYvbtiUWsDkBUa+o3pa+5waqXdoECcvGw44fEUlfZqJm/NY9p685ShNgdZlvl9wVnKS7QMeCQajeV1Qzl6HSx/CApT4L6F4OBjnkARyd8oHCwceLHti8Rmx7I6brW5wxEamJzZH5Hm3R2tbRqhYYHmDqdWlGoFvSdFUFKuIbvDOHxzwWHTLxxPTzB3aCYR+3syiSdz6TomFDf/GkrB//4OXN4OQz4D/44mj+9qIvkbyfBmw2nt3prPj3xOYWWhucMRGoiU1Gycksspt/Ygul+QucMxCs9gB1r29uOCrgtFIQGM3avj7RWNv+Z/VmIR+1ZeIqilKzE9a7ijP7sW9s6Adg9D28mmD/A6jS75a8t17F5+kYKsMpP2q5AUvN7xdQorC/nh1A8m7VtouI7PnEOmZw90ilJ69epk7nCMpuOIYOzdrLgQ/Dgqg4p+Ow8xZ3/jLfymLdex+btTWDto6P1g5I3r+fMTYPXT4N0aBtaPEhiNLvlXafWc2Z3G7qUXMfURlRHOEQwOHsyis4vIKssyad9Cw6PXG7DfvYVs1xgcW2hRaUx7klNdUmuU9J4YQZHWgbQ2g+kVK7Nx24cUVWjNHZrRybLM9kXnKM6rpP9DUVjaqK9toNPCsqkgyzD2B1BZmCfQ6zS65G/jYEGHoUEknsol4USOyft/utXT6Aw6vo391uR9Cw3LofV/Uq6JAUlJvyE9zR2O0fmEOxHVw4fL1v0pcA1g0s40nl8719xhGd3ZPenEHc6i4/AgvEIcb2yw7R1IOwojvgHn+jO01+iSP0BML1+cvW3YtfQiOm3dVsa7np+dH2PDxvLbxd9ILEo0ad9Cw1K64CvSvDpRZZ+Nt1/9LuJ2t7qMboaNkwWXoiYTkaJGOvwDBxPTzB2W0eSmlbBryQV8I5xo0z/gxgYXtsD+mdDhMWg+wvQB/oNGmfyVSgU97g+jOLeCI5tNn4Afb/k4GqWGmcfExi+hZvkZudgkl1Jm40t4Vz9zh1NnNJYqej/YnGLJg8sRg5m0o4zp6z9BbzDtkGxdqNLq2TLvNGpLJX2nNkdSXDfOX5INq58C9yjoV/9OOmuUyR/AJ8yJ0PYeHNucRGG2aSd/Xa1cmRg5kY0JGzmfd96kfQsNw7FvZpLl1hEDOnr2bmfucOqUX6QzUd29SXHvi5UhiI6ntjJr9wFzh1VrOxdfIC+9lL5Tm2PjcN04vixXT/BWFMGYeWbZwXsrjTb5A3QdE4JCKbFz8QWTT/5OiZ6CjdpGnPsr3ECWZWy3rSbVqz3qgBKs7G5ysEcj0mVMCDYOas5GT2DkfgWLDn1BVnGFucO6a2f3pnFubzrtBgXi37yGIbtD8+DiZuj/Hng0N32At6FRJ38bRws6jggm6XQeFw9nmrRve40948LHsTlxM0lFTbu6oXCt83/spVIRhEFlT+c+Lc0djkloLFX0mRpDmcaLDJ9RjI49yb/WrDR3WHclN7WEnb9ewCfcifZDa5jAzT4PW96AkL7VY/31VKNO/gAxPX1xD7Bj99KLVJSa9oCJB5s/iEpSMf/UfJP2K9Rv2T/OIM2rEzp1OdFt6s/qj7rmG+FMq16epPrcQ4fLzUl
JmceOC6a9KastbYWOTXNPobFW0f/hKBTXj/PrtNWncWlsYMQsqIfnL/+l0Sd/hUKi16QIKkp17F0RZ9K+Xa1cGRU6ijWX1pBZ2rBe5ELdMFRWYn36AjmuMbi10KBUNfpfwWt0Gh2Bs6OWC2ETmXCgmOmbf6CiyrQr8u6WbJD5/cezFGaXM+CRKKztaxiu2/FfSI+FYV+DnYfpg7wDRnnlSZI0UJKk85IkxUmS9GoNj0+RJClbkqTjV/48Yox+b5errx2t+/lxdk86qefzTdk1U6KmYJAN/HTmJ5P2K9RPJ5esoNAuBiQ13Xu1Nnc4JqdUK+j/TFf0aitslRPwyl/OjO2nzB3WbTmyOZHLx7PpMroZ3qFONzZI2g+7v4DWEyFyqOkDvEO1Tv6SJCmBmcAgoDkwXpKkmmY4lsiy3OrKn3m17fdOtRsShL2rJX8sPEdVpenuNHztfBkUNIhlF5ZRUFFgsn6F+qli2XxSvdqityrFt1njXNt/Ky6+dnTuZ0+uSwyjTnRk/snvic8pNXdY/yjxdC4H1lwmtL0HLfvUsDS3shhWPg4OfjDwv6YP8C4Y486/AxAny/JlWZa1wGKgfu1m4Mp28wcjKcouZ//qSybt+6HohyjXlbPswjKT9ivUL5W5eVgk5lPoEIlPa8eaz3NtIlqO6YSvRQIlLiPpk5nIq6t3mHxF3u0qzC5j6/encfG2pdekiJr/3Ta+CgVJMOpbsKihmmc9ZIzk7wMkX/V1ypXvXW+MJEknJElaLkmSWXa1+IQ5EdPTlxPbU0i7aLq78FCnUDp7dWbx+cVUGUw76SzUH6fnzSXXpSWSpKRbjxbmDsesJEliwJsjsdTmEZ07mcv5y1gTW/92/laUVrHumxMgwaAnYlDXVH/pzGo4vhC6vQQBnU0f5F0yRvKv6fbl+rfwtUCgLMstgG3AghovJEmPSZJ0WJKkw9nZ2XcfUeYZMNR8qErnUc2wd7Hk95/OUmXC0g8TIieQVZbF74m/m6xPoZ7ZsIJEnzYYbMtxD7A3dzRmZ+nqTO+22cgKa+693J7/27yewrL6c3OkrzKwcc5JinLLGfxECxzcrG5sVJQGa5+vrtbZ84bpznrNGMk/Bbj6Tt4XuOYtXJblXFmW/zrL7TugbU0XkmV5rizL7WRZbufm5nZ30WRfgLn3VB+aUAO1xd/DP/tWmm74p7tvd/zs/Fh4dqHJ+hTqj+JLl1HmyZTZhhPUzq1JD/lcLeDpp2meswobQwRdCvP5cMNpc4cEVG/E+2PhWdIuFtBnciTeoTUUbDMYYNWToKuE0fNAqb6xTT1mjOR/CAiVJClIkiQNcD+w5uoGkiRdfYLxcOCsEfqtmWsotHkQ9nwFh2reXesT5kSL3r6c3J5C0uncOgvlagpJwQMRDxCbHcupnIaxukEwnri535Dl1hoJBZ26RZo7nHpDkiTaPT8K/6RtROS1J/7IQQ4n5Jk1JlmW2b/qEhcOZNJxeDBh7T1rbrjvG7j8Jwz4D7iGmDRGY6h18pdlWQc8A2ymOqkvlWX5tCRJ/ydJ0vArzZ6TJOm0JEmxwHPAlNr2e1OSBAM/gtABsGFadVW9GnQe1Qxnb5vq8zaLTVNjfGTISGzUNiw6u8gk/Qn1h7T7TxJ824JjBS4+tuYOp16x6TmAFg6ncM49Svf8YL766RBanfnOwj6yMYGjm5OI6uFD20E1VOoESD0Cv78LkcOg7RSTxmcsRlnnL8vyBlmWw2RZbibL8gdXvveWLMtrrvz9NVmWo2RZbinLci9Zls8Zo9+bUqpg7HzwiIZlU6o3XVxHpVbS76EoKsqq+OPncyZZaWCrsWVkyEg2JWwiu6wWcxpCg1J8/jwUq6m0bkZoOy8x5FMDj3c+Ifr0TxgMibRLl/h2mXmGf45tTeLAmnjCO3lyz/1hNf9bVRRVH8Ju5wXDZ9TrXbz/pPFuL7SwhQeWgpUTLBpXvQzrOq6+tnQe2YyEEzmc3mWalQbjI8ajM+hYfnG
5SfoTzC9+3gyyXVsgoaBtl1Bzh1MvWYaH4zKoF90OzqZMk41+RyYH96eaNIbj25LY+1sczdq403tSxI0lmqG6Wue6F6Egubpap1UNm70aiMab/AHsvWDicqgqh4VjofzG3b0te/vhF+nE7mUXyU0tqfOQAuwD6OTViRUXV6A3NIxt7ULtyLt3k+DbChwqcfayMXc49Zbbv6Zjpa/AJe1Lyi2z2b/gPIkmmJOTDTK7l11kz/I4glu70e+h5iiUN0mNR36EU8uh12vg37DPXG7cyR/APRLuXwT58bB4QvXM/FUkhUTfqVForFRs/u4U2gpdnYd0b9i9ZJRmsCdtT533JZhXyelTSMVKKq3DCG7tLoZ8/oHa2xvnSQ/S7WQJBzy/okRVxLpZJ+q0Iq+uSs/meaeJ/T2ZFr18GfBo9M3rLSUfgg0vQ7M+1Wv6G7jGn/wBgrrDyNmQuKd6C/Z1ewCs7TX0e6g5+Zll7Fp8oc7D6eXfCxdLF7HjtwlI+n4G2a7RSChp00kM+dyK62OPobSzZcrOQnZFfUiWQsuWeafZuyIOg5FP/yrILGPFJ0e5dDSLrmND6DYu9MYqnX8pyYKlD4K9d/Vwj6KGzV4NTNNI/gAxY6uPUju9Eja/Vj12dxW/CGfaDQ7k3P4Mzu5Nr9NQ1Ao1I0NGsjNlJxmlGXXal2A+siyj37ufy36tkG20uAc0jG3/5qR0dMT1iScIvwwOmSXkB79PjoeaY1uSWDfjOKWFlbe+yC3IsszZveks+c+h6g1cT8bQqq//zT+V6atg2dTqYeP7F4G1c61jqA+aTvIH6PIsdHoaDsyBPV/e8HD7IUH4hDmy89fz5KQU12koY8LGYJANrLzYMA+0EG6t/PQJKJaosIkkoIWzGPK5TU4TJ6Ly9uKZHWr2OOZRKC3ApZcXaRcLWfT2fo5tTUKvv7uloLmpJWyYdYI/fjqLR4Ad97/RgaCW/7ChVJarl4wn7oZhX4FnzF0+q/qnaSV/SYL+70P0GNj2Dhz/5ZqHFQqJ/o9EY2GtYuOck3V6+IufnR9dvLvw28XfxMRvI5X0w0xynSNRoKF1p4a3CchcFBYWuD//PC5plXQ/J1HguYM951czaFprvEMc2ftbHEveO8jZvem3NUcnyzI5KcVs+f40i98/SFpcIZ1HN2P4C62xdbrF2bq7Pque5O32IrS8zzhPsJ6Q6mslvXbt2smHDx+um4vrKuGXcRC/C+77GSKGXPNwxuVCVn52FL9IZ4Y81aLmJV9GsDVxKy/9+RLf9P6Ge/zuqZM+BPM52bUVhwLHU+bWmqc+63vzFSTCDWSDgfjRYyjOy2DK5GJezSsmwe1zpk8ZQ8LJHPb+Fkd+RhkqtYKgVm54hzri4G6Fg2t1/Z2yIi2lBZWkXSwg/kQOxbkVqCyUtOzlS6t+/lja3EYphtjF1XOEMeNg9NwGs55fkqQjsiy3u1U7lSmCqXdUFnDfQvhpRPVY3sTlENTjfw97BjvQ7d5Qdi6+wKH18XQYFlwnYfT064mLpQsr41aK5N/IVF6+gCJPT2lMNL7R9iLx3yFJocD95WlUPvwID531YkZz+DbxFXYcCeOetjEERLuQGV/E+f0ZXDycycVDNa8IUqoV+EU40XZgAMGt3LCyq+H0rZpc3Aarn67OCyNmNpjEfyeaZvKH6prbE5bDD4Ph1/EweQ34/F1vLvoeH7ISiji0PgEXX1uatXY3eghqhZqhwUNZdHYReRV5OFs2jokkAVIXzCbfKQwFVrTq0Mzc4TRItl27YtO9O313HGNJqJKfXSSeWDuOfN/NOHn44xnsgGewAz3uD6OkoJLCrDIKs8tRKCWs7S2wttfg6GGN2uIOV+acW19dGcA9svomUXWbbxgNTNO+HbF2hkkrwdoFFo6pLgV9hSRJ3DMhHI8ge7b9cKbOJoBHhIxAJ+tYf3l9nVxfMI/ynTuJ941GVurxixRv6nfL/eVpUFrGG+cjWW9nxWXLYrTzBiEX/r3
7V1JI2Dlb4hvhTFR3HyK7eBMQ7YKbv92dJ/5Tv1Uv6fSMgclrwdLByM+o/mjayR+qdwE/uApUltXDQLl/l3lWqZUMeiIGC2s162edoKzI+AXgQp1CiXKJYnXcaqNfWzCPqvRUpPQy8p1a4BxqgUrd8NeEm4tlWBiOY0YTsOU0bap8eMs7AENVHqVzB9ZYsuWuyTIc/A5+ewR8O8CkVQ26dMPtEMkfwDkYHlwNsh4WDL/mRWXjYMHgJ2MoL65i07cn0VcZv9rgyJCRnM8/z9ncuqt0LZhO1qLZlNj6opScaNG+buaLmhLXZ59FUqt55YgXxZTwkHd35NJsDLO7w/lNte+gorB6mGfDNAjpVz0HaNn4D9sRyf8vbuHV7/ba4uo3gKs+VroH2NPnwUjSLxWyfaHxK4AOChqEWqFmVdwqo15XMI+irVtJ8G2BjExwjPHnipoatbs7Lg8/jGL7fl7SDCHF4ixDLR4hyeACv94HW96o3oh1N5IPwrf3wNm10PcdGL8YNOatv3Q29yy7U3fXeT8i+V/NqwVMXAllubBgaPURbVeEtvegw7Agzh/I4MjGBKN262DhQG//3qyPX49Wb5qzBYS6oc/LRU4qJMM9BmsfCWv7xjlZaGouD01F5eFB92XniXQMp9JvBwPKXuKUz72wdwbMaAtHFoDuNn9/ss5V1/r6vl/10u8p66vX8ivMlxJ1Bh3fxn7LA+sf4LPDn2GQ6/ZMA5H8r+fbFiaugJJs+PHaN4B2gwMJ6+jBgTXxN11adrdGhoyksLKQHSk7jHpdwbTyls+jUuOEUuFP87b+5g6n0VBYW+M+bRqVp8/wfwW90MqleIZvYmT8KC4PWFC9aGPtczCjDfzxAVzeUV3N92p58XBoHvxyP8zuXN2m17/hmUNmP3g9vjCeBzc+yDfHv6FfQD9+HPgjCqlu03PT3OR1O5IPws+jwNajetbfwQeoPtR59VfHyEooZvgLrfAOqeFsz7ugN+jpv7w/zV2bM6P3DKNcUzC9C6Pv4XxpGElB4xn/VkecvUUJZ2ORZZnECRPRJiRw9OuH+OD0l1gUjsC2sg/rn+2GTfIO2P05JO0D2QBKDVg5g15b/Ud7pWS7gz9EjYRxJm3zAAAgAElEQVSuL4CNi3mfFLD+8nre3fcuGqWGNzq9wcDAgbW6ntjkVVt+HaqXgS4cAz8Mqn4DcApAqVYw6IkYfvv4CBtmn2DMy21x8qz9L7hSoWRw8GAWnllIfkU+TpaNe6VBYyRXVqK7mElih7GoHPQ4eVmbO6RGRZIkPN/4N/FjxtJzSxb7OvVmh7SelHhf3ljtxOfj+iCF9q2ewE3aX13Ftzy/+k1AqQFHfwjpCy4h9WLTllav5eNDH7Pk/BLauLfhk3s+wd3adHNEYtjnn/h1qF4FVFFYvRnsyjJQK1sNw55tiUIhse6bWKMtAR0aPBSdrGNLQs3nDgv1W8nmpegNlsjqMEJaeYpCbnXAsnlzHMeNI/+XX3jTcyoe1u64N1vOytg4Fh9KvtLIAcIGQL//qz5mcchnMPBD6PQkuIbWi8SfWZrJlE1TWHJ+CVOjpjJvwDyTJn4Qyf/WfNrAlHWgK6/+BJBVvRzTwc2aIU+1pKxQy/qZsVRV1r44W5hTGCGOIay7vK7W1xJML3vVErJdwlGgJqKNr7nDabTcXngehY0Npf/9go97fES5IRef0JW8veYkp1ILzR3eLcVmx3L/+vu5VHCJL3t+yUvtXkKtuI1aQ0Ymkv/t8IyBKRsAqfoNIPUoAB5B9vR/JIrspGI2f3cKw12Wmf2LJEkMDR7K8ezjJBcnGyFwwVRkWaby5CXi/GOQ1Xq8QhrvzlBzUzk54f7SS5QdPEjgvkRe7fAqRYoT2Hht5OlfjlJUUXfVeGtrVdwqpm6aipXKikWDF9EnoI/ZYhHJ/3a5R8BDm6prAi0YDgnVRzAGtXSjx/hwEk/l8uei87XeAzAkuLrCqCj30LBUHv4DuRjKbaPwjLB
DKQq51SnHe8di1bIlmR99zFivgUyInIDOdgcZhu28tCTW6Kd+1ZbOoOOjgx/x5p43aevRll+H/EqIk3nLfItX6J1wDoKHNleXhFg4Gi5uBSC6hw/tBgdydm86B9fF16oLTxtP2nu2Z/3l9UbfTCbUndxlCyix9UOFA1FtAswdTqMnKRR4vvsO+sJCsj7/gmntptHNpxsWnqvZnrSbL7bV/XGst6uwspAntz3JwrMLmRg5kdl9Z+NgYf5PhiL534ZyrZ7UgnISckop0rghT9kArmHV1UBPV5/E1WFYEBFdvDi8PoFTO1NvccV/NjR4KAlFCZzOPW2M8AUTKDlwjAsB0cjIBEabf/lgU2AZEYHzxIkULF1K1YlTfNLjE0Icm2Hnv5BZ+7awNjbt1hepYxfyLzB+/XiOZB7h/7r8H9M7TEelqB+LLOtHFPVIYVkVey/lcDAhj4PxeVzKLqHiuno+GpWCMIdXmWH5XwKXPURxQS72XR+h54Rwyou07Pz1PNb2GoJb/cPxcP+gb0BfPtj/AesuryPaNdoYT0uoQ7qEs+gzq8iJiMbBR3H7NeOFWnN99lmKNm8m7Y03CPrtN+b2n8vUTQ+RGLCAV9apCHJ9gGgf89xlr7u8jnf3voudxo75A+bTyr3Vbf1c4dp1yDodjqNG1ml8YpMX1ZN1x5ILWLQ/iXUn0qjUGbBUK2jj70SUtz3ONhY426hRKRTklWrJKa3kcnYpsZfT+Fj/KT2VsSx1egy3AS/TOcCZtV8dJze1hBHPt8LrLjeBvbj9RY5nH2fb2G0oFaIqZH2W++lLpCzYza6u/6H90CA6DhX1+02pZNdukh99FJdHH8H9X/8iqyyLyRunklKUiUXOE6x65AF8nUy350Kr1/LZ4c/45dwvtHFvw2c9P8PVyvW2frb81GkSJ0zAqkUL/Bf8iHQX5SbEJq/bdDQpn/fWneFYUgE2GiVj2/oyuo0PMT6OaFT//D9eb5A5l9KZ86ufYFzuXGYuzOBtu6m8cE8zbDdWsX7WCUa/3BZnrzvfBDYoaBDbkrZxKPMQnbw63e3TE0ygYMcukryjkVAQ3EIUcjM12+7dcBg7htzv52PXty/uLVvyw8Dvmbh+MpnM4v5FJax76Ckcrev+E9mlgktM3zmd8/nnmRg58Y6Wcepyc0l59lmULs74fPXlXSX+O9Fkx/zTC8t5YfExRs/aS2p+Oe+NiOLAv/vywagY2gY43zLxAygVElH+boQ/vRR9m8k8rVrDdMNcpq0+wTonPQYJ1s2IpbSw8o7j6+HbAxu1DRvjN97N0xNMRC4tpDKhmESfKBQ2Blz9bM0dUpPkMX06Kg8P0l57HUNlJZ42niwetohmDmEU2n3PyEXvUq699WHvd8sgG1hybgn3rbuPrLIsvun9DdM7TL/txC9XVZH6/Avo8/LwnTEDlXPdHwDUJJP/xpPp9P98JxtOZfBMrxC2T+vJpM6B2Frc5QchhRLlsK+g6wsMqdzIn81+IamshO+lEooKK1k7IxZt+Z298CxVlvT2683WxK2i0mc9Vrb+R2SdCr1FJEEt3MSuXjNR2tnh9d57aC9fJvvzLwBwtXJlyYgFtHbqQ65mDQN+fZiMkmyj93069zSTNk7i/QPv086jHStGrLjjM7kzP/yQssOH8Xr/PayiooweY02aVPKv1Ol5Z81pnlx0lGB3W7a9eA/TBoRjc7dJ/2qSBP3ehT5v45+6gZ2BPzCkqwe/WVaQnVLCqpmx6HV3tglsUNAgirXF7EndU/v4hDqRt2kdeU7BqGQrwlp6mzucJs22W1ecHhhP3oIFFP/5JwAWSgsWDPuCnq5TyTPEMui3Yay4sMooy6gzSzN5d9+7jF83npTiFN7r+h6z+s667fH9v+T99DP5v/yK80MP4TBsWK3jul1NJvnnlFQy7tv9/Lg3gUe6BbHs8c74u9TBJFD3l2Dwp6gubuKtwrd5fVIoO+x1ZMcVsnh27B296Dp5d8LRwlEM/dR
XBgNlp5Kql3gqDPhGiGJ85uY+fToW4eGkv/Y6VZnVZdclSWLGkJd40P9rKspceXvfm0zZNIXtSdvRG+68LEtycTLv7nuXQSsGsfLiSiZETmDdqHWMDBl5x2WYi//4g8wPP8SuX1/cp/3rjmOpjSYx4ZuSX8aD3x8krbCcORPbMjDas2477PBo9U7gVU/ST/80kS/9zCffnCHsdD4/zjvB1Edb3tZl1Ao1/QP6s/byWsqqyrBWiyqR9Yn20AYMRQqKHKLwCLJEY9kkfp3qNYWFBT5ffE78mLGkvfwK/j/MR1JWr5Z7pU8P7JRefL7/J06zk+eynsPX1pcxYWPo5NWJCOeIGtfgy7JMSkkKfyb/yR9Jf3A06yhKScmokFFMjZ6Kr93d1XEqP3Wa1H9NwzI6Gu+PP67zCd7rNfqlnhczi5n0/UHKtDrmT2lPu8C6n0j5n9MrYfnD4NOW4rGL+eyTU7jl6qnq4MzzU1ve1vjw4YzDTN08lY+6f8Tg4MEmCFq4XTn/nkDS+gT2dXqPLmOb0bqv2NlbXxSsWEn666/j+tSTuD333DWPzdwexyebz9AqIhkbt32cyIkFwEZtQ7hTOPYae2w0NsiyTHJxMolFiRRpiwAIcQyht39v7gu/r1ZVOLVJSSRMmICkVhO0ZAkqt7vbE1QTsdQTiMsqYdy3+1ApFSx5vDORXiY+lDlqFEhKWD4Vu2X38uory5j54WlUB3P5j/I4rz/Y6pZvAG082uBh7cHG+I0i+dczhQdiifftAkBQjPF+eYXacxg1krJDh8iZNRtNs2Y4DBnyv8ee7hWCg5WaN1craVfRmtXjgrhQFMvhjMPEFcSRUZZBaWEpBtmAr50vg4IGEeQQRHef7vjb1/50tqrMTJKmPgRVOvx/+MGoif9ONNrkn1lUweT5B1EqJJY+3pkgVzOdqNR8OIz7GZY+iOXScTz18lK++88p1Pvz+NzhDP8a+c8z+wpJQf/A/vx67leKtEXYa0z8BibUyJB6lso0HaldorBwlHFwtzJ3SMJVJEnC89130CYnkf7a66i9vbFu3fp/j0/sFICTtYYXlhzjiQUX+O7B7rU+Qet26PLzSXr4YfQFBfj/+CMWIeYr7tYoJ3wLy6uYPP8gBWVafpzawXyJ/y8Rg2HcAkg/jtWqB5j4fCRWSgVFW9KZtfXiLX98UOAgdAYdfyT9YYJghdtRumY+BtSgCqdZCw+xxLMeUmg01WvmPT1JefoZtCkp1zw+pIUXC6Z2IKu4kmEzdvPn+aw6jUdfUEDyo49RlZSM76xZWMWYt3RLo0v+FVV6HvvpMJeyS/h2Ujuz1fW4QcQQGPM9pBzC+fepDHskDDeDgvNrEvj1QOI//mi0azQ+tj5sSthkomCFWyn4cydZLqEo0RDawsvc4Qg3oXJywm/OHGSdjuSHH6Eq89oE3yXElbXPdMPb0YqpPx5i5vY49HVQDlqXnU3ipAepvHABn6+/wqZjB6P3cacaXfLPLq4kJb+cT+9tSbfQO1tvW+eiRsLouZC0l8BTz9NlVBBhVUo2/Hqe/Zdzb/pjkiQxIHAAB9IOUFBRYMKAhZrI5YWUxuUTFxCFrDTgE3Z39ZsE07AIDsJvzhx02dkkTZlCVda1bwB+ztaseKoLw1p488nm84yfu5/kvDKj9V+VmkrCxIloU1Px+3YOdj17Gu3atdHokr+fszXbXrqHEa18zB1KzWLGwtAvIG4rrYs/ILi9O50qVPz3u6P/+IIbGDgQnaxjW9I2EwYr1ET752LkUiWldlG4hVij0ojCe/WddZvW+H03t3qydcpUdNnX7vS11qj46v5WfHZvS86mFzHwy538ciCp1ofClJ8+TcIDE9DnF+D//TxsOneu1fWMySjJX5KkgZIknZckKU6SpFdreNxCkqQlVx4/IElSoDH6vRmr+v7L2HYK9HkL6dQy+rkvxMHHhnsKFLww9xAllTWXgYhwjiDAPkAM/dQDRZtXU2blhkZ2J7KVn7n
DEW6Tddu2+H87h6r0dBImTqTy0qVrHpckiTFtfdn0Yg9a+jny+sqTjJy1h0MJeXfVX+G69SQ+MAEUCgJ+WnDNhHN9UOvkL0mSEpgJDAKaA+MlSWp+XbOHgXxZlkOAL4CPattvg9ftJej8DKojsxnR/gDWVmpaJVXx2uKadwH/NfRzKOMQOeU5ZghYAMCgp/h4HHH+1au0AqLr2dCi8I+s27fH//vvMZSUkjDuPoq3b7+hjY+jFQsf7sgX97Ukq6iSe+fs4+lFRzmbXnRbfchaLVmffUbatOoNXEHLl2EZEWHsp1Jrxrjz7wDEybJ8WZZlLbAYGHFdmxHAgit/Xw70kZr68ghJgn7vQfRY7Pa/ybCBhTjLChSH8/jlQFKNPzIwcCAG2cC2RDH0Yy7687uoyFSQ4R6F2lnGwU0s8WxorNu0Jmj5MjQBAaQ89TTZM77BoL22eKJCITGqtS9/TLuH5/uEsv18FoO+2sWUHw6y91LOTcu0lMfGEj9mLLnfzcNx3DgCfpiPyqV+nuxmjOTvAyRf9XXKle/V2EaWZR1QCNzwf0SSpMckSTosSdLh7GzjV9+rdxQKGDETfDvgffAROve2JKxKyapl5zifUXxD81CnUJo5NBO1fsyodMMiDJIGhTKUsJZilU9DpfbyImDRQuyHDSVn5kzihw2nZOfOG9pZa1S82C+Mva/2Zlr/ME6mFPLAdwfo/vF2Ptl8jnMZRciyjC47m4z33ifh/vHoi4vxnTUTr/97F0lzZ2cIVOkNnEot5MA/LAAxllqXd5Ak6V5ggCzLj1z5ehLQQZblZ69qc/pKm5QrX1+60uamz9CUJ3mZXUk2zOuNXFXJKsUCki+WsdNfyc/Tut8wfzH7+Gxmx85m273barW9XLg7KaNbcD43jHORTzH8uVb4NTdhuRChTpTs2kXmB/9Bm5CATdfqyqC2PXogqW+sxV9RpWf9iXRWx6axJy4Hz8JMJiTupnv8IZQGHRWDR+H6wvP4+LihVNx8cKNMqyOjsILE3DIuZZdwOaeU02lFnE0vQqszEOVtz/rnut/V8zFleYcU4OpZL1/g+pOT/2qTIkmSCnAA7m4WpTGydYMHliJ9359B9u/wk+2/aZ2i5YNVp3h/3LVF4PoH9mdW7Cy2Jm5lQuQEMwXcNMn5iZQkVHAxKhpZZcA7VCzxbAxsu3fHZs1q8n5eSO6PP5Dy9DMoXVywHzAAi4hwLIKDUXt7Y6ioRC4tpX9hFl0z9lN0fC/6y5fQqdTsCenIT35dSVO7wcxDaJQKHK3V2FupsbNUYZChskpPRZWe3BItxdct7HC0VhPuYceULoHE+DjQyq/uX1vGSP6HgFBJkoKAVOB+4IHr2qwBJgP7gLHAH3J9rShnLu6RMOpbLBePZ1jUVn472Iv0XZnsaZNN15C/a380c2xGiGMIWxK2iORvYpW/L8JQpqTcNgrfUBuU6ka3UrrJkjQaXB5+COfJD1KyaxeFK1ZSsGIFckXFTdtbt2uLzZhROIwYQYyrK2OKK7mcXUJ8TikJuWUUlGkpqqiiuEKHQpKwsLPAUq3E2UaDh70lHvYW+DtbE+xmi7NN3R8xeb1aJ39ZlnWSJD0DbAaUwHxZlk9LkvR/wGFZltcA3wM/S5IUR/Ud//217bdRihgMPV7Ba+fHtG/VEumYM3MWnKT1mz2w1vz9TzUgcACzjs8iszQTDxsPMwbctBRv20SZtQca2YUIscSzUZJUKux69cKuVy9kg4GqtHS08ZepyshAYWmFwsYGpaMDls2bo7C0vOZn3ewscLOzoGNw/ZzgvZ5RCrvJsrwB2HDd99666u8VwL3G6KvR6/kqpB2j/aUnueixmNaZ8PnKM7xxX4v/Nekf2J+Zx2eyLWmbuPs3lcoSSs6kcdG/uvhXQHTD+AUX7p6kUKDx9UHjW083jNaS+Nxa3yiUMHouCgcvhtu8g0YlUbYzk8Pxf8+NBzsEE+oUyuaEzWYMtGnRn9xIebaKLPfmqF3
B3kUs8RQaNpH86yNrZxg7H/uKM/QN3YGPXsmP805Qpf/7DOABAQM4lnWMzNJMMwbadJRuXIpeYYlCEUK4WOIpNAIi+ddXvu2g1+tE5H+Jh3chYbkG5q+78L+H+wf2B2Br4lZzRdh0GAwUHzxBhls4ClQ0a1HHx4AKggmI5F+fdX0BgnowlFdAJZO+NYW0K8XfghyCCHcKF7V+TEBOO0pxiswl/yhktQGvZvWkTLgg1IJI/vWZQgmj5mJpoWOAzxLcdArmzj3+v4f7B/YnNjuWjNIMMwbZ+FX+sRhDuZIKmyg8w21RqsSvjdDwiVdxfWfvBUO/ILxyKU4u6TgnlPP7vupqGv0DxNCPKZT8uZ1SG2/UshPNW9f+DFdBqA9E8m8IokZC1ChGqf+NQWFg/+KLaLU6Ah0CCXcKZ0vCFnNH2HgVpVF8MZ+LAdVH7gVEiSWeQuMgkn9DMfhTrKyV9PZcimMl/LzwNFA99HM8+7gY+qkj+mOrKc/RkOUahcYdbBwtzB2SIBiFSP4NhY0rDP2cGP1S1NYZFB/KIT29WAz91LHSravQKa1RKYKIbNM4N/sITZNRdvgK1zJotVSeO0fFuXPoCwsxFBVjKCur3hru4IDSyQnLyAgswsKQlHdw6ljzEUjRo7jv1Pv8WPY1y+ac4Nl3uvxv6GdS80l196SaIm0ZJccuku7eGQklIWKJp9CIiORvJNqEBIo2baJk+59UnDmDXFX194NqNQprawylpaD7u5qfwsYGq1atsOvXD/shg1Ha2d26o4H/xSGuA82dt3Ehsz+Hd6XSP7A/M47NIKM0A08bkaCMRb60naJUFZdbRiNb6HEPtDd3SIJgNCL514JBq6Vw1SryFy+m8sxZAKxatsTpwUlYtWiJZVRzVC4uSJaWSJKELMsYSkvRZWdTceoUZUePUnbgIBnvvEPmhx9i178/zpMnYxUddfNO7Tyh79v0WTeNk6pO7F1+kcH/7ssMZrA1cau4+zeiiu3LMFSo0Fo1xz/SAcU/1GcXhIZGJP+7YCgtJX/xYvJ+XIAuOxuL5pF4vPYqdgMGoPa8+Z23JEkobW1R2tpiERSEw7BhyLJMxanTFK5cQeHadRStXYv90KG4vfA8Gl/fmi/UdiqK2F8Zbfia9dlvcH5zBeFO4WxO2CySv7EYDJTs2UehXRAq7IhqI5Z4Co2LmPC9A7LBQOHq1VwaOIisTz7FIjQE/x/mE/TbbzhPnvyPif9mJEnCKiYaz7feImT7H7g88TjF27ZxedBgsr/++trho78oFDD0SwJUx7G0PE3Cvgz62w8VG76MKSOW4kQdcQHRyMj4NxdLPIXGRST/21Rx5gyJ4x8gbfqrqDw9CfjlF/znz8emc2eMdRa90tYW9xdeoNnmTdgNGkjOrNkkTJiINjHxxsae0Uidn2K8/Sfo0KPa0wxkxJp/I9EdWUVFjpo8l2isfCQsbW880k8QGjKR/G9B1unImfMt8ePuQ5uaitd//kPgksVYt2ldZ32qPTzw+fhjfL78Am1iIpdHjaZwzZobG/Z4BUtbDeG2aylPrqK7dogo82wkpX9sRKtxRCX5E9MuwNzhCILRieT/D7TJySROepDsL7/Erl9fmq1bi+PoUUgK0/xvsx84kODVq7CKiiLtlelkfz2Da06/tLRH0fdtBlj/TIWymIhzvTiTeZbUklSTxNdoFaVRciaTFM/qifdmLcRpaULjI5L/TZTs2kX8mLFUxsXh/ckn+Hz+OUpH0x/Yrfb0xP/7eTiMHk3OrFmkvTIdg1b7d4NWE8CrBQMdZqIsU9MivRdbE8SGr9qQz22gKMOSRJ8YZFsdzt425g5JEIxOJP/ryLJMztzvSH7scdReXgStXIHDsKFGG9e/G5JGg9cH7+P2wgsUrV1L8uOPY/jrYGmFAsXgj4jSHMCgSaBNan+2n91ltlgbg4rtq9BXWaC3CCe4hatZ/+0Foa6I5H8Vg1ZL2rSXyf78c+wHDSLw119uvtzSxCR
JwvWJx/H674eU7T9AyrPP/f0JwL8T+uajuc/+MxSyEocTIaQUp5g34IZKW0rxoZPkOYWixIKotmK8X2icRPK/Ql9cTPKjj1G0fj1uL72E92eforC2NndYN3AcORKv9/6P0l27SH3xpf8tBVX2ewcXdSZK68OEZ3dk08E/zRtoQ3XpD4pT1Vzyi8Gg1OMTZvqhPkEwBZH8garMLBInTqLsyBG8P/4I18cerdcf9R3HjsXjzTco+f130qa/imwwgFMAcodHmWrzNRWqUrK2SddODgu3pergKirz1RQ5RuMQrEalvoPaS4LQgDT55K9NSSXxgQeoSk7G79s5OAwfbu6QbovzhAm4vfQSRRs2kP311wCo7nkZhUZBpfNmHPK9OLjvrJmjbGAMekp27KDUxhsVzrTu0MzcEQlCnWnSyV+blETipEnoS0rwX7AA265dzR3SHXF59BEcxo4hd863FKxaBdbOKLr/iymqxeRbZXB4TRIGvcHcYTYcKYcoSdCT6B0DQHCMu5kDEoS602STf+XleBInTkIuLyfgxx+wiok2d0h3TJIkvN56C+uOHUl/8y3KDh9G3eUJ7NUuJHmuhQINZ/akmzvMBsNwYg3FmRake8aAa5U4uEVo1Jpk8tcmJ5M0eTKyXo//TwuwjIw0d0h3TdJo8P36KzQ+PqQ8+xxVuYWo+v6bHqqdpNtdYt+ai2grdLe+kEDZ9g1olQ4olEFEt68fq7wEoa40ueRflZlJ0pSpyFotAT/+gGVYmLlDqjWlgwO+s2ZiqKwk9V/TsIi5l/YGF/b7r0ZbYiD292Rzh1j/5Vyk+Hwe6e4tAIgWJR2ERq5JJX9dXh5JDz2MvqAAv3nzsAgNNXdIRmMRHIzXu+9SfuQI2d/MxKfPm/ipz5PidJqjW5IoK9Le+iJNmHxuPUVpliT6tqDKplLs6hUavSaT/A2lpSQ/+hhVKSn4zZndIMf4b8Vh2FAc77uP3HnfIxe40LnKll0BK9Fp9RzdVENlUOF/KnetRltpTZVlGAExTvV6qa8gGEOTSP6yTkfKiy9Sce4cPl99iXX79uYOqc54vP4aFpGRpL32Or3DnqTYMosC17Oc3JlCcV6FucOrn4ozKT56kTzn5ihQ06FLuLkjEoQ61+iTvyzLZLz7LqU7d+H59lvY9exp7pDqlMLCAp/PP0OurESz7BAxlSoOeP0CMhxaH2/u8Oqn8+spTrUg3rcFVWotXs0czB2RINS5Rp/8c+fMoWDZclyeeByncePMHY5JWAQF4f7yNEp37+b+9BbE2xShcIvj3N508jNKzR1evVN1cAXl+ZYU20djF6JEoWz0vxaC0LiTf9GGDWR/9TUOI4bj9vzz5g7HpJweeACbLl0IWXUS31yZDPs5KDUKDq4Vd//XKM+nZN8xChxCUUjWdOnacJf9CsKdaLTJv/zkSdJeex2rtm3xfO+9JjeBJ0kSXh/+B0mjYdpGC3bZleHinkjckSyyk4rNHV79cWELxSkakrxboFfoCInxMndEgmASjTL5V2VmkvLU06hcXPCd8TUKjcbcIZmF2sMDzzffxDu5jDaxCmz5BAtrJQfXibv/v+iPr6Q405Ic15bgo0VtIQq5CU1Do0v+hvJyUp56GkNpKb6zZ6NydjZ3SGZlP3QIlt26Mn6HgROU4umVQMKJHDLiC80dmvlpyyjdvYdi20AUCic6dWs8+z4E4VYaXfLX5+djKC/H+9NPsQxv+Lt3a0uSJHzffRelQon/HitaFf8HSxsVB9dcNndo5nfpd4qTFKR4tkEv6WkpqngKTUijS/5qb2+CV6/Crncvc4dSb6h9fKh4aDQtL8ukZJbh759A8tl80i7mmzs0s5JPraEw3YpMj1ZoPUqwsFKZOyRBMJlaJX9JkpwlSdoqSdLFK/91ukk7vSRJx6/8WVObPm8rLrW6rrtocFo9+Trx3krUB22JyfoQa3s1+1dfbroHvugqKdu1lWKNPyhdaNVZ1PIRmpba3vm/Cvw
uy3Io8PuVr2tSLsv/396dR0dZ5/kef39TVVkJCWRfISEBWcIakKVJUJYB3MD6L+EAABXeSURBVBC0tV0accEZdWbU6VanOfY4ffW21+5rq2jrYE8r2vQA2iCIIE0U2bcQSCTsJBASsofsVLb63T9Ie+wre0ieJPV9nZNTVcmT+n1+cM4nv3rqqecxw1u/usbVUroZT09vjs6fjJcTmjLrSIg/TeHxKvIPuenq/8TX1OS2UBQ2EhcuJozvfqf7UOpS2lr+dwCLW+8vBma18flUO5ow6QG+HCXUHPcj/sgr9Ojlxa7P3XP1b75dSVWBLwXhw6kNrsTH3z2PCFPuq63lH2aMKQRovb3YpY+8RSRdRHaKiP6BsMiosFFsnh5JjZ+Nxl1OBiYWUJxbzakD5VZH61hNTpzb11Mt0RhHKImj9Ipdyv1ctvxFJE1EDlzg646rGCfWGJMM3Ae8ISIXPKxCROa3/pFILy0tvYqnV1fCQzyYPOg2Ft8MzgpPwrf8ip7B3uz+PNe9Vv8nvqb6hIui0BG4cPEPNydbnUipDnfZ8jfGTDHGDLnA1yqgWEQiAFpvSy7yHGdab3OAb4ARF9lukTEm2RiTHBISco1TUpdyW/xtbBkM+TE9ce5tIik+j9K8GnL3l1kdrcOY7JVU5vuSHzmK6l7l+Af4Wh1JqQ7X1t0+q4G5rffnAqv+/w1EpJeIeLXeDwYmAAfbOK66RvGB8QwKHsyy2cG0NHnQc/UrBIb6nN/373KD1X+TE+e29VRJH1yOMGJGXvAANaW6vbaW/6vAVBE5BkxtfYyIJIvIH1q3GQiki0gmsBF41Rij5W+h2+JvY5dvHseGxNFwpImhUSeoOFPH8YwLvnDrXk58RXWOoTAsmRZp5o5p46xOpJQl2lT+xphyY8xkY0xi621F6/fTjTGPtt7fboxJMsYMa7397+sRXF27GXEzsImNbfeNAgf4LPs/9I7wZffnubhaXFbHa1fmwAqq8v0oiEimPKiYgAB/qyMpZYlu9wlfdXlBPkGMjxxPRvNOskaOpOlMM0P8MqksrufonmKr47Wfxjqc29dTZu+PsQfSZ4R7n/dJuTctfzd1W7/bKKovIvf+OXgEuPBctpDgKD/2rMmlpbuu/g+vpSYXikKTafJo4K7pKVYnUsoyWv5u6qaYm/B3+FPhuYfNyeNx1bQwsGEz1WVOjuwosjpeuzCZSzlb0IuisBGUhBTQ0093+Sj3peXvprzt3syIm8HXp9Oouf0JHFEtOFZ9QGi0D3vW5tLS1M1W/7UlnNu9lWLHQIzNj9gRQVYnUspSWv5u7M7EO2loaSAo8gRrhk6EFhcJRWuprWgge+sZq+NdXwdWUJ3rRWH4GJz2Wu6Zpmd9Ve5Ny9+NDQ4aTEJgAn/N+5zalH/CJ7EJr7RPCY9ykL7uJE0NLVZHvG7MvqWUFYZQFpxEYWgevXwDrY6klKW0/N2YiDArYRZZZVncNNKHTwfchN2rhfhD/8O56kayNp62OuL1UXaM2oyDFAaOBnGQODbc6kRKWU7L383dEn8LNrGxq3Q9Z4bOw29wE94ZG4kKN+z7ax4N9U1WR2y7rOVUnfQlP3I8pb6nuX/SdKsTKWU5LX83F+wTTEp0CqtPrObBlBv4c5/JePZsInbXIhrqm9m3Ic/qiG3jctGyZylnqvpT7xdNWXQxPTx7WJ1KKctp+StmJcyi3FnOWTI53Pcn9BjRiE9uFrG968j8Op/66karI167k5upyS7lTNg4WmhiwqRBVidSqlPQ8ldMjJ5IiE8Inxz9hAcnJfFx8DT8IpxEffM2LU0tpK89aXXEa5fxERWngygMH0NO0AHuHDrF6kRKdQpa/gqHh4PZibPZVrCNhMhGdoTcjd/wZnzO5tHXu4jszQVUltRbHfPq1VfQuPsLTrWMwmXz4VxcHd52b6tTKdUpaPkrAOYkzkFEWHFsBfNuTuKPvtMJ7FdH5NcL8bDBrlU5Vke8elnLqDzm4EzkOGo
dZdye+iOrEynVaWj5KwAiekQwMWoiK46tYMqgYL4JvBPvJIOPqSau8VuO7y2h+GS11TGvnDGYPYvJLxlAZeAADoVnMK3fRKtTKdVpaPmr7/x4wI8pd5azOf8b5k5K4n2P6QQPPEvE1g/w8oIdK493ncs9FuylZn8up4JScdGI90BPHB4Oq1Mp1Wlo+avvTIicQIRfBJ8c/YRZI6JY73cH9v42fANaiD+TRsGRyq5zsfeMxZScDKcw/EaOhKTz2PjZVidSqlPR8lffsXnYmJM4h52FOymsP839KYN5q+U2woYUEXZgNT28mtj+l+Od/5TPzioat6/gmC0V4+EgJ/owI8OGWZ1KqU5Fy1/9ndmJs7GLnaWHl3LvmBjWec2kMcYf/7524rP/zNmierI3F1gd89IyPqLiqJ2CyFSK/Y4waegERMTqVEp1Klr+6u+E+IYwre80Vh5fiQsnc1MH8ppzFmEDTxNUmE6Io4Ldn+firOukp31oacZs/y+OVqfQ4N2LjKgtzBt2l9WplOp0tPzVDzww8AHqmupYdWIVD47rw1deUznbO4ygYQ7itr9L47lmdq/JtTrmhR1eQ1VWGXkhKZzzKMcR60OIb4jVqZTqdLT81Q8khSQxPGQ4Sw4twcsuPJySyMv1swmOO0mgZw0xdQc4sCmfijN1Vkf9AbPjHY6duZGqgH7sjd3Iw8PvtjqSUp2Slr+6oPsH3c/pmtNsKdjCT8f1YbvXjzjtl0DoiFpi932M3cPF5mVHO9ehn/l7qdudydHgGTRRy/Gwb5keN9nqVEp1Slr+6oKmxE4h3C+cPx38E35edh5NTeD5mnvpGVJAYLw//XJWUXDkLEd3daLr/e78PUfyR1MZ2J89MX8lNXYKDpse26/UhWj5qwuye9i5d8C97CraxZGKI/x0XF+O+Q4j3WcC4f2PEnHqG3rbKtn66XHO1XaCs35W5FK/ZQ2He95Oi6kjO3IbTyQ/aHUqpTotLX91UXf1vwsfuw8fZn9IDy87T96UwM+rZuPwdxKcGk3CzndoqGti+4oTVkeFzb/l6MmRVAYmsidyI/EBQ4kPiLc6lVKdlpa/uqgArwDu7n8363LXcbrmNA+MjaUpIJ7VnrcQFLSD3kFC3/JtHN5eSMGRs9YFLT+Bc/Nysn1mgauOb2M38uSoudblUaoL0PJXlzR38Fw8xIMPDnyAl93Gs1P788vKmbT49CTiR83EZn+Kn83J1x8fotHZbE3ITa+RlZNKZWAiu0J34ucZxKSYVGuyKNVFaPmrSwr1DWVWwiw+O/4ZxXXFzBoRRURYBK/zU3xd+wienMSA9N9TU+5ky/JjHR+w9ChVX60lK/BehGr2J6zhgUE/weZh6/gsSnUhWv7qsh4e8jAu4+Kjgx9h8xCenzGA96rHUtQrmdDgzYT41RFXsZ3D2ws5kVHSodnMN79mR9E9NHj3ZnX0djzEzn2D9BO9Sl2Olr+6rGj/aGbGzeSTo59w1nmWmwaEMjY+iPmVDyLiJHJmb2IPLCPQVsXGJYepPdvQMcEKMihYn0FO0DQ8HKXkR6UxNXYGAV4BHTO+Ul2Ylr+6Io8kPYKz2cni7MWICP95+xCyG0JJC3kQ39o0Qu+ezA3b36DZ2UTaB9ntf+ZPVwuuz55ma/1j2Ewzf4jajXg0808jH27fcZXqJrT81RXpF9iPGXEzWHJoCSX1JQwI92fuuL48eSoFZ6/+BPuuI6h/GDccW0rB0Uq2tvf+//Q/snNHHOU9B3GudxUNoZtIjZqsh3cqdYW0/NUVe2rEUzSbZt7NfBeAp6cm0tPPlxf4F2g4S1RKIxFnM4mvy+DApgIOtNepn2uKyfufJez3vZ9gKebd4N2IrYF/HvmP7TOeUt2Qlr+6YjH+Mdwz4B5WHltJTlUOPb0dvDBjIJ8V9mbfDf+Go3QjkY9Mok/6B4RJEVuWHm2X4//PrXqJtMon8GquZc/wIBxB2xgfkcKA3gOu+1hKdVda/uqqPJb0GF4
2LxZmLARg9ogoRvftxdzsYTjjp+Ff+gHhTzzAgM2/wc9Wz9p3syg8XnndxjcH1/DlV/E4HYHcMMGb1dVpiO0cT43QVb9SV0PLX12VIJ8gHhryEGl5aWSWZuLhIfzmrmE0t8AzzscwvkH0al5C8KxpJG1+GS+PBla/tZ/ThyvaPLar9DgbFqZzxmsUg32O8XKdJ15BWxgTPpakkKTrMDul3IeWv7pqcwfNJcg7iFd3vUqLq4W+wX4suGUg63KaWDf4t0h9OeGRWwkadQPD0l7Ez9HIF29ncTKr7JrHdJ2rZf0rqzhmUuhXvYMTM1M40fg52Gr55xFPXsfZKeUetPzVVfN1+PLc6Oc4UH6A5UeXA3D/jbGk9A/h2W02iqYvQsoOET2+lF43DmPo+l8Q4OXki3ez2LHyOC3NV3cYaFNDM+teWk6OcwSJJRsY/KsH+e2ub/EO3sLMuJkMDx3eHtNUqlvT8lfXZEbcDMZFjOPNjDcpqS9BRHhtzlC87DYe3R5I4y1v4pG/hZhJdfS+aSxJ654nPqCCjPV5/OW1vVQUXtlVwHL3l/Dn59ZxsqovA05/Rur/nc9/bitGgr7A027jmVHPtPNMleqe2lT+InK3iGSLiEtEki+x3XQROSIix0XkhbaMqToHEeHFsS/S7Grm1d2vAhAe4M3rPx5G9plqnjkyGNe0V5Bja4gakk3vmTfT97MXGVX7JdWldSz91S7WvJ1Jzr7SH3wgrNHZzMlvy/jinf2sfe8ApqKK5INvMeHXj/KXEhsbT+3Ao0cWjyY9QrhfuBXTV6rLs7fx9w8As4H/utgGImID3gGmAvnAHhFZbYw52MaxlcViesYwf+h8Fu5byKbTm0iNSWXywDBemH4Dv153mMSwaTx9Tx9kxeNEhBfht+BJbO8sYfTBbZTP/BdOnbKz7kA5dk8PfHt64uPviTFQmleDcRlsrgb65a5lgN9hopd/TKbTk/9Yto2gxHUE+kXy0OCHrP4nUKrLatPK3xhzyBhz5DKbjQGOG2NyjDGNwFLgjraMqzqPeYPnkRCYwC+3/5LS+lIA5qfEM2dkNG+kHePzxlHwyHrEZiMg50XinxhI0NhBRHz6EmPWPMHohq+I71VJL49KpKocU1pIfPk3DN//Jj/a9nNGT2im74r1lPkE8I9/yqB31FbOST4/H/0zvO3eFs9eqa6rrSv/KxEFnP7e43zgxg4YV3UAh83Baymvcd8X9/H8ludZNHURdg87/3v2EPIq6nh62X6a7hrK7Mc3w9bfYd+9iOjwZhqfnU5Vjp3q3Vvw37Hi757TJ6iRnmOi6fn477EPTqG+sZnHP96L0+MEth7ruSXuFqb2mWrRjJXqHi5b/iKSBlxox+oCY8yqKxhDLvA9c5Gx5gPzAWJjY6/gqVVnkNgrkQVjF/Dithd5L/M9nhrxFF52G398aDSPf7yXZ5dnUj5zII9N+18w9gnY/Bs8D64ixLuM4InQ0uiBcQE2HyQyCfv05yBhMgDF1U4eWbyHg0UlxCR9io8jnAU3LrB2wkp1A5ctf2PMlDaOkQ/EfO9xNHDmImMtAhYBJCcnX/APhOqcZiXMYm/xXhZlLWJk6EjGR43H39vBB/NG88yy/byy9hAFled4fvoN+Nz6Otz6OtSVISWHsJ+rgNBB0DsevncRluwzVTzyYTo1ziZSx21lX0Upb978If6e/hbOVKnuoSMO9dwDJIpInIh4AvcCqztgXNXBfnHjL+gX2I9nNz1LZmkmAF52Gwt/MpKHxvflw+0nmfL6Jr48UIQxBvyCIW4iDLoDghO/K/4aZxPvbDzOj9/bgQjMnZFHenkajw97XI/pV+o6EWOufYEtIncCC4EQoBLYb4z5BxGJBP5gjJnZut1M4A3ABvzRGPPK5Z47OTnZpKenX3M2ZY3iumLmrZ/HWedZ3p/2PkOCh3z3s1055fzH6mwOF9Uwum8vJg0IZWx8bxJC/SmrbaCw0smu3HI+3H6SGmczqf1DmDj
qGG/uf5VpfabxWsprenlGpS5DRPYaYy566P1327Wl/NuTln/XVVRXxENfPkR1YzXvT3ufwUGDv/tZc4uLj3acYumePI4W117w96cPDufJmxLIbdjEgq0LSI1O5XeTfofD5uioKSjVZWn5K0udqT3DvC/nUeGs4PkxzzMncQ4if//ef3ltA3tOVnCyvJ6wnl5EBPjQN8iPEH8HH2Z/yFv73mJM+Bjenvw2XjYvi2aiVNei5a8sV1JfwoKtC9hZuJPJsZN5adxLBHoHXvJ38qrzWLB1AftL9zO1z1RenvAyvg7fDkqsVNen5a86BZdxsTh7MW/tewsfmw/T46Zze7/bGRYy7LtXAk2uJvYU7mFD3ga+yPkCu4edBTcuYGbczB+8WlBKXZqWv+pUDlccZnH2YtJOpeFscRLoFYiP3QeHh4OzDWepaazBx+7DzbE388zIZwjzC7M6slJdkpa/6pRqG2vZcGoDWWVZNLU00ehqxNfuS2p0KuMix+kpG5RqIy1/pZRyQ1da/no+f6WUckNa/kop5Ya0/JVSyg1p+SullBvS8ldKKTek5a+UUm5Iy18ppdyQlr9SSrmhTvshLxEpBU5ZneMaBANlVofoYDpn96Bz7hr6GGNCLrdRpy3/rkpE0q/k03Xdic7ZPeicuxfd7aOUUm5Iy18ppdyQlv/1t8jqABbQObsHnXM3ovv8lVLKDenKXyml3JCWfzsSkZ+JiBGRYKuztDcR+Y2IHBaRLBFZKSKXvlhvFyUi00XkiIgcF5EXrM7T3kQkRkQ2isghEckWkX+1OlNHERGbiOwTkTVWZ2kPWv7tRERigKlAntVZOsgGYIgxZihwFPh3i/NcdyJiA94BZgCDgJ+IyCBrU7W7ZuDfjDEDgbHAk24w57/5V+CQ1SHai5Z/+/kd8BzgFm+qGGP+aoxpbn24E4i2Mk87GQMcN8bkGGMagaXAHRZnalfGmEJjTEbr/RrOl2GUtanan4hEA7cAf7A6S3vR8m8HInI7UGCMybQ6i0UeBtZZHaIdRAGnv/c4Hzcowr8Rkb7ACGCXtUk6xBucX7y5rA7SXuxWB+iqRCQNCL/AjxYAvwCmdWyi9nepORtjVrVus4DzuwqWdGS2DiIX+J5bvLITkR7AX4CnjTHVVudpTyJyK1BijNkrIpOsztNetPyvkTFmyoW+LyJJQByQKSJwfvdHhoiMMcYUdWDE6+5ic/4bEZkL3ApMNt3zGOJ8IOZ7j6OBMxZl6TAi4uB88S8xxqywOk8HmADcLiIzAW+gp4j8yRjzgMW5ris9zr+dichJINkY09VODnVVRGQ68DqQaowptTpPexARO+ffzJ4MFAB7gPuMMdmWBmtHcn4FsxioMMY8bXWejta68v+ZMeZWq7Ncb7rPX10vbwP+wAYR2S8i71kd6HprfUP7KWA959/4XN6di7/VBOBB4ObW/9f9rSti1cXpyl8ppdyQrvyVUsoNafkrpZQb0vJXSik3pOWvlFJuSMtfKaXckJa/Ukq5IS1/pZRyQ1r+Sinlhv4fC0rLwvqcHgkAAAAASUVORK5CYII=\n", "text/plain": [ "
" ] @@ -408,7 +408,149 @@ "source": [ "## Gaussian process with a mean function\n", "\n", - "TBA" + "In the previous example, we created an GP regression model without a mean function (the mean of GP is zero). It is very easy to extend a GP model with a mean field. First, we create a mean function in MXNet (a neural network). For simplicity, we create a 1D linear function as the mean function." + ] + }, + { + "cell_type": "code", + "execution_count": 21, + "metadata": {}, + "outputs": [], + "source": [ + "mean_func = mx.gluon.nn.Dense(1, in_units=1, flatten=False)\n", + "mean_func.initialize(mx.init.Xavier(magnitude=3))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We create the GP regression model in a similar way as above. The difference is \n", + "1. We create a wrapper of the mean function in model definition ```m.mean_func```.\n", + "2. We evaluate the mean function with the input of our GP model, which results into the mean of the GP.\n", + "3. We pass the resulting mean into the mean argument of the GP module." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [ + "m = Model()\n", + "m.N = Variable()\n", + "m.X = Variable(shape=(m.N, 1))\n", + "m.mean_func = MXFusionGluonFunction(mean_func, num_outputs=1, broadcastable=True)\n", + "m.mean = m.mean_func(m.X)\n", + "m.noise_var = Variable(shape=(1,), transformation=PositiveTransformation(), initial_value=0.01)\n", + "m.kernel = RBF(input_dim=1, variance=1, lengthscale=1)\n", + "m.Y = GPRegression.define_variable(X=m.X, kernel=m.kernel, noise_var=m.noise_var, mean=m.mean, shape=(m.N, 1))" + ] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iteration 10 loss: -6.288699675985622\t\t\t\t\n", + "Iteration 20 loss: -13.938366520031717\t\t\t\t\n", + "Iteration 30 loss: -16.238146742572965\t\t\t\t\n", + "Iteration 40 loss: -16.214515784955303\t\t\t\t\n", + "Iteration 50 loss: -16.302410205174386\t\t\t\t\n", + "Iteration 60 loss: -16.423765889507315\t\t\t\t\n", + "Iteration 70 loss: -16.512277794947106\t\t\t\t\n", + "Iteration 80 loss: -16.5757306621185\t\t\t\t\t\t\n", + "Iteration 90 loss: -16.6410597628529\t\t\t\t\t\t\n", + "Iteration 100 loss: -16.702913078848557\t\t\t\t\n" + ] + } + ], + "source": [ + "import mxnet as mx\n", + "from mxfusion.inference import GradBasedInference, MAP\n", + "\n", + "infr = GradBasedInference(inference_algorithm=MAP(model=m, observed=[m.X, m.Y]))\n", + "infr.run(X=mx.nd.array(X, dtype='float64'), Y=mx.nd.array(Y, dtype='float64'), \n", + " max_iter=100, learning_rate=0.05, verbose=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 25, + "metadata": {}, + "outputs": [], + "source": [ + "from mxfusion.inference import TransferInference, ModulePredictionAlgorithm\n", + "infr_pred = TransferInference(ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y]), \n", + " infr_params=infr.params)" + ] + }, + { + 
"cell_type": "code", + "execution_count": 26, + "metadata": {}, + "outputs": [], + "source": [ + "xt = np.linspace(-5,5,100)[:, None]\n", + "res = infr_pred.run(X=mx.nd.array(xt, dtype='float64'))[0]\n", + "f_mean, f_var = res[0].asnumpy()[0], res[1].asnumpy()[0]" + ] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY4AAAEKCAYAAAAFJbKyAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzt3Xd8zPcfB/DXJzFir9QWq3ZCjBhV41Stqt20av5qNaVojRoNSrUVFC2NUdoaLTGKohQXRVUlalOrNWILITJIcu/fH+9EgoRc3N3n7vJ+Ph73cOOb+74vcve+z3p/FBFBCCGESC8X3QEIIYRwLJI4hBBCmEUShxBCCLNI4hBCCGEWSRxCCCHMIolDCCGEWSRxCCGEMIskDiGEEGaRxCGEEMIsWXQHYA3u7u5UpkwZ3WEIIYTD2L9//00ieiE9xzpl4ihTpgxCQ0N1hyGEEA5DKXU+vcdKV5UQQgizSOIQQghhFkkcQgghzOKUYxzC+cXFxSEsLAyxsbG6Q3F6bm5uKFmyJLJmzao7FGEnJHEIhxQWFoY8efKgTJkyUErpDsdpERHCw8MRFhaGsmXL6g5H2AnpqhIOKTY2FoUKFZKkYWVKKRQqVEhaduIRkjiEw5KkYRvyexaPk8TxmIgI3REIIYR9k8SRwpdfAp6ewKVLuiMRQgj7JYkjhebNgTt3gHbtgKgo3dEIIYR9ksSRQvXqwE8/AQcPAj16ACaT7oiEPTt37hwqV66Mvn37wtPTE926dcO2bdvQsGFDVKhQAfv27UNUVBTeeecd+Pj4oGbNmli3bt3Dn23UqBFq1aqFWrVqYc+ePQCAHTt2oGnTpujSpQsqV66Mbt26gYh0vkwhniDTcR/Tti0wfTrwwQeAvz8webLuiMSzDB3Kyd6SvL2BmTOffdyZM2ewcuVKzJ8/Hz4+Pvjxxx+xe/durF+/Hp999hmqVq2KZs2aYdGiRYiIiEDdunXRvHlzFC5cGFu3boWbmxtOnz6Nrl27PqyvduDAARw7dgzFixdHw4YN8ccff+Dll1+27AsU4jlI4kjFkCHA2bNAuXK6IxH2rmzZsvDy8gIAVKtWDa+88gqUUvDy8sK5c+cQFhaG9evXY9q0aQB4GvGFCxdQvHhxDBo0CAcPHoSrqytOnTr18Dnr1q2LkiVLAgC8vb1x7tw5SRzCrkjiSIVSwNdfJ982mQAX6dSzW+lpGVhL9uzZH153cXF5eNvFxQXx8fFwdXXF6tWrUalSpUd+bsKECShSpAgOHToEk8kENze3VJ/T1dUV8fHxVn4VQphHPg6fYeVKoH594N493ZEIR9SyZUt8/fXXD8cpDhw4AAC4c+cOihUrBhcXFyxZsgQJCQk6wxTCLFoTh1JqkVLqulLqaBqPN1VK3VFKHUy8jLN1jAUKAPv3AwMGADJGKczl7++PuLg4VK9eHZ6envD39wcAvPfee/jhhx9Qv359nDp1Crly5dIcqRDpp3TO2FBKNQZwD8BiIvJM5fGmAIYTUVtznrdOnTpkyY2cPv2UB8q//x7o1ctiTyuew4kTJ1ClShXdYWQa8vt2fkqp/URUJz3Ham1xENFOALd0x
pAeo0cDTZsCAwcCKcYwhRAiU3KEMY4GSqlDSqlflVLVdATg6gosXQq4uQHr1+uIQAgh7Ie9z6r6G0BpIrqnlGoDYC2ACqkdqJTqD6A/AHh4eFg8kBIlgOPHgcKFLf7UQgjhUOy6xUFEd4noXuL1TQCyKqXc0zh2PhHVIaI6L7zwglXiSUoaoaFA4kJfIYTIdOy6xaGUKgrgGhGRUqouONGF64zJZOIB8qgo4PBhIG9endEIIYTt6Z6O+xOAPwFUUkqFKaX6KKXeVUq9m3hIFwBHlVKHAHwF4C3SXLjHxQX49lvg4kXgww91RiKEEHronlXVlYiKEVFWIipJRAuJaC4RzU18fDYRVSOiGkRUn4jsooOoQQNg5Ehg4UJg40bd0QgdLl68CIPBgCpVqqBatWqYNWuWRZ//4MGD2LRpU5qPlylTBjdv3rToOYVj+/BDYOdO25zLrsc47NmECYCXF9Cvn2z+lBllyZIF06dPx4kTJ7B3717MmTMHx48ft9jzPytxCJHShg3AjBnAvn22OZ8kjgzKnh347jvAzw/ImVN3NMLWihUrhlq1agEA8uTJgypVquBSKjuAtW/fHosXLwYAzJs3D926dXvimJUrV8LT0xM1atRA48aN8eDBA4wbNw4rVqyAt7c3VqxYgfDwcLRo0QI1a9bEgAEDpNS6eITBAEydygVabcGuB8ftXe3afBH6NW365H2+vsB77wHR0UCbNk8+3rs3X27eBLp0efSxHTvSf+5z587hwIEDqFev3hOPzZ8/Hw0bNkTZsmUxffp07N2794ljJk6ciC1btqBEiRKIiIhAtmzZMHHiRISGhmL27NkAgMGDB+Pll1/GuHHjsHHjRsyfPz/9AQqnZjIBuXIBw4fb7pzS4rCArVuBZs2AmBjdkQhbu3fvHjp37oyZM2cibypT7IoUKYKJEyfCYDBg+vTpKFiw4BPHNGzYEL1798aCBQvSLHa4c+dOdO/eHQDw2muvoUCBApZ9IcIhHTnC210fPmzb80qLwwJcXIDgYGDiRODzz3VHkzk9rYWQM+fTH3d3N6+FkSQuLg6dO3dGt27d0KlTpzSPO3LkCAoVKoTLly+n+vjcuXPx119/YePGjfD29sbBNHalUkqZH6RwWkTcor5+nRco25K0OCzglVe4y2PaNODYMd3RCFsgIvTp0wdVqlTBh0+Zl71v3z78+uuvOHDgAKZNm4b//vvviWPOnj2LevXqYeLEiXB3d8fFixeRJ08eREZGPjymcePGWLZsGQDg119/xe3bty3/ooRDWbIE2L0bmDIFKFTIxicnIqe71K5dm2ztxg2iggWJGjcmMplsfvpM5/jx41rPv2vXLgJAXl5eVKNGDapRowZt3LjxkWNiY2OpevXqtH//fiIiWrduHTVt2pRMj/2BdOzYkTw9PalatWo0ePBgMplMFB4eTnXq1KEaNWrQ8uXL6ebNm/Tqq69SzZo1aejQoeTh4UE3btyw2evV/fsWj7p9m6hwYaL69YkSEizznABCKZ2fsVrLqluLpcuqp9eCBUD//sCmTUDr1jY/faYiZb5tS37f9iUggKt2h4QAiZP7nps5ZdVljMOC+vQBihUDWrXSHYkQwpkNGwa89JLlkoa5ZIzDglxcgLZtec/yFN3TQghhESYTcOsWb/Xw8sv64pDEYQVGI1CyJHDokO5IhBDOZPFioEIF/RvKSeKwgpo1gaxZgaFDZZ9yIYRlRERwjbyKFYEXX9QbiyQOKyhQAJg0idcGrFmjOxohhDMYP56rHMyZw93iOknisJJ+/bgI4vDhQGys7miEEI7s8GFg9mzg3Xf1DYinJInDSrJkAWbOBM6dA7Zs0R2NsLYJEyZg2rRpTz1m7dq1Fq2gm5rLly+jy+OFt1Lx2WefWTUOYVlr13JPxqef6o6ESeKwombNgH/+Adq31x1JJhcQwDVhUgoO5vttyBaJo3jx4li1atUzj5PE4VjGjQOOHgVSKXWmhSQOK6tUif+9e
lVvHJmajw+Xyk1KHsHBfNvH57medvLkyahUqRKaN2+OkydPPrx/wYIF8PHxQY0aNdC5c2dER0djz549WL9+PUaMGAFvb2+cPXs21eMeN2HCBPTo0QPNmjVDhQoVsGDBAgBc8WHEiBHw9PSEl5cXVqxYAYAr9Xp6egIAvv/+e3Tq1AmtWrVChQoVMHLkSADAqFGjEBMTA29vb3Tr1g1RUVF47bXXUKNGDXh6ej58LqHf3bv85RMAihbVG8sj0rvE3JEuOkqOPM1PPxFlzUp09KjuSJyH2SUwjEYid3cif3/+12h8rvOHhoaSp6cnRUVF0Z07d6h8+fI0depUIiK6efPmw+PGjh1LX331FRER9erVi1auXPnwsbSOS2n8+PFUvXp1io6Ophs3blDJkiXp0qVLtGrVKmrevDnFx8fT1atXqVSpUnT58mX677//qFq1akRE9N1331HZsmUpIiKCYmJiyMPDgy5cuEBERLly5Xp4jlWrVlHfvn0f3o6IiHgiDik5oseHHxJlz050+bL1zwUzSo5Ii8MGXn2VK7R+9JHuSDIxg4F33Zo0if81GJ7r6Xbt2oWOHTsiZ86cyJs3L9q1a/fwsaNHj6JRo0bw8vLCsmXLcCyNypfpPa59+/bIkSMH3N3dYTAYsG/fPuzevRtdu3aFq6srihQpgiZNmiAkJOSJn33llVeQL18+uLm5oWrVqjh//vwTx3h5eWHbtm346KOPsGvXLuTLly+DvxVhSceOAbNmAT17ckUKeyKJwwYKFQLGjOH9yY1G3dFkUsHBQGAg4O/P/z4+5pEBaZU57927N2bPno0jR45g/PjxiE1jWl16j3v8PEopUDoXCGXPnv3hdVdXV8THxz9xTMWKFbF//354eXlh9OjRmDhxYrqeW1gPETBoEJA3L2CPw1GSOGxk8GDAw4On55pMuqPJZJLGNIKCeNOUoKBHxzwyoHHjxvj5558RExODyMhI/PLLLw8fi4yMRLFixRAXF/ewFDqAJ0qlp3Xc49atW4fY2FiEh4djx44d8PHxQePGjbFixQokJCTgxo0b2LlzJ+rWrZvu+LNmzYq4uDgAPBMrZ86c6N69O4YPH46///7bnF+FsILly3kd2OTJvF+MvZEihzbi5sZ/BP368Zxsb2/dEWUiISGcLJK6pwwGvh0SkuEuq1q1auHNN9+Et7c3SpcujUaNGj18bNKkSahXrx5Kly4NLy+vh8nirbfeQr9+/fDVV19h1apVaR73uLp16+K1117DhQsX4O/vj+LFi6Njx474888/UaNGDSilEBAQgKJFi+LcuXPpir9///6oXr06atWqhZ49e2LEiBFwcXFB1qxZERgYmKHfibCcCxeABg242rY9krLqNmQy8eyq4sV1R+L4MkuZ7wkTJiB37twYbssNpVORWX7f9iQ+nteD2Yo5ZdWlq8qGXFw4aRABaewiKoTIxI4dA7Zv5+u2TBrm0hqaUmoRgLYArhORZyqPKwCzALQBEA2gNxE5fAfssGHch3nmDM+2EiItEyZM0B2CsBEinvB3/DhXnMidW3dEadPd4vgewNO2PWoNoELipT8Ap+h87dgRuHIF+Ppr3ZE4NmfsZtUtPh64dy/59tmzwOnThJs3gbFjgfXreVGasLzFi4Fdu4AvvrDvpAFoThxEtBPAracc0h7A4sT1KXsB5FdK2dmMZvM1agS0acN/ILdv647GMbm5uSE8PFyShwWYTEB4OHD6NO8hExaW8lFCZGQ4jh93w5QpwO72AZjeNvhhGZe4OCSXb9FQxsVZhIfzjMuXXgLeeUd3NM9mx71oAIASAC6muB2WeN8VPeFYzuTJvG/HjBk8Q1SYp2TJkggLC8ONGzd0h+LQYmP5Qys+nneVy5WL+9ZPnEg+pkABN7zzTkn07g2c+MYH1Sb6Ah1GI6GLLybeH41Rps9h+mg08iRNeRZmGzWKv0QGBuovmZ4e9p44UlthlepXTKVUf3B3Fjw8PKwZk0V4ewOdO
wM//cR19l1ddUfkWLJmzYqyZcvqDsPhHeoWgCW7fPDGNwa0aAHcuQMcnBEM018hWFp8JC5eBHLkAPLn5+qsdeoYUHxuEAoP8sW9Rq3xybrhWIruaPPJ59j+fhBeb2yA/Cmbr3ZtoFQpoHp13ZGkU3prk1jrAqAMgKNpPDYPQNcUt08CKPas57S3WlVpuXKF6N493VGIzObXX4kWLODrpu1GMrm7U/A4I3XoQPRqFiNdhzs1hZGKFyeqX5+oVi2icuWIcucm4iFcoq8K+BMBFFuvERFAi8v4E0Dk40P04IHe1ycyBk5Uq2o9gJ6K1Qdwh4gcvpsqSdGi3DUQHw9ERemORjg7Ih5Xa9MGmDsXiIsDfo01oHeOIFSb6IvG28ZhTVZf3JgdhI1RBly6BPz5J7B/Pw+S37nDYyCrBgaj571ALEYPZP1rN3aW6YGudwJh9A9Gx468bbJIn9mzgW+/dcAtptObYaxxAfATeLwiDjx+0QfAuwDeTXxcAZgD4CyAIwDqpOd5HaXFQUQUE0NUqRLRsGG6IxHOLCGBqF8/bi28+SbRkSNELVvy7QoViE68wS2ImBH+tGoV0aefEvXsSdS4cXILYtkyolUDjfQgvzvFT51O8QXc6edG0+mGcqcP1XSKzOFOkeu56vD27UTt2xOlKAAsHnP6NJGbG1GnTrojYTCjxaF1jIOIuj7jcQIw0EbhaOHmBtSty/sIDxtmf1UwheMjAt5/H1iwABg9Gnj5ZZ7ZR8STMwZWDUbWboEIbe2P0lMDMRsG7IABJUoAL76YvBBt717AbU4IZiMIjfxDkMcQhOaTDch6viYafhGCdn8FoWnXEFRaaEB0NPDrr7zlyaZNQOXKen8H9oYIGDAAyJ7dQaflpzfDONLFkVocRPzNw9WVa+8LYQ1TphANH040YQKRUkTe3kRnzxLtm2Kk+IK8P8nu3URfdTTSg3zuFL0x9f1Kzp8nWrqUqFcvopw5ierVS35s/36iunW5FdO3L9GOHUSFCxMVKMDXRbJFi/j3NHeu7kiSwYwWh/YPeWtcHC1xEPEbMUcOoqtXdUcinMnt2/xvfDxR9+78ju/RgzcVa9WKaASm0FcdH0sSRiNnmnQ8d9L+TjdvEr31Ft8eM4aTU5UqPBBfpQpvZHbggIVfnIO6c4eTaaNG3IVoLyRxOGDiOHmSyMWF6PPPdUcinMXq1fwBFRKSnDQ+/ZRo3Tqi/PmJ8uYlmj6dKDb2+c+1bRtRnjycIMaPJ9q8mahIEZ6JtWIFUUAAkcn0/OdxFhs3Ep04oTuKR0nicMDEQUT055/29Q1EOK4jR4hy5eLpsW+/nZw0fvyRr9esSfTvv5Y955UrRN268fO//DLRX39xl5iLC9E33/AxZ88SzZ9v2fM6kshI3RGkzZzEYe/TcTOV+vV51WhCgu5IhCO7fRvo0AHIk4cHpX/8kXfMHTsWaN2ad6Pcswew9PrJokWBpUv50nRfALaOCcauXTz99733gLlvBuNw9wCc7h+A5QMe20QrE5QruXqVJxv88IPuSJ6fJA478/PPQPnyXAZCCHPRFK4ldeEC0K0bsGQJMKdLMOrvDEB0NK8AnzyZZ/NZS7duwLsLfTD6oC9yhwRj2TJgRrtgdA7yxdmCPsjd1AevzPfFmvcTk0fSDo0+PtYLSjMi4N13eS1M/fq6o7GA9DZNHOniqF1VRNzFABD5++uORDii+5uNdCe7O81oZ6QsWYhG+hgpIhuvBF+zxsbBGHlV+sIS/nTXzZ0CfY0PB+c/bsgr1P9u60/kzrO6nNl33/H7eto03ZGkDTLG4biJg4ioY0cevLxzR3ckwhFdWmqkG8qd5hTyf5g0ksYYbM30MS8s/AT+1LIl0dix/KnzxhtES8vxYwljnftb0unTPEmgSROe3WavzEkc0lVlh8aMASIiuFKmEOmRkMBdIfv3A69/acB32f3wXvgkzHrgh9emGuDnpyGo4GCouYGAvz9G5A5E3G885jFxInBjZ
TBevxSI2BH+cJkXyN1VTmrPHu4aXLLEiYqZpjfDONLF0VscREQtWvDiKUtMlRTOb9o0/ibfti1RUxgpJo87TcvJXUSm7Rq6gYzGR7ugjBzTKy5GmtDESPdyckvIz4/o3i/cnXb0a+ftrnKE3gM4SskRkbYpU4DoaC5JIMTTnDsH+PsDDRsC9zYEY72bL9zWBeHNCga4HTdAvZm4T4bBYLugQkIePafBALd1Qfh6cQgKFwZyjguCz2YDpk4FlDLgjnsQyo8Mga/BgGrVbBemNe3axbsptm4N5M2rOxrLUpxonEudOnUoNDRUdxhC2ESHDsDWrbxfxsCoAHh09sFb8wzJ3SLBwfxBPnKk1jhTMpmAmTN5f+2FC7mG1nffcVfOnj2AA2yp81RXrgC1agH58gGHDwPZsumO6NmUUvuJqE56jpUxDjsWHc391kuX6o5E2KstW4B164CqVYHLl4FttUai9w8GHD+e4iCDwa6SBgAcOMAhnT4NdOwIfP45MGgQf0Nv1cqxp6PHxwNvvcV7s69a5RhJw1ySOOxYjhzAX3/xvHuTSXc0wh41bQoMHgyEhvIHrtHIi/28vHRH9nS1a/Ng8a5dQJujARhSPRgTJvDEkJgYIHK94y4IHDsW2LkTmDcP8PTUHY2VpHcwxJEuzjA4nuSnn3jQ8+efdUci7I3JxJMnKlUiKlmSS4w0bWrfUz4ft2ABD+bfye5OfcoZKVcuouNzeGDdtN3oUK+FiCg0lN+v776rOxLzwYzBcRnjsHPx8UDFikDhwrwbm0ptF3aR6Zw/D7RsCTRuzPtsVK3K/eqHDwMlS+qOzjyTJgG7JwXjl5y++Ib80ONeIOKXBWHaft6F0JGmsRJxiZcuXRxvYouMcTiRLFmAESO4y2rnTt3RCHvx8cfAf/8Bixfzh9SiRfwB62hJA+DX8tURA7IN9sPQu5PwbRY/NJ9sQIuDAbjyUzDeey/F1qp2WtPq/Hng2DH+Ytetm+MlDXPJdFwH0Ls3cOqU4880EZbx9988YaJcOeD6dZ6dVKKE7qgyTimg0uVgIDAQh9v7Y9DWQGw9bsCWbD74JYcvXp8fhI/yGTClVXDy1GI7cvcu0LYt/3v6tHMOhj9OWhwOIEcO3uLT0tVMheOhKQFY1jcYefMC//7LyWPdUPv8Fp5uiUUO438MQu8LE+FLQVjn5ov9fwNzDUFY7+aL3FPHIfp1DetRniEqipPGP//wtOLMkDQASRwOZe9eYNYs3VEInQ5n98GoA754NUswihQBCh4ORu9NDl5ZNnGxYJZXDVi3Dvg7nwH98gbhw5dDMGKTAUca+mEcJuFCGz+7ShoxMUC7dsAffwDLlgHNm+uOyIbSO4ruSBdnmlWV0gcf8N7k58/rjkToYjIRzWjPlWWn5fSncFd3ivvNuUp17N1LlD07b606rhG/1hNvJFfRDQ21jw3PJk7kLXKXLNEdiWVAquM6Z+I4f54Txwcf6I5E6GAy8Q5yRYsSzczLlWX/7e6clWWXLUuuueVX2Ug5cxL9M9dIcQW43lXnzkRRUXpjjI0l2rJFbwyWZE7ikK4qB+LhAbz5Jk+/jIjQHY2wpYQEoEEDoGtXoPLVYHSLDMSKiv4ou9k5K8u+/TawdHAI3NYFYfwOA9zdgWYTDbg9Nwjj24RgzRqgSROegmxLYWHAG28At27xzKkWLWx7fruR3gzjSBdnbXEQER04wO3Ezz/XHYmwpSVL+P/91SxGupPNna7+ZKRz5+jJKrROaN8+orVrifLk4b3S790jWreOFzyWKkW0Z49t4ggNJSpenOOw1TltCY7S4lBKtVJKnVRKnVFKjUrl8d5KqRtKqYOJl7464rQn3t5cB8eaW38K+xIfD3zyCfDCC4B3fAgi5gehyFsGlC4NHiwOCuIBZif04AHQuTMwahQXQTx0COjenWcy7d7NCwN37LBuDCYTMG0at/hcXPi8DRpY95x2L70ZxtIXAK4AzgIoByAbgEMAqj52TG8As
819bmducYjMZ9Eibm1kzcrfeH19dUdkW9u38yD0O+8QzZrFv4vhw/mxO3eSS6wEBxMdPGj5848Zw+fs2JHo5k3LP7+9gIO0OOoCOENE/xLRAwDLAbTXGI9DSUgANm9OsaJWOI+AgIfjFgkJXJKjQ75gfBAXgMuXgXr1NMdnY82acdn1RYuAIkWAgQO5BTB/Pu9z4erKvyc/P6BmTaBXL+DChec7561bvM8JwOdbuBBYvRooVOi5X45zSG+GsfQFQBcA36a43QOPtS7ALY4rAA4DWAWg1FOerz+AUAChHh4eFs3E9mjFCv4W9OuvuiMRFvfYuMXuSTwltVMBIxUvThQdrTk+DR48IKpXjyhfPqL//iNaWGkKveJipN9+Sz7m7jojbWg8hbJn5+m8vXoRHT5s3nmOH+cWRr58RC1bWvIV2D84wnRcAG+kkji+fuyYQgCyJ15/F4AxPc+dGbqq7t/nbovmzXVHIqwiKXn48/avr7gYCSD65hvdgelz9izRJ58QxcURRW0wUrirO72W00hHjtAjyfb8eaIBA3gQe9Mm/tkjR7gS799/E126RHTtGtGtW8nP/eOPRN7e/Ino4kLUrh3RoUNaXqY2jpI4GgDYkuL2aACjn3K8K4A76XnuzJA4iHhmFWCdfl2h3z++vFZjkvInDw+iMmX4C4Pg5HFtuZFuurjTjLz+FF/wyZllUVHJ4x9J75XHLxER/Pj48UR16xLNnEl05YptX4u9MCdxaCurrpTKAuAUgFcAXAIQAuBtIjqW4phiRHQl8XpHAB8RUf1nPbczlVV/mtu3gVKluDrq99/rjkZYEhmDEdHSF/Nd/PDOg0DEfB+E294G1KihOzL99u3jmYUbNgAFZo5DsQWTML+IP7qenog8eVL/GSLg7FkuEHn7Ns9Ui4sDevTgcQsi2bLAnLLq2lociQmrDTh5nAUwNvG+iQDaJV7/HMAx8IyrYACV0/O8maXFQUT0/vu8kc+DB7ojERZjNNL9fO7UFEZydSWa0sr512qY49o1/nUMqGgkk7s7ne7qT9fhTiPqGOV98BzgCF1V1rxkpsRx965j7fgm0mHKFBpaw0i5c/M7tGxZosvLjERTpuiOzG4Ej+MJA0v7cDLdOJxvf9HSSCaT5uAclDmJQ0qOOLg8eXg6YmwscP++7mhEhqWYgvtXk5GYeciABrHBGJ8jAEoBL/gagJEjNQdpP5rmCkFg0yD8b7EBx44BbaYasKlXEMK3hGDMGN3ROT9JHE7g0iWuY7Voke5IRIb5+AC+vkBwMIoXBwZWDcayeF/8HuODUaN4J0iRwsiR8AsyIG9e4Icf+K6e3xkQ+e5IfPEFb24lrEf2HHcCRLwo7M4d4MQJLosgHFDihkYP+vjhbkAg+uQOwv68Bpw96/xbkWbUuXNA6dLJA9sJCVwIdPVq3kq3e3et4TkU2XM8k1EK+OAD3l520ybd0YgMMxjwd30/ZJsyCd+QH9ZHGjBihCSNpylThv/+L1zgSrmurrw28cBzAAAgAElEQVStbrNmvOXyzz/rjtA5SYvDTOHhvDH91at8uXOHp/XFxfEfbf78fClcGKhUCShe3DbT/OLieBvRihWB7dutfz5heXfWBuNBR18szOqHARSIg6ODUG+UATlz6o7MvkVHcwJp1IhbGgBw7x6XPA8NBdavB1q10hqiQ3CY6bjWulhiVlVCAtE//xD98APRe+/xbmTu7pTqIqKnXXLn5oVFw4cTbdzIs6CsZcoUPuexY9Y7h7ASo5Hu5eQpuADRzk9kCq45khb4rVuXfN/t27wa3M2NCyCKp4MjLAC0poy2OM6dA7Zt48v27cDNm3x/7txAjRpAlSp8KVcOKFqUC67lz88b1GfNyv2rERG8wOjKFeDkSb4cOAD89ReXiM6aFWjdGujWDXj9dSBHDsu97ogILjvduLEsZnI0cZMD8NZ0H2w3GaAUMHYsMLx2MJdLl9lUzxQXx1sOREUBx4/jYSvtxg2gaVN+b
2/YYFdbltsdaXFkoMURHc2F0QCuAdWzJ9e2OXLEMuskoqK4PPSHH/LzA0R583JL5NKl539+4diSSqcn1UoaMkR3RI4nOJh/f/6P7aZ77RpRtWpEOXIQbd2qJTSHAFnHYb4cOYBly/jbSlgYT/Hr2xfw9OSxi+eVMycP2E2fzgN527cDbdoAX37J/bN9+wIXLz7/eUwmYMgQXhYgHIfJBLi780C4UsDQobojcjxNm/Isqlu3Hr2/cGGesPbii9zK37xZS3hORbqqNPv3X04mCxfyNNpRo4Dhw/FcA6Kvv85dYxcuyE6BjuLkSaByZe727NQJ+Okn3RE5JpMp7enoN28Cr74KHDsGLF7M9a5EMpmO60DKlQPmzAH++Ye3wxw/nsdRnudb0QcfcN+ufPg4hnPvBWDTiGC4uvI42PDh4K/I0mw0W1LSCAl5uBD/IXd33ma2QQPg7bf5fScyRhKHnShThreO3rGDB+Nbt+adx6KjzX8ugwHw8gJmzJAdAu3dmTPAO4E+6P6LL0bUCcaYMUDtu7wQED4+usNzSETc9dunD5fiSSlfPv5S9vrrwKBBPAnBZNITp0NL72CII10cvchhTAwPogNEFStmbL+NhQv557dvt3x8wnLef5/I1ZWoKYwUl583bpJpuM9v2zb++588OfXH4+KI+vblY7p04ckrmR2kOq5jJ44k27cTlShBlDMnbxVrjpgY/lA6dco6sYnnNGUKRa7nCri5chGVL090t2MPSnVakMiQjh35dxsWlvrjJhPRtGlEShHVri2zGyVxOEniIOLdyF56if+nxozhhYnCCRiNFJUrecHfUEwnExRRjx7S4rCQs2d5in2PHk8/bv16XqhbpEjm/rWbkzhkjMPOFS0KGI3cX/vZZ1zAzZzy6SEhydVDhR0xGDChShBWKl8sc+2J6RiOB59N4+k+QUEPK+WKjCtXDvD359lq9JSxvtdfB/buBQoUAJo3ByZPlnGPZ3paVgHgkd4MZE8XZ2pxJElqVgNELVoQ3buXvp/r1Yub67dvWzU8kQF79xJ9At5XPKTKY1+LjbJxk61FRhK9/Ta/x5o3J7pwQXdEtgULtjjWJl1RSq22ZgITT6cUMGwYr/fYto3no9++/eyfGzKEyzAsXGj9GEX6xcUBxnHB8EMgJsEfNa/9+mgLwyAbN1kKEbByJZcceZrcubmy7vz5wJ9/8uLfRYtkZmKqnpZVABxI7bq9X5yxxZHS6tVE2bIR1alDFBHx7OObNCEqXZpnkgj9jh8nap+XtzrtU95Ib79N3MKQsQ2rSEggqlmTyMODSwulx9mz/L4BiFq2JDp50qoh2gVYsMVBaVwXGnXqBKxZwwUNX3uNWxRPM3Qol4Jft8428Ymnmz0bqHwvBL4IwoiNBixdCm5hBAXxoJSwKBeX5FI/6d0ZsFw5HlucNQvYs4dbHx99BERGWjdWh/G0rAIgAcBdAJEA4hOvJ92+m97sZOuLs7c4kqxcyQXxmjV7+jep+Hiebjhvnu1iE6mLiOAxpxw5iBo00B1N5tK+Pc+eunrVvJ+7coWod29ufRQpQvTll8657gOWanEQkSsR5SWiPESUJfF60u281k1p4lm6dAG+/567xt96i8u6p8bVlb/I9u9v0/BEKr77jluIMTHcj7527bN/RljG1Km8knzcOPN+rmhR/n/buxeoVg348EOgfHkuUBoRYZ1Y7Z1Mx3VwPXoAX33Fu5x9+GHaxynFUwwPHbJdbOJRJhPXR8qTB8iVCyhYEGjZUndUmUeFCsDEiVxFNyPq1eOq1r//zlN8hw0DSpQABgzIfO8rrYlDKdVKKXVSKXVGKTUqlcezK6VWJD7+l1KqjO2jtH+DBnFhw6++4j7ZtHz2GVCnDnDpku1iE8mUAsaM4X7ymBhem2PJjbzEs40eDXTt+nzP0bgxt/JDQ3ld1eLFvIlUtWrAJ5/w1gw6ZmIR8VbWtqAtcSilXAHMAdAaQFUAXZVSVR87rA+A20T0I
oAZAKbYNkrHMXUq0LEjJ5C0BsG7dUv+1itsTylg924gSxa+/d57euPJrO7f5/fLzp3P9zy1a/N03bAwnvDwwgucOKpVAzw8gF69ePHtsWNAfLxlYk9y8yb/LX37Lfc0NG/OO5LWqGHZ86RF234cSqkGACYQUcvE26MBgIg+T3HMlsRj/lRKZQFwFcAL9IygHWk/DkuKjubJOceO8X4c1ao9eUznzlyB9+LF59vzQ5jn9Glu8f34I++RYjDI+IYuMTFApUq8wdO+fWnv35ERV65wt/H27TwrKzyc78+enWdmlSsHlC7NiaVQIa7Wmy8fbymtFF9iY3kcLCqKf/7aNb6EhfEWuOfOPdqyyJGDn9vLC6heHRg8OGNbR5uzH4fOxNEFQCsi6pt4uweAekQ0KMUxRxOPCUu8fTbxmJtPe+7MmjgA4PJloFYtIG9eflPkz//o47t2cVN77lzumxW2MWQIt/QSEnhQvHhx/vAQeixZAvTsybt+vv22dc5hMnG31cGDfDl8mD/0L1wwr2wQwOVQihfn7RfKlOEEVKUKJ8DSpS2zS6mjJI43ALR8LHHUJaL3UxxzLPGYlImjLhGFp/J8/QH0BwAPD4/a58+ft8GrsE+7d/M32pYt+dtPym9URLzNQ4ECwNat+mLMTCIjgZIlOWnUrs2Dq0Ivk4nH+27f5k3Usme37blv3OBz37nDl/j4pB3nOZbcuXkCRaFC3AWWLZv14zIncWSxdjBPEQagVIrbJQFcTuOYsMSuqnwAHttRmBHRfADzAW5xWDxaB/LyyzxIPnAg97l+8knyY0rx4sFixfTFl9ksWQLcvcvX//sPOHKEuxWEPi4uwJQpQIsWQGCgbfd4d3Hh8YgiRWx3TkvTOasqBEAFpVRZpVQ2AG8BWP/YMesB9Eq83gWA8VnjG4L5+fHg3KRJXNsqJQ8P7lO19ICdeBIR8PXX3HWYIwd/yyxdWndUAuB6b8OGAXXr6o7E8WhLHEQUD2AQgC0ATgAIIqJjSqmJSql2iYctBFBIKXUGwIcAnpiyK1KnFPepV6kCdO8OXL366ON793ICOXBAT3yZRXQ0T9W8e5cLG/bowUlE2Idp04CXXtIdhePRuo6DiDYRUUUiKk9EkxPvG0dE6xOvxxLRG0T0IhHVJaJ/dcbraHLlAlas4D7U7t0fXVleuTJ/mM2YoS++zCBXLp694+LCLbyBA3VHJB534wbPRJL1TeknK8ednKcnd5Vs3w588UXy/fnz8wK0n36SN4y1XLzI3YQLF3I3VZMmqU+RFnpFRvIswwkTdEfiOCRxZAJ9+nAtq/HjeX1HkiFDeIbH7Nn6YnNmM2fyzLaoKGDECP79C/tTrhyPCS5axDOsxLNpm45rTZl5HUdaIiJ4cZCbG49r5MrF93fpwq2Rixd5CqCwjHv3eAouEddICgnJ2KIsYRvXr3PhwpYtgVWrdEejhznTcaXFkUnkz8/lD86cAYYPT75//Hh+oyQlEmEZS5fy2NLdu0DZssnTcYV9KlyY3xerV/PCWfF0kjgyEYOB69rMnQts2sT3eXkBr7wi34YtyWTidTT58/NirrVreXaVsG8ffshbDxQurDsS+yeJI5OZPJmTxTvvJNfRiY7mN01QkN7YnMV///H056S9Gjp3lgWXjiBPHmDePC7pIZ5OEkcmkz07r2QOD+cpiACPe/z2G/Dpp3rKQTub8uU5MQNck0im4DqWo0eTJ46I1EniyIRq1AA+/pgrta5dy2sMRo7kUhibN+uOzrFFRvIsqu++46qnXl5cAkY4jsOHeW+bFSt0R2K/ZFZVJhUXx6UWrlzhMux58vA35Rdf5E1qRMb4+QG//MJrY+rWBfr2Bfr10x2VMIfJBNSsyTPjTpywTYFBeyCzqsQzZc3K+5UndVlly8abQO3YwWW/hflu3+bd4KKjuaWxdy8nDuFYXFyAzz8H/v2XF2+KJ0nicHDR0cDff/OH1LFjwPnz6a/1n7LLasMG3p+jXz8u4
yzMt2AB/3/cvs27LSZtzCMcT+vWQKNGvEd5VJTuaOyPdFU5mCtXuCz6q68CFSsCGzcCbds+ekz27JwImjfn+kiurml/gD14wHtERERw4pECfBnz4AGv14iP53GOuDheVFm0qO7IREb9+Se/j0aPzhyLY6Wrygn9/juvtyhRAhg0iBMGwP3oq1bxuoygIN6DeNCg5JpIc+dyvao5c1JfhJYtG7CxSQAqhAVjVGLt4SNHgOUDgoGAANu8OCewZg3vvnj9Oifq5s0laTi6Bg14+npmSBpmIyKnu9SuXZucRUICUZs2vDdY8eJEn3xCdOxY+n9+3TqiOnX45wsWJJo7lyg+/rGDjEaKzOFOTWGknTuJFv/PSNfhTifnGi36WpzKlClExuTfT3w80bhGRhqBKQQQbdigMTZhURs3Es2erTsK6wMQSun8jNX+IW+NizMkjpQf7h9/TDRjBlF0dMafb+9eoiZN+H+8R48nH4/eaKSbLu40u6A/JRR0p7a5jNShQ8bP5/SMRiJ394fJI3qjkW4od2qf10jlyqWSnIXD6tGDyM2NKCxMdyTWJYnDwRPH338TeXo+8oXWIkwmouXLifbs4dv373OLJsmZt/2JAApu5E8TJvBfR2ioZWNwKonJY42XP93Jxi02V1eiadN0ByYs6d9/ibJmJRowQHck1mVO4pAxDjuzdClQrx5PkyULz1tQCnjzTe67BXgabtu2iaVHgoNR/rdA/Ozpj2q7AuH7QjDc3bnwm6XjcBoGA6518kPHI5Mw39UP0XUNOH9e1m04m7JlgXff5fHD06d1R2MfJHHYCSLeH7xHD15pfOQI0KyZdc9ZowaXVB9ULRjxnX2BoCA03D4RffMEodgQXyzsHoy6dWVv8jQFByPHD4H4zNUfvWIC8XmLYJQoITPTnNHYsVya5+OPdUdiHyRx2IlffgHGjQN69uSyH4UKWf+cfn7A7t1AlXsh8KUgHCpoQOHCQIdZBnSMD0KRCyGYMoUXC4rHBAcjoYsvOsUFYW7xifhfziDUmuKL+5tl2b0zKlKEZ1g1b647Evsg6zjsBBHXjerQwfaLxk6e5HUhJhPv15E9O7d2DhzgHdEOHeLyC5072zYuuxYQgC93+WDUFgPi4rguVZ9ywZj+VggX/hLCwZizjkMSh0YmE3/G9O0LVK6sN5YLF7gceJMmfPvkSd4xsFMnrrv0zz+cVKQbJtkff3A5+oMHeQHg2rVA+/a6oxLWFBcHzJ/P741GjXRHY1myANABmEw84DZ9evJiPp08PJKTxtKl3OoYMwZYvpy3l715k2+LZB4eXO7lhRd4APXxFfzC+SQkAF98IZNGJHFoMm4c1zYaM4a/tdqLpKKHrVtz7aqKFXk3Oz8/Xn2+e7fuCPULDwfef58nM5hM3CIbNIhXjAvn5uYGfPIJby+7dq3uaPSRxKHB4sU80Na3L2+eZE+F8AoVAn7+mSuDvvkm8PXXfD1nTqB0aY45NlZ3lHrNnAnMng0sW8bjPl9+mbxxk3B+PXty1/KYMZl3xqGWMQ6lVEEAKwCUAXAOgC8R3U7luAQARxJvXiCidul5fnse4yDigWginj1lrzOWli0DuncHPvqIt0FdtgwIDOSmer9+XHo6MwoP524pDw8uCnnwIE9rFpnLmjX8peHbb4E+fXRHYxnmjHFksXYwaRgFYDsRfaGUGpV4+6NUjoshIm/bhmZdSvGYRmys/SYNgMuC//471zncs4erhH7/PbBzJycNkylzJo+pU3mG2dWrQNWqPONMEkfm07EjJ4zy5XVHooeut357AD8kXv8BQAdNcdhMTAzvY3zzJg8858unO6JnmzGD15fUrw9Mm8aziBYu5G9bPj5cPjwzuXqVtxT18eGWx6VLwOrVuqMSOijFrY2mTXVHooeuxFGEiK4AQOK/hdM4zk0pFaqU2quUcujkMngwf+jYaQ9aqnLlAl57ja+//DIQWDYA6z8IhosLd9G89x5AxsxTfp0I8PXlP
VE8PIA7d+xrYoOwvVu3eFX57Sc62p2b1RKHUmqbUupoKhdzZrp7JPa5vQ1gplIqzYahUqp/YpIJvXHjxnPHb0k//MDfTsaMAVq10h2N+fbt426Zgi19sCjKF8dmB2P8eCBsaTDud/Dlr+CZQLFivHL44sXkfakbN9YdldDp4kXeZjaTfHdKlt5qiJa8ADgJoFji9WIATqbjZ74H0CU9z29P1XGPHCHKkYPIYCCKi9MdTcbExxPVrk1UrBjR/K68V8ept/zpdlZ3apXdSMeP647Q+ubMITo/cAr1LGWkMmW4cvCSJcQVcqdM0R2e0KhbN+couw4HqI67HkCvxOu9AKx7/AClVAGlVPbE6+4AGgI4brMILWTYMF5t/eOPQBZdUxGek6sr8M033Md/xN2AoEJ+qLB8Ekz9/bA/rwHr1+uO0LqOHeN1G3P2+WDaRV983DAYLVoAbxYO5r6rTNLiEqlLWs8zYYLuSGwovRnGkhcAhQBsB3A68d+CiffXAfBt4vWXwFNxDyX+2ye9z29PLY4bN4j++kt3FJbRvz/RKy5GisntTlvxCkVny0u3VqfYNMRJv323bEmUPz9RlSpEPUsZyeTuTuTv/8hGTiJzGzqUyMWFHLr1DTNaHFq+AxNROIBXUrk/FEDfxOt7AHjZODSLOXMGKFMGcHfnizMIaB2M+AW+2No/CKdOAXU2dESenh2AAmtx6hRQargvcqwP0h2mRW3eDGzZwgv8Fi0CWgwx4H42P7hNmgT4+wMGg+4QhR0YO5Zb5Nmy6Y7ENqTIoRXcusVF0Jo141XiTiMgADGePsjRxoCICKBv+WB8F9ERuZrWQdQfh9D+fhCGbTA8nInl6OLjeY3Ggwe8cj4iAnjxYjDW5/BFrmF+vCIyKEiSh3AKUuRQs8GDgWvXgKFDdUdiYSNHIkcb/pA8dAjo9LUBM0yD4WLcDrcP/BBR04CuXYGjRzXHaSEJCcAbb/Bir8OHgS6FgrGcfHF/cRAwcSInDV9fIFj24BDs1Cne7MkJv48/QhKHhf3yC5fn+PhjoFYt3dFYx9GjvPApfmswhmTjHfDUvEBsGRWM3Lm5Suz167qjfH7Zs/P/49q1QKVKQPbDIVjcNggFOye2MAwGTh4hIXoDFXZjxw6uQ+f0BRDTOxjiSBddg+O3bvGU1erVie7f1xKCzYxrZKQbcKczC4xUsCDRoGo8aHziGyO5uRGNGKE7wuczbBjRhg1ECxbw1NsuXYiUIjp1Sndkwp7FxfEkigoViB480B2NeeAA03Gd0rVrPBD+3XfOP0j2fv0QdHUNwhd/GTBrFjD7mAFr3gxC5cgQ7NoFfPaZ7ggz7tdfeZ+UvXu5hHb9+nx/ly5AhQp6YxP2LUsWYMoU4PRp3vDJWcnguIVlpuJ/H37IJcYPHOBZJUYjcORIcuG3a9eAlSt5rwpHERUFVKsG5MgB/O9/XB3YaOReqfv3uftKiKch4r+X48cda9dMGRy3sagoYPx4LvqXWZIGwLNRy5cHzp0D5s7lVlbv3jyoDPB977/PG0E5iuDWASh7PhhTp/JObwYDUPUa1+OSpCHSQykuCvr22/xF0imlt0/LkS62HuMYMYL7wXftsulp7UJ8fPL1xYv595C0BjA+nqhjRx4bWLFCT3zmOHCAyAAj3XVzp5ntjeTiQjSzPZdYub5CFvoJ5wYzxji0f8hb42LLxHHgAJGrK1HfvjY7pd2JjydavpwHAzt3JsqWjejQIX4sOpqoUSOiLFmIfvlFb5zPYjJx/anjczhZ/FLTn24odxrTQJKGyJgdO4g+/VR3FOkjicNGiSM+nqhuXaLChYnCw21ySru0eTP/JQUGcomVIkWIvLyIYmP58Tt3iOrUIapZ89EWir1ISCC6cIGvm0xEzZoRBbj5EwH0CfwpJERvfMJxjRjBLe79+3VH8mzmJI5M1CNveXPncsnxGTOAggV1R6NPixZAkybAuHG8q+HChTxIPnYsP543L5ft2LyZCybam
5kzuWz8qVPAunWAyRiMga6BmOrmjyHZAlEnUhb4iYwZMwYoVIiLnZIzzUNKb4ZxpIutWhynThF9/DF/S83s9u/nb1bDh/Pt997jVsiGDY8eFxdH1L070datto8xNZs3c1djhw5EkZFEbxUxUrirO/0+wUhZsxKdnm+UYobiucyZw++Fdet0R/J0MKPFIdNxhcW88w6wdCm3NkqX5vUPYWG8W2DJknxMeDjPVDp5kqfqtmunL95Dh4BGjYBy5YBdu3iWWLZZAeg+0wfVhxhw7RpQpAi4pEhICDBypL5ghcOKjwe8vHi24dGj9rvGS6bjWtnvvwMdOjhHWQ1L+vxzLgp4+zbg5sbVOGJjeVpifDwfU6gQl2Xw9gY6deLyLDpcvcrb4ubNC2zYwMnuq6+A6IEjkbc9lxQpUiTxYINBkobIsCxZeEr6wIE8VdcZSOIwU1wc/wEcOgTkzq07GvtSpAiP+SSttK5YEZg3j7/NjxuXfFzBgsC2bfxtv3t3XqVta4UKcfHCjRt5tX+fPkCpUkDfvrw6fO5c28cknFeLFsCQITwG6AwkcZhp1izeEW7WLC61LR6lFHDvHpdduH8f6NYN6NePWyNBKbbqyJOHB8zffRd4+WXbxXfhAl+yZgW+/ppbSJMmAf/8wyUipkzhroT27W0Xk8g8li7lxcIOL72DIY50sdbg+KVLRLlzE732mlWe3mls28aDgUnz12NjiV56iShnTl73kpapU5PXf1jD8eNEJUsS1auXPKFhxw7eua1XL6Lduznujz+2Xgwicxs0iP/eDh/WHcmTIOs4rJM4BgzgxW1nzljl6Z1Kly5E2bMnV5O9coU/tEuXJrp+/cnjb90iKlqUf78BAZZf77F6NVGBArzGJCk5Xb3K1YwrViS6fZvXmZQsSXTvnmXPLUSSmzf577BZM/ubjSmJw0qJIzycaP16qzy107l8mShfPiKDIfkNEhJC5ObGrY+oqCd/5vp1LlECEDVoQLRnz/PHce8etyYAXoSYlPTj44maN+d4Dh0iOnqU9xVfvvz5zynE03z1Ff89rlmjO5JHSeKwcOKIj7fPFc/2bu5c/gtbtCj5vpUreb1Hmzap71dgMhH98AO3DPLk4VXnGZGUrO7f59X9/v6Pnm/iRI5twYLk+8LD7e9boHA+Dx4QVatGVKZMcnUFeyCJw8KJ45tvuBvj5k2LPq3TS0ggGjyY6NixR++fN4//8rp142NSExlJ9PvvfN1kIvL1JZo9O/VurpTnO3KE6LPPuPvp1i2+//EEtXw5J6/u3fm5//wz7TiEsIbdu4nWrrWvLyqSOCyYOG7eJCpY8NEuF2E+k+nR399nn/Ffn5/fsz+0r13j2ldctIHIw4OodWui337jx0+eJGrXjqhQoeRjXn019bGorVuJsmblwovR0dwdphTRF19Y7rUKYQ57+VwxJ3Fk0TmjyxGMGwfcucPTb51l8Y6t3bvH+3S0bcv/AsCoUUBEBBAQANy9y7smpjXHvXBh4PBhvmzaxKtvjx7lfVAAIDqa60y1bcs1s5o2BcqWffJ59u/ntRuVKwPr1/N9//sfr9/w87PwixYiHQIC+O966VLdkZhHEsdTHDrEC8EGDuSSASJjcuQAbtzgTZ0aNeLNn5TijZLy5+dCcLdvcwmSp62NqV6dL4/z9gZOnHh6DKGhQJs2vPBv82Y+78iRXPrkt98cZ5c24Vzi47l6Qs+evEjQYaS3aWLJC4A3ABwDYAJQ5ynHtQJwEsAZAKPS+/yW6qrq3Zu7P5L6ykXGnT/Ps6waNOBChynNm8fdRXXrEp07Z/lzr1vHa0jKlEmeHvznnzyfvn9/y59PiPSKjSWqUIHoxReJYmL0xgIHKKt+FEAnADvTOkAp5QpgDoDWAKoC6KqUqmqb8Ni8ebzfdIECtjyrc/LwAAIDgT//BCZOfPSx/v2B1au51eDtDaxZY5lzEnH9qQ4deB/xvXu5nAjABecaNwamTrXMuYTIiOzZgTlze
G/ygADd0aSflsRBRCeI6OQzDqsL4AwR/UtEDwAsB2CTQhAxMdwvny1b6l0jImO6dgV69QIWLOBxo5Q6dgQOHABefBHo3BkYMIC7tzLqv/+A1q25PlC7dlzg9mHRQgANG/J90kUldHv1VeDNN7ncza1buqNJH3uuVVUCwMUUt8MS77O6L74AKlXifndhWXPncoXyfPmefKx8eeCPP3jTm4ULudz5J58AkZHpf/7ISH4DVqvGzzVrFrdmcuXix2fO5OdPSLDM6xHCEmbM4GKgjrIhnNUSh1Jqm1LqaCqX9LYaUpvDlObmIUqp/kqpUKVU6I3n+Kp67hw3GZs0kS4qa3Bz4705TCZg2jTenyOlbNn4/qNHebBwwgTu5urVi3fni4l58jmjo4GdO3k/kGLFeMZWixbA8ePA4MHJuw7u3AkMH86tERd7/sokMp1ixYBatfzM0WMAAAhJSURBVPi6Q3xhTe9giDUuAHYgjcFxAA0AbElxezSA0el53ucZHO/cmQdSL17M8FOIdDh+nGtZNWr09NpQe/cS9ejB5UAAHtAuWpTI25vo5ZeJSpRIXruROzdR376plyo5c4ZXo1esSBQRYb3XJcTz+Pxzrp+mY0IOnGQdRwiACkqpsgAuAXgLwNvWPOH27dyt8emnyTvWCeuoUgVYvJjHPV5/nTdTSm0qbr16fImL4w2gdu7kTZiuXeP1H82b87hIlSpAy5ap75Fy4QLQrBlPffz559S7yYSwBy1aAB9/DHz0EZf5t1vpzTCWvADoCB6zuA/gGhJbFgCKA9iU4rg2AE4BOAtgbHqfP6MtDj8/orJl9U+Ly0yWLuVWRPPmvJLbGjZv5tbG/v3WeX4hLGn4cG5B79hh2/NC9hzP2J7jRPxNtmhRKwQl0rRkCU/J3baNZztZyt27ybOmoqKSB8iFsGfR0YCnJ1dSOHSIxwVtQfYczyClJGno0KMH8O+/yUnj8uXnf86tW3mWVtKaEEkawlHkzMlryK5cAQ4e1B1N6iRxCLtQrBj/+8svPA33m28yNmU2IYG3qW3Vir8EyDoc4YhefZVneNavrzuS1EniEHalQQOeCj1wIK8i37CBuxDTY8sWrik2ZgwvqNq7lwfOhXBEBQvy335QEHD/vu5oHiWJQ9gVd3cuQpj0Znn9daBPn+THUyaR+Hhg3z7g0iW+nbQmZNUqLhwn3VPC0f31F38JerxMj24yOC7sVlwcsGgRL9br148HDUuV4uSRJQvfjorielPDh/OiQqLkBX9COIP//Y8nkOzdC9RJ19B1xpgzOC6JQziMK1c4ScTF8VhGtmzctdWsGfDCC7qjE8I6bt/mWVb58vGeMjlyWOc85iQOe14AKMQjihUDvvxSdxRC2FaBAtzybtWKy+nMmqU7IkkcQghh91q25LptPj66I2GSOIQQwgGMH598nUjvVtYyq0oIIRzIF19wjTedw9OSOIQQwoG4ugIrVvDeNrpI4hBCCAcybBgPlH/wgb6SJJI4hBDCgbi48JYEhQrx4sB79zTEYPtTCiGEeB4vvAD8+CNw/jzw+++2P7/MqhJCCAfUpAlvg5xUINSWpMUhhBAOKmVV6T/+sN15pcUhhBAO7MEDrtV29y7w99+2aYFIi0MIIRxYtmzA6tWcOL77zjbnlBaHEEI4OE9P4MABoEIF25xPEocQQjiBihVtdy7pqhJCCGEWSRxCCCHMIolDCCGEWSRxCCGEMIuWxKGUekMpdUwpZVJKpblVoVLqnFLqiFLqoFJK9oIVQgg7oGtW1VEAnQDMS8exBiK6aeV4hBBCpJOWxEFEJwBA6dzCSgghRIbY+xgHAfhNKbVfKdX/aQcqpforpUKVUqE3btywUXhCCJH5WK3FoZTaBqBoKg+NJaJ16XyahkR0WSlVGMBWpdQ/RLQztQOJaD6A+YnnvqGUOp+hwPVxB5DZuuTkNWcO8podQ+n0Hmi1xEFEzS3wHJcT/72ulPoZQF0AqSaOx37uhec9t60pp
UKJKM2JAs5IXnPmIK/Z+dhtV5VSKpdSKk/SdQAtwIPqQgghNNI1HbejUioMQAMAG5VSWxLvL66U2pR4WBEAu5VShwDsA7CRiDbriFcIIUQyXbOqfgbwcyr3XwbQJvH6vwBq2Dg0nebrDkADec2Zg7xmJ6OISHcMQgghHIjdjnEIIYSwT5I47JBSarhSipRS7rpjsTal1FSl1D9KqcNKqZ+VUvl1x2QNSqlWSqmTSqkzSqlRuuOxNqVUKaVUsFLqRGJ5oSG6Y7IVpZSrUuqAUmqD7lisRRKHnVFKlQLwKoALumOxka0APImoOoBTAEZrjsfilFKuAOYAaA2gKoCuSqmqeqOyungAw4ioCoD6AAZmgtecZAiAE7qDsCZJHPZnBoCR4FXzTo+IfiOi+MSbewGU1BmPldQFcIaI/iWiBwCWA2ivOSarIqIrRPR34vVI8AdpCb1RWZ9SqiSA1wB8qzsWa5LEYUeUUu0AXCKiQ7pj0eQdAL/qDsIKSgC4mOJ2GDLBh2gSpVQZADUB/KU3EpuYCf7iZ9IdiDXJnuM29rRSLADGgBc6OpX0lJ9RSo0Fd28ss2VsNpJaNc9M0aJUSuUGsBrAUCK6qzsea1JKtQVwnYj2K6Wa6o7HmiRx2FhapViUUl4AygI4lFg1uCSAv5VSdYnoqg1DtLhnlZ9RSvUC0BbAK+Sc88PDAJRKcbskgMuaYrEZpVRWcNJYRkRrdMdjAw0BtFNKtQHgBiCvUmopEXXXHJfFyToOO6WUOgegjrPvRaKUagXgSwBNiMgpyxorpbKAB/5fAXAJQAiAt4nomNbArEjxt58fANwioqG647G1xBbHcCJqqzsWa5AxDqHbbAB5wNWPDyql5uoOyNISB/8HAdgCHiQOcuakkaghgB4AmiX+vx5M/CYunIC0OIQQQphFWhxCCCHMIolDCCGEWSRxCCGEMIskDiGEEGaRxCGEEMIskjiEsLLESrH/KaUKJt4ukHi7tO7YhMgISRxCWBkRXQQQCOCLxLu+ADCfiM7ri0qIjJN1HELYQGL5jf0AFgHoB6BmYqVcIRyO1KoSwgaIKE4pNQLAZgAtJGkIRyZdVULYTmsAVwB46g5EiOchiUMIG1BKeYN3dqwP4AOlVDHNIQmRYZI4hLCyxEqxgeA9KS4AmApgmt6ohMg4SRxCWF8/ABeIaGvi7W8AVFZKNdEYkxAZJrOqhBBCmEVaHEIIIcwiiUMIIYRZJHEIIYQwiyQOIYQQZpHEIYQQwiySOIQQQphFEocQQgizSOIQQghhlv8D/4WbRWBoEDwAAAAASUVORK5CYII=\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot(xt, f_mean[:,0], 'b-', label='mean')\n", + "plot(xt, f_mean[:,0]-2*np.sqrt(f_var), 'b--', label='2 x std')\n", + "plot(xt, f_mean[:,0]+2*np.sqrt(f_var), 'b--')\n", + "plot(X, Y, 'rx', label='data points')\n", + "ylabel('F')\n", + "xlabel('X')\n", + "_=legend()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The effect of the mean function is not noticable, because there is no linear trend in our data. We can plot the values of the estimated parameters of the linear mean function." + ] + }, + { + "cell_type": "code", + "execution_count": 36, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The weight is 0.021969 and the bias is 0.079038.\n" + ] + } + ], + "source": [ + "print(\"The weight is %f and the bias is %f.\" %(infr.params[m.mean_func.parameters['dense1_weight']].asnumpy(), \n", + " infr.params[m.mean_func.parameters['dense1_bias']].asscalar()))" ] }, { @@ -417,7 +559,26 @@ "source": [ "## Variational sparse Gaussian process regression\n", "\n", - "TBA" + "In MXFusion, we also have variational sparse GP implemented as a module. A sparse GP model can be created in a similar way as the plain GP model. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 39, + "metadata": {}, + "outputs": [], + "source": [ + "from mxfusion import Model, Variable\n", + "from mxfusion.components.variables import PositiveTransformation\n", + "from mxfusion.components.distributions.gp.kernels import RBF\n", + "from mxfusion.modules.gp_modules import SparseGPRegression\n", + "\n", + "m = Model()\n", + "m.N = Variable()\n", + "m.X = Variable(shape=(m.N, 1))\n", + "m.noise_var = Variable(shape=(1,), transformation=PositiveTransformation(), initial_value=0.01)\n", + "m.kernel = RBF(input_dim=1, variance=1, lengthscale=1)\n", + "m.Y = SparseGPRegression.define_variable(X=m.X, kernel=m.kernel, noise_var=m.noise_var, shape=(m.N, 1), num_inducing=50)" ] } ], @@ -437,7 +598,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.6" + "version": "3.6.0" } }, "nbformat": 4, diff --git a/examples/notebooks/pilco.ipynb b/examples/notebooks/pilco.ipynb new file mode 100644 index 0000000..db09fa9 --- /dev/null +++ b/examples/notebooks/pilco.ipynb @@ -0,0 +1,51119 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# PILCO: A Model-based Policy Search" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Obtaining file:///Users/erimeiss/workspace/mxfusion\n", + "Requirement already satisfied: networkx>=2.1 in /Users/erimeiss/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages (from MXFusion==0.3.0) (2.1)\n", + "Requirement already satisfied: numpy>=1.7 in /Users/erimeiss/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages (from MXFusion==0.3.0) (1.16.3)\n", + "Requirement already satisfied: decorator>=4.1.0 in /Users/erimeiss/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages (from networkx>=2.1->MXFusion==0.3.0) (4.3.0)\n", + "Installing collected packages: MXFusion\n", + " Found 
existing installation: MXFusion 0.3.0\n", + " Uninstalling MXFusion-0.3.0:\n", + " Successfully uninstalled MXFusion-0.3.0\n", + " Running setup.py develop for MXFusion\n", + "Successfully installed MXFusion\n" + ] + } + ], + "source": [ + "!pip install -e ../.." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Common reinforcement learning methods suffer from data inefficiency, which can be an issue in real world applications where gathering sufficiently large amounts of data poses economic issues and may be impossible. Deisenroth & Rasmussen (2011) propose a model-based policy search method known as PILCO in part to address this issue. PILCO uses a Gaussian process (GP) for learning the dynamics of the environment and optimizes a parametric policy function using the learned dynamics model.\n", + "\n", + "In this notebook, we demonstrate a straight-forward implementation of PILCO. This implementation follows the idea of PILCO and has a few enhancements compared to the published implementation. The enhancements are listed as follows: \n", + "\n", + "- **Use Monte Carlo integration instead of moment estimation.** We approximate the expected reward using Monte Carlo integration instead of the proposed moment estimation approach. This removes the bias in the expected reward computation and enables a wide range of choices of kernels and policy functions. In the original work, only RBF and linear kernels and only linear and RBF network policies can be used.\n", + "- **Use automatic differentiation.** Thanks to automatic differentiation, no gradient derivation is needed.\n", + "- **A unified interface of Gaussian process.** MXFusion provides a unified interface of GP modules. It allows us to easily switch among plain GP, variational sparse GP and stochastic variational GP implementations." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Preparation" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This notebook depends on MXNet, MXFusion and Open AI Gym. These packages can be installed into your Python environment by running the following commands.\n", + "```bash\n", + "pip install mxnet mxfusion gym\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Set the global configuration." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'\n", + "from mxfusion.common import config\n", + "config.DEFAULT_DTYPE = 'float64'\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Example: Pendulum\n", + "\n", + "\n", + "We use the inverted pendulum swingup problem as an example. We use the [Pendulum-v0](https://gym.openai.com/envs/Pendulum-v0/) environment in Open AI Gym. The task is to swing the pendulum up and balance it at the inverted position. This is a classical control problem and is known to be unsolvable with a linear controller.\n", + "\n", + "To solve this problem with PILCO, we need three components:\n", + "\n", + "- Execute a policy in a real environment (an Open AI Gym simulator in this example) and collect data.\n", + "- Fit a GP model as the model for the dynamics of the environment.\n", + "- Optimize the policy given the dynamics model learned from all the data that have been collected so far.\n", + "\n", + "The overall PILCO algorithm is to iterate the above three steps until a policy that can solve the problem is found." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Execute the Environment \n", + "\n", + "The Pendulum-v0 environment can be loaded easily."
+ ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "import gym\n", + "env = gym.make('Pendulum-v0')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The state of the pendulum environment is a 3D vector. The first two dimensions are the 2D location of the end point of the pendulum. The third dimension encodes the angular speed of the pendulum. The action space is a 1D vector in [-2, 2]." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We write a helper function for executing the environment with a given policy." + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib import animation\n", + "\n", + "def run_one_episode(env, policy, initial_state=None, max_steps=200, verbose=False, render=False):\n", + " \"\"\"\n", + " Drives an episode of the OpenAI gym environment using the policy to decide next actions.\n", + " \"\"\"\n", + " observation = env.reset()\n", + " if initial_state is not None:\n", + " env.env.state = initial_state\n", + " observation = env.env._get_obs()\n", + " env._max_episode_steps = max_steps\n", + " step_idx = 0\n", + " done = False\n", + " total_reward = 0\n", + " frames = []\n", + " all_actions = []\n", + " all_observations = [observation]\n", + " while not done:\n", + " if render:\n", + " frames.append(env.render(mode = 'rgb_array'))\n", + " if verbose:\n", + " print(observation)\n", + " action = policy(observation)\n", + " observation, reward, done, info = env.step(action)\n", + " all_observations.append(observation)\n", + " all_actions.append(action)\n", + " total_reward += reward\n", + " step_idx += 1\n", + " if done or step_idx>=max_steps-1:\n", + " print(\"Episode finished after {} timesteps because {}\".format(step_idx+1, \"'done' reached\" if done else \"Max timesteps reached\"))\n", + " 
break\n", + " if render:\n", + " fig = plt.figure()\n", + " ax = fig.gca()\n", + " fig.tight_layout()\n", + " patch = ax.imshow(frames[0])\n", + " ax.axis('off')\n", + " def animate(i):\n", + " patch.set_data(frames[i])\n", + " anim = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=20)\n", + " return total_reward, np.array(all_observations, dtype=np.float64,), np.array(all_actions, dtype=np.float64), anim\n", + " else:\n", + " return total_reward, np.array(all_observations, dtype=np.float64,), np.array(all_actions, dtype=np.float64)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's first apply a random policy and see how the environment reacts. The random policy uniformly samples in the space of action." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "def random_policy(state):\n", + " return env.action_space.sample()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The animation is generated with the following commands:\n", + "```python\n", + "anim = run_one_episode(env, random_policy, max_steps=500, render=True, verbose=False)[-1]\n", + "\n", + "with open('animation_random_policy.html', 'w') as f:\n", + " f.write(anim.to_jshtml())\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "
\n", + " \n", + "
\n", + " \n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " Once \n", + " Loop \n", + " Reflect \n", + "
\n", + "
\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from IPython.display import HTML\n", + "\n", + "HTML(filename=\"pilco/animation_random_policy.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Fit the Dynamics Model\n", + "\n", + "The dynamics model of pendulum can be written as\n", + "$$p(y_{t+1}|y_t, a_t)$$\n", + "where $y_t$ is the state vector at the time $t$ and $a_t$ is the action taken at the time $t$. PILCO uses a Gaussian process to model the above dynamics.\n", + "\n", + "Given a sequence of state and action, we break them into the pairs of input and output for the above GP model. The below helper function is written to do so." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [], + "source": [ + "def prepare_data(state_list, action_list, win_in):\n", + " \"\"\"\n", + " Prepares a list of states and a list of actions as inputs to the Gaussian Process for training.\n", + " \"\"\"\n", + " \n", + " X_list = []\n", + " Y_list = []\n", + " \n", + " for state_array, action_array in zip(state_list, action_list):\n", + " # the state and action array shape should be aligned.\n", + " assert state_array.shape[0]-1 == action_array.shape[0]\n", + " \n", + " for i in range(state_array.shape[0]-win_in):\n", + " Y_list.append(state_array[i+win_in:i+win_in+1])\n", + " X_list.append(np.hstack([state_array[i:i+win_in].flatten(), action_array[i:i+win_in].flatten()]))\n", + " X = np.vstack(X_list)\n", + " Y = np.vstack(Y_list)\n", + " return X, Y" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In this example, we do a maximum likelihood estimate for the model hyper-parameters. In MXFusion, Gaussian process regression model is available as a module, which includes a dediated inference algorithm." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": {}, + "outputs": [], + "source": [ + "import mxnet as mx\n", + "from mxfusion import Model, Variable\n", + "from mxfusion.components.variables import PositiveTransformation\n", + "from mxfusion.components.distributions.gp.kernels import RBF\n", + "from mxfusion.modules.gp_modules import GPRegression\n", + "from mxfusion.inference import GradBasedInference, MAP\n", + "\n", + "def fit_model(state_list, action_list, win_in, verbose=True):\n", + " \"\"\"\n", + " Fits a Gaussian Process model to the state / action pairs passed in. \n", + " This creates a model of the environment which is used during\n", + " policy optimization instead of querying the environment directly.\n", + " \n", + " See mxfusion.gp_modules for additional types of GP models to fit,\n", + " including Sparse GP and Stochastic Varitional Inference Sparse GP.\n", + " \"\"\"\n", + " X, Y = prepare_data(state_list, action_list, win_in)\n", + "\n", + " m = Model()\n", + " m.N = Variable()\n", + " m.X = Variable(shape=(m.N, X.shape[-1]))\n", + " m.noise_var = Variable(shape=(1,), transformation=PositiveTransformation(),\n", + " initial_value=0.01)\n", + " m.kernel = RBF(input_dim=X.shape[-1], variance=1, lengthscale=1, ARD=True)\n", + " m.Y = GPRegression.define_variable(\n", + " X=m.X, kernel=m.kernel, noise_var=m.noise_var,\n", + " shape=(m.N, Y.shape[-1]))\n", + " m.Y.factor.gp_log_pdf.jitter = 1e-6\n", + "\n", + " infr = GradBasedInference(\n", + " inference_algorithm=MAP(model=m, observed=[m.X, m.Y]))\n", + " infr.run(X=mx.nd.array(X, dtype='float64'),\n", + " Y=mx.nd.array(Y, dtype='float64'),\n", + " max_iter=1000, learning_rate=0.1, verbose=verbose)\n", + " return m, infr, X, Y" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Policy Optimization\n", + "\n", + "PILCO computes the expected reward of a policy given the dynamics model. First, we need to define the parametric form of the policy. 
In this example, we use a neural network with one hidden layer. As the action space is [-2, 2], we apply a tanh transformation and multiply the outcome by two. This ensures that the returned actions stay within the range." + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": {}, + "outputs": [], + "source": [ + "from mxnet.gluon import HybridBlock\n", + "from mxnet.gluon.nn import Dense\n", + "\n", + "class NNController(HybridBlock):\n", + "    def __init__(self, prefix=None, params=None):\n", + "        super(NNController, self).__init__(prefix=prefix, params=params)\n", + "        self.dense1 = Dense(100, in_units=len(env.observation_space.high), dtype='float64', activation='relu')\n", + "        self.dense2 = Dense(1, in_units=100, dtype='float64', activation='tanh')\n", + "    def hybrid_forward(self, F, x):\n", + "        out = self.dense2(self.dense1(x))*2\n", + "        return out \n", + "    \n", + "policy = NNController()\n", + "policy.collect_params().initialize(mx.initializer.Xavier(magnitude=1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To compute the expected reward, we also need to define a reward function. This reward function is defined by us according to the task. The main component is the height of the pendulum. We also penalize the force and the angular momentum. 
" + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "class CostFunction(mx.gluon.HybridBlock):\n", + " \"\"\"\n", + " The goal is to get the pendulum upright and stable as quickly as possible.\n", + " Taken from the code for Pendulum.\n", + " \"\"\"\n", + " def hybrid_forward(self, F, state, action):\n", + " \"\"\"\n", + " :param state: [np.cos(theta), np.sin(theta), ~ momentum(theta)]\n", + " a -> 0 when pendulum is upright, largest when pendulum is hanging down completely.\n", + " b -> penalty for taking action\n", + " c -> penalty for pendulum momentum\n", + " \"\"\"\n", + " a_scale = 2.\n", + " b_scale = .001\n", + " c_scale = .1\n", + " a = F.sum(a_scale * (state[:,:,0:1] -1) ** 2, axis=-1)\n", + " b = F.sum(b_scale * action ** 2, axis=-1)\n", + " c = F.sum(c_scale * state[:,:,2:3] ** 2, axis=-1)\n", + " return (a + c + b)\n", + " \n", + "cost = CostFunction()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The expected reward function can be written as\n", + "$$R = \\mathbb{E}_{p(y_T, \\ldots, y_0)}\\left(\\sum_{t=0}^T r(y_t)\\right)$$\n", + "where $r(\\cdot)$ is the reward function. $p(y_T, \\ldots, y_0)$ is the joint distribution when applying the policy to the dynamics model:\n", + "$$p(y_T, \\ldots, y_0) = p(y_0) \\prod_{t=1}^T p(y_t|y_{t-1}, a_{t-1}),$$\n", + "where $a_{t-1} = \\pi(y_{t-1})$ is the action taken at the time $t-1$, which is the outcome of the policy $\\pi(\\cdot)$.\n", + "\n", + "The expected reward function is implemented as follows." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": {}, + "outputs": [], + "source": [ + "from mxfusion.inference.inference_alg import SamplingAlgorithm\n", + "\n", + "class PILCOAlgorithm(SamplingAlgorithm):\n", + "\n", + " def compute(self, F, variables):\n", + " \n", + " s_0 = self.initial_state_generator(self.num_samples)\n", + " a_0 = self.policy(s_0)\n", + " a_t_plus_1 = a_0\n", + " x_t = F.expand_dims(F.concat(s_0, a_0, dim=1), axis=1)\n", + " cost = 0\n", + " for t in range(self.n_time_steps):\n", + " variables[self.model.X] = x_t\n", + " res = self.model.Y.factor.predict(F, variables, targets=[self.model.Y], num_samples=self.num_samples)[0]\n", + " s_t_plus_1 = res[0]\n", + "\n", + " cost = cost + self.cost_function(s_t_plus_1, a_t_plus_1)\n", + "\n", + " a_t_plus_1 = mx.nd.expand_dims(self.policy(s_t_plus_1), axis=2)\n", + " x_t = mx.nd.concat(s_t_plus_1, a_t_plus_1, dim=2)\n", + " total_cost = F.sum(cost)\n", + " return total_cost, total_cost" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We optimize the policy with respect to the expected reward by using a gradient optimizer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [], + "source": [ + "from mxfusion.inference import GradTransferInference\n", + "from mxfusion.inference.pilco_alg import PILCOAlgorithm\n", + "\n", + "def optimize_policy(policy, cost_func, model, infr, model_data_X, model_data_Y,\n", + " initial_state_generator, num_grad_steps,\n", + " learning_rate=1e-2, num_time_steps=100, \n", + " num_samples=10, verbose=True):\n", + " \"\"\"\n", + " Takes as primary inputs a policy, cost function, and trained model.\n", + " Optimizes the policy for num_grad_steps number of iterations.\n", + " \"\"\"\n", + " mb_alg = PILCOAlgorithm(\n", + " model=model, observed=[model.X, model.Y], cost_function=cost_func,\n", + " policy=policy, n_time_steps=num_time_steps,\n", + " initial_state_generator=initial_state_generator,\n", + " num_samples=num_samples)\n", + "\n", + " infr_pred = GradTransferInference(\n", + " mb_alg, infr_params=infr.params, train_params=policy.collect_params())\n", + " infr_pred.run(\n", + " max_iter=num_grad_steps,\n", + " X=mx.nd.array(model_data_X, dtype='float64'),\n", + " Y=mx.nd.array(model_data_Y, dtype='float64'),\n", + " verbose=verbose, learning_rate=learning_rate)\n", + " return policy" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The Loop\n", + "\n", + "We need to define a function that provides random initial states." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": {}, + "outputs": [], + "source": [ + "def initial_state_generator(num_initial_states):\n", + " \"\"\"\n", + " Starts from valid states by drawing theta and momentum\n", + " then computing np.cos(theta) and np.sin(theta) for state[0:2].s\n", + " \"\"\"\n", + " return mx.nd.array(\n", + " [env.observation_space.sample() for i in range(num_initial_states)],\n", + " dtype='float64')" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The loop of PILCO iterates the above three steps." + ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Episode finished after 100 timesteps because Max timesteps reached\n", + "\r", + "Iteration 1 loss: 183.19438945442164\t\t\t\t\r", + "Iteration 2 loss: 151.71942134183473\t\t\t\t\r", + "Iteration 3 loss: 120.76437985446427\t\t\t\t\r", + "Iteration 4 loss: 90.37715754239662\t\t\t\t\r", + "Iteration 5 loss: 60.62170569881482\t\t\t\t\r", + "Iteration 6 loss: 31.623024274306943\t\t\t\t\r", + "Iteration 7 loss: 3.431141954762154\t\t\t\t\r", + "Iteration 8 loss: -23.92777780034288\t\t\t\t\r", + "Iteration 9 loss: -50.42669502898963\t\t\t\t\r", + "Iteration 10 loss: -76.06646934620255\t\t\t\t\r", + "Iteration 11 loss: -100.8759268163551\t\t\t\t\r", + "Iteration 12 loss: -124.88561342741707\t\t\t\t\r", + "Iteration 13 loss: -148.12378769295128\t\t\t\t\r", + "Iteration 14 loss: -170.62435583550786\t\t\t\t\r", + "Iteration 15 loss: -192.42864381636554\t\t\t\t\r", + "Iteration 16 loss: -213.5807400140273\t\t\t\t\r", + "Iteration 17 loss: -234.12273086863553\t\t\t\t\r", + "Iteration 18 loss: -254.09447432916545\t\t\t\t\r", + "Iteration 19 loss: -273.5358786562007\t\t\t\t\r", + "Iteration 20 loss: -292.48731266753873\t\t\t\t\r", + "Iteration 21 loss: -310.98755147007927\t\t\t\t\r", + "Iteration 22 loss: -329.0718006155711\t\t\t\t" + ] + }, + { + 
"name": "stderr", + "output_type": "stream", + "text": [ + "/Users/erimeiss/workspace/mxfusion/mxfusion/inference/inference_parameters.py:71: UserWarning: InferenceParameters has already been initialized. The existing one will be overwritten.\n", + " warnings.warn(\"InferenceParameters has already been initialized. The existing one will be overwritten.\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iteration 100 loss: -1145.489716701466\t\t\t\t\n", + "Iteration 200 loss: -1285.3929126768035\t\t\t\t\n", + "Iteration 300 loss: -1331.7954137173099\t\t\t\t\n", + "Iteration 400 loss: -1357.6716807213334\t\t\t\t\n", + "Iteration 500 loss: -1374.5650793437253\t\t\t\t\n", + "Iteration 600 loss: -1386.6350141511623\t\t\t\t\n", + "Iteration 700 loss: -1395.7585508375987\t\t\t\t\n", + "Iteration 800 loss: -1402.93562993511\t\t\t\t\t\t\n", + "Iteration 900 loss: -1408.7594436983916\t\t\t\t\n", + "Iteration 1000 loss: -1413.6055169247031\t\t\t\t\n", + "Iteration 59 loss: 5544930.409976534\t\t\t\t\t" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 28\u001b[0m \u001b[0minitial_state_generator\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum_grad_steps\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnum_grad_steps\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 29\u001b[0m \u001b[0mnum_samples\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 30\u001b[0;31m num_time_steps=num_time_steps)\n\u001b[0m", + "\u001b[0;32m\u001b[0m in 
\u001b[0;36moptimize_policy\u001b[0;34m(policy, cost_func, model, infr, model_data_X, model_data_Y, initial_state_generator, num_grad_steps, learning_rate, num_time_steps, num_samples, verbose)\u001b[0m\n\u001b[1;32m 22\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel_data_X\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'float64'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel_data_Y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'float64'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 24\u001b[0;31m verbose=verbose, learning_rate=learning_rate)\n\u001b[0m\u001b[1;32m 25\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mpolicy\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/inference/grad_based_inference.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, optimizer, learning_rate, max_iter, verbose, **kwargs)\u001b[0m\n\u001b[1;32m 102\u001b[0m \u001b[0minfr_executor\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minfr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparam_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparam_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmxnet_context\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0moptimizer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 104\u001b[0;31m learning_rate=learning_rate, max_iter=max_iter, verbose=verbose)\n\u001b[0m\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 106\u001b[0m \u001b[0;32mclass\u001b[0m \u001b[0mGradTransferInference\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGradBasedInference\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/inference/batch_loop.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, infr_executor, data, param_dict, ctx, optimizer, learning_rate, max_iter, n_prints, verbose)\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmax_iter\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mmx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecord\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 53\u001b[0;31m \u001b[0mloss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_for_gradient\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minfr_executor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mctx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 54\u001b[0m 
\u001b[0mloss_for_gradient\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mxnet/gluon/block.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, *args)\u001b[0m\n\u001b[1;32m 538\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 539\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 540\u001b[0;31m \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 541\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 542\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mxnet/gluon/block.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, x, *args)\u001b[0m\n\u001b[1;32m 915\u001b[0m \u001b[0mparams\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mj\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mctx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reg_params\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 916\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 917\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhybrid_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mndarray\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 918\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 919\u001b[0m \u001b[0;32massert\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mSymbol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/inference/inference_alg.py\u001b[0m in \u001b[0;36mhybrid_forward\u001b[0;34m(self, F, x, *args, **kw)\u001b[0m\n\u001b[1;32m 81\u001b[0m \u001b[0madd_sample_dimension_to_arrays\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkw\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mvariables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[0madd_sample_dimension_to_arrays\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_constants\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mvariables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 83\u001b[0;31m \u001b[0mobj\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_infr_method\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mvariables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 84\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpause\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 85\u001b[0m \u001b[0;31m# An inference algorithm may directly set the value of a parameter instead of computing its gradient.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/inference/pilco_alg.py\u001b[0m in \u001b[0;36mcompute\u001b[0;34m(self, F, variables)\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mt\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mn_time_steps\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 83\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mx_t\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 84\u001b[0;31m \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mY\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfactor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mtargets\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mY\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum_samples\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 85\u001b[0m \u001b[0ms_t_plus_1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mres\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/modules/module.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, F, variables, num_samples, targets)\u001b[0m\n\u001b[1;32m 362\u001b[0m \u001b[0malg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_samples\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnum_samples\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 363\u001b[0m \u001b[0malg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtarget_variables\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtargets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 364\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0malg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 365\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 366\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_get_algorithm_for_target_conditional_pair\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0malgorithms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtargets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mexact_match\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/modules/gp_modules/gp_regression.py\u001b[0m in \u001b[0;36mcompute\u001b[0;34m(self, F, variables)\u001b[0m\n\u001b[1;32m 167\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 168\u001b[0m X, noise_var, X_cond, L, LinvY, kern_params = arrays_as_samples(\n\u001b[0;32m--> 169\u001b[0;31m F, [X, noise_var, X_cond, L, LinvY, kern_params])\n\u001b[0m\u001b[1;32m 170\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 171\u001b[0m \u001b[0mKxt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkern\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mK\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_cond\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkern_params\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/components/variables/runtime_variable.py\u001b[0m in \u001b[0;36marrays_as_samples\u001b[0;34m(F, arrays)\u001b[0m\n\u001b[1;32m 114\u001b[0m \u001b[0mmax_num_samples\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmax_num_samples\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 116\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mas_samples\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_num_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m 
\u001b[0;32min\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m}\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mas_samples\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_num_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ma\u001b[0m \u001b[0;32min\u001b[0m \u001b[0marrays\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 117\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0marrays\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/components/variables/runtime_variable.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 114\u001b[0m \u001b[0mmax_num_samples\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmax_num_samples\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 116\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mas_samples\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_num_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m}\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mas_samples\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_num_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ma\u001b[0m \u001b[0;32min\u001b[0m \u001b[0marrays\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 117\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0marrays\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/components/variables/runtime_variable.py\u001b[0m in \u001b[0;36mas_samples\u001b[0;34m(F, array, num_samples)\u001b[0m\n\u001b[1;32m 97\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0marray\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 99\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbroadcast_axis\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 100\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 101\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mxnet/ndarray/register.py\u001b[0m in \u001b[0;36mbroadcast_axis\u001b[0;34m(data, axis, size, out, name, 
**kwargs)\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mxnet/_ctypes/ndarray.py\u001b[0m in \u001b[0;36m_imperative_invoke\u001b[0;34m(handle, ndargs, keys, vals, out)\u001b[0m\n\u001b[1;32m 90\u001b[0m \u001b[0mc_str_array\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 91\u001b[0m \u001b[0mc_str_array\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mvals\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 92\u001b[0;31m ctypes.byref(out_stypes)))\n\u001b[0m\u001b[1;32m 93\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 94\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0moriginal_output\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "num_episode = 20 # how many model fit + policy optimization episodes to run\n", + "num_samples = 100 # how many sample trajectories the policy optimization loop uses\n", + "num_grad_steps = 1000 # how many gradient steps the optimizer takes per episode\n", + "num_time_steps = 100 # how far to roll out each sample trajectory\n", + "learning_rate = 1e-3 # learning rate for the policy optimization\n", + "\n", + "all_states = []\n", + "all_actions = []\n", + "\n", + "for i_ep in range(num_episode):\n", + " # Run an episode and collect data.\n", + " if i_ep == 0:\n", + " policy_func = lambda x: env.action_space.sample()\n", + " else:\n", + " policy_func = lambda x: policy(mx.nd.expand_dims(mx.nd.array(x, dtype='float64'), axis=0)).asnumpy()[0]\n", + " total_reward, states, actions = run_one_episode(\n", + " 
env, policy_func, max_steps=num_time_steps)\n", + " all_states.append(states)\n", + " all_actions.append(actions)\n", + "\n", + " # Fit a model.\n", + " model, infr, model_data_X, model_data_Y = fit_model(\n", + " all_states, all_actions, win_in=1, verbose=True)\n", + "\n", + " # Optimize the policy.\n", + " policy = optimize_policy(\n", + " policy, cost, model, infr, model_data_X, model_data_Y,\n", + " initial_state_generator, num_grad_steps=num_grad_steps,\n", + " num_samples=num_samples, learning_rate=learning_rate,\n", + " num_time_steps=num_time_steps)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Policy after the first episode (random exploration):" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "
\n", + " \n", + "
\n", + " \n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " Once \n", + " Loop \n", + " Reflect \n", + "
\n", + "
\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 15, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(filename=\"pilco/animation_policy_iter_0.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Policy after the 5th episode:" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "
\n", + " \n", + "
\n", + " \n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " Once \n", + " Loop \n", + " Reflect \n", + "
\n", + "
\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 16, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(filename=\"pilco/animation_policy_iter_4.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Reference\n", + "\n", + "M. P. Deisenroth, C. E. Rasmussen. 2011. \"PILCO: A Model-Based and Data-Efficient Approach to Policy Search\". _in\n", + "Proceedings of the 28th International Conference on Machine Learning._ [http://mlg.eng.cam.ac.uk/pub/pdf/DeiRas11.pdf](http://mlg.eng.cam.ac.uk/pub/pdf/DeiRas11.pdf)\n" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/notebooks/pilco/animation_policy_iter_0.html b/examples/notebooks/pilco/animation_policy_iter_0.html new file mode 100644 index 0000000..95b984d --- /dev/null +++ b/examples/notebooks/pilco/animation_policy_iter_0.html @@ -0,0 +1,18186 @@ + + + + +
+ +
+ +
+ + + + + + + + + +
+ Once + Loop + Reflect +
+
+ + + diff --git a/examples/notebooks/pilco/animation_policy_iter_4.html b/examples/notebooks/pilco/animation_policy_iter_4.html new file mode 100644 index 0000000..bffd3bf --- /dev/null +++ b/examples/notebooks/pilco/animation_policy_iter_4.html @@ -0,0 +1,14098 @@ + + + + +
+ +
+ +
+ + + + + + + + + +
+ Once + Loop + Reflect +
+
+ + + diff --git a/examples/notebooks/pilco/animation_random_policy.html b/examples/notebooks/pilco/animation_random_policy.html new file mode 100644 index 0000000..850177e --- /dev/null +++ b/examples/notebooks/pilco/animation_random_policy.html @@ -0,0 +1,18111 @@ + + + + +
+ +
+ +
+ + + + + + + + + +
+ Once + Loop + Reflect +
+
+ + + diff --git a/examples/notebooks/pilco_neurips2018_mloss_slides.ipynb b/examples/notebooks/pilco_neurips2018_mloss_slides.ipynb new file mode 100644 index 0000000..8dc7fdc --- /dev/null +++ b/examples/notebooks/pilco_neurips2018_mloss_slides.ipynb @@ -0,0 +1,51558 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Obtaining file:///Users/erimeiss/workspace/mxfusion\n", + "Requirement already satisfied: networkx>=2.1 in /Users/erimeiss/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages (from MXFusion==0.3.0) (2.1)\n", + "Requirement already satisfied: numpy>=1.7 in /Users/erimeiss/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages (from MXFusion==0.3.0) (1.16.3)\n", + "Requirement already satisfied: decorator>=4.1.0 in /Users/erimeiss/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages (from networkx>=2.1->MXFusion==0.3.0) (4.3.0)\n", + "Installing collected packages: MXFusion\n", + " Found existing installation: MXFusion 0.3.0\n", + " Uninstalling MXFusion-0.3.0:\n", + " Successfully uninstalled MXFusion-0.3.0\n", + " Running setup.py develop for MXFusion\n", + "Successfully installed MXFusion\n" + ] + } + ], + "source": [ + "!pip install -e ../../" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# MXFusion\n", + "## A Deep Modular Probabilistic Programming Library\n", + "\n", + "**Eric Meissner**\n", + "Zhenwen Dai\n", + "\n", + "@Amazon - Cambridge, UK" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# Why another one?\n", + "\n", + "Existing libraries had either:\n", + "- Probabilistic modelling with rich, flexible models and universal inference or\n", + "- Specialized, efficient inference over a subset of models\n", + "\n", + "**We needed both**" + ] + }, + { + 
"cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# Key Requirements\n", + "- Integration with deep learning\n", + "- Flexiblility\n", + "- Scalability\n", + "- Specialized inference and models support\n" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "notes" + } + }, + "source": [ + "- Bayesian Deep Learning methods\n", + "- Rapid prototyping and software re-use\n", + "- GPUs, specialized inference methods" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "notes" + } + }, + "source": [ + "Modularity\n", + " - Specialized Inference\n", + " - Composability (tinkerability)\n", + " - Better leveraging of expert expertise\n", + "\n", + "Specialized inference methods + models, without requiring users to reimplement nor understand them every time.\n", + "Leverage expert knowledge. Efficient inference, flexible framework. \n", + "Existing frameworks either did one or the other: flexible, or efficient." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# What does it look like?\n", + "\n", + "**Modelling**\n", + "\n", + "**Inference**" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "## Modelling \n", + "\n", + "### Directed Factor Graphs\n", + "* Variable\n", + "* Function\n", + "* Distribution\n", + "\n", + "### Example\n", + "\n", + "```python\n", + "m = Model()\n", + "m.mu = Variable()\n", + "m.s = Variable(transformation=PositiveTransformation())\n", + "m.Y = Normal.define_variable(mean=m.mu, variance=m.s)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "notes" + } + }, + "source": [ + "* 3 primary components in modeling\n", + " * Variable\n", + " * Distribution\n", + " * Function \n", + "* 2 primary methods for models \n", + " * log_pdf\n", + " * draw_samples" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "## Inference\n", + "\n", + "### Two Classes\n", + "\n", + "* Variational Inference\n", + "* MCMC Sampling (*soon*)\n", + "\n", + "Built on MXNet Gluon (imperative code, not static graph)\n", + "\n", + "### Example\n", + "\n", + "```\n", + "infr = GradBasedInference(inference_algorithm=MAP(model=m, observed=[m.Y]))\n", + "infr.run(Y=data)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "notes" + } + }, + "source": [ + "Primarily variational inference right now\n", + "\n", + "MCMC method coming soon\n", + "\n", + "```python\n", + "infr = GradBasedInference(inference_algorithm=MAP(model=m, observed=[m.Y]))\n", + "infr.run(Y=data)\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "notes" + } + }, + "source": [ + "## Modules\n", + " - Model + Inference together form building blocks.\n", + 
" - Just doing modular modeling with universal inference doesn't really scale, need specialized inference methods for specialized modelling objects like non-parametrics." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "notes" + } + }, + "source": [ + "# PILCO: A Model-based Policy Search\n", + "Common reinforcement learning methods suffer from data inefficiency, which can be a issue in real world applications where gathering sufficiently large amounts of data pose economic issues and may be impossible. propose a model-based policy search method known as PILCO in part to address this issue. PILCO uses a Gaussian process (GP) for learning the dynamics of the environment and optimizes a parametric policy function using the learned dynamics model.\n", + "\n", + "In this notebook, we demonstrate a straight-forward implementation of PILCO. This implementation follows the main idea of PILCO and has a few enhancements in addition to the published implementation. The enhancements are as follows: \n", + "- **Use Monte Carlo integration instead of moment estimation.** We approximate the expected reward using Monte Carlo integration instead of the proposed moment estimation approach. This removes the bias in the expected reward computation and enables a wide range of choices of kernels and policy functions. In the original work, only RBF and linear kernel and only linear and RBF network policy can be used.\n", + "- **Use automatic differentiation.** Thanks to automatic differentiation, no gradient derivation is needed.\n", + "- **An unified interface of Gaussian process.** MXFusion provides an unified inferface of GP modules. We allows us to easily switch among plan GP, variational sparse GP and stocastic variational GP implementations." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# PILCO: A Model-based Policy Search\n", + "PILCO [1] is a model-based data-efficient algorithm that solves the RL problem by the following two step iterative process:\n", + "1. Fit a Gaussian Process that models the state dynamics, using calls to a simulator \n", + "2. Optimize a parametric policy using our GP instead of calling the simulator.\n", + "\n", + "## Enhancements MXFusion Brings\n", + "* Use Monte Carlo integration instead of moment estimation\n", + "* Use automatic differentiation\n", + "* A flexible interface for Gaussian processes, trivial to switch to sparse or stochastic variational \n", + "\n", + "[1] Deisenroth & Rasmussen (2011)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "## Preparation" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "This notebook depends on MXNet, MXFusion and Open AI Gym. These packages can be installed into your Python environment by running the following commands.\n", + "```bash\n", + "pip install mxnet mxfusion gym\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "Set the global configuration." + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "import os\n", + "os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'\n", + "from mxfusion.common import config\n", + "config.DEFAULT_DTYPE = 'float64'\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "## Example: Pendulum\n", + "\n", + "\n", + "We use the inverted pendulum swingup problem as an example. 
We use the [Pendulum-v0](https://gym.openai.com/envs/Pendulum-v0/) enironment in Open AI Gym. The task is to swing the pendulum up and balance it at the inverted position. This is a classical control problem and is known to be unsolvable with a linear controller.\n", + "\n", + "To solve this problem with PILCO, it needs three components:\n", + "\n", + "- Execute a policy in an real environment (an Open AI Gym simulator in this example) and collect data.\n", + "- Fit a GP model as the model for the dynamics of the environment.\n", + "- Optimize the policy given the dynamics model learned from all the data that have been collected so far.\n", + "\n", + "The overall PILCO algorithm is to iterate the above three steps until a policy that can solve the problem is found." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "## Execute the Environment \n", + "\n", + "The Pendulum-v0 environment can be loaded easily." + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "import gym\n", + "env = gym.make('Pendulum-v0')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "The state of the pendulum environment is a 3D vector. The first two dimensions are the 2D location of the end point of the pendulum. The third dimension encodes the angular speed of the pendulum. The action space is a 1D vector in [-2, 2]." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "We write a helper function for executing the environment with a given policy." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "from matplotlib import animation\n", + "\n", + "def run_one_episode(env, policy, initial_state=None, max_steps=200, verbose=False, render=False):\n", + " \"\"\"\n", + " Drives an episode of the OpenAI gym environment using the policy to decide next actions.\n", + " \"\"\"\n", + " observation = env.reset()\n", + " if initial_state is not None:\n", + " env.env.state = initial_state\n", + " observation = env.env._get_obs()\n", + " env._max_episode_steps = max_steps\n", + " step_idx = 0\n", + " done = False\n", + " total_reward = 0\n", + " frames = []\n", + " all_actions = []\n", + " all_observations = [observation]\n", + " while not done:\n", + " if render:\n", + " frames.append(env.render(mode = 'rgb_array'))\n", + " if verbose:\n", + " print(observation)\n", + " action = policy(observation)\n", + " observation, reward, done, info = env.step(action)\n", + " all_observations.append(observation)\n", + " all_actions.append(action)\n", + " total_reward += reward\n", + " step_idx += 1\n", + " if done or step_idx>=max_steps-1:\n", + " print(\"Episode finished after {} timesteps because {}\".format(step_idx+1, \"'done' reached\" if done else \"Max timesteps reached\"))\n", + " break\n", + " if render:\n", + " fig = plt.figure()\n", + " ax = fig.gca()\n", + " fig.tight_layout()\n", + " patch = ax.imshow(frames[0])\n", + " ax.axis('off')\n", + " def animate(i):\n", + " patch.set_data(frames[i])\n", + " anim = animation.FuncAnimation(plt.gcf(), animate, frames = len(frames), interval=20)\n", + " return total_reward, np.array(all_observations, dtype=np.float64,), np.array(all_actions, dtype=np.float64), anim\n", + " else:\n", + " return total_reward, np.array(all_observations, dtype=np.float64,), np.array(all_actions, dtype=np.float64)" + ] + }, + { + 
"cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "Let's first apply a random policy and see how the environment reacts. The random policy uniformly samples in the space of action." + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "def random_policy(state):\n", + " return env.action_space.sample()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "The animation is generated with the following commands:\n", + "```python\n", + "anim = run_one_episode(env, random_policy, max_steps=500, render=True, verbose=False)[-1]\n", + "\n", + "with open('animation_random_policy.html', 'w') as f:\n", + " f.write(anim.to_jshtml())\n", + "```" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# Pendulum" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "metadata": { + "slideshow": { + "slide_type": "fragment" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "
\n", + " \n", + "
\n", + " \n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " Once \n", + " Loop \n", + " Reflect \n", + "
\n", + "
\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "from IPython.display import HTML\n", + "\n", + "HTML(filename=\"pilco/animation_random_policy.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "notes" + } + }, + "source": [ + "## Fit the Dynamics Model\n", + "\n", + "The dynamics model of pendulum can be written as\n", + "$$p(y_{t+1}|y_t, a_t)$$\n", + "where $y_t$ is the state vector at the time $t$ and $a_t$ is the action taken at the time $t$. \n", + "\n", + "PILCO uses a Gaussian process to model the above dynamics." + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "def prepare_data(state_list, action_list, win_in):\n", + " \"\"\"\n", + " Prepares a list of states and a list of actions as inputs to the Gaussian Process for training.\n", + " \"\"\"\n", + " \n", + " X_list = []\n", + " Y_list = []\n", + " \n", + " for state_array, action_array in zip(state_list, action_list):\n", + " # the state and action array shape should be aligned.\n", + " assert state_array.shape[0]-1 == action_array.shape[0]\n", + " \n", + " for i in range(state_array.shape[0]-win_in):\n", + " Y_list.append(state_array[i+win_in:i+win_in+1])\n", + " X_list.append(np.hstack([state_array[i:i+win_in].flatten(), action_array[i:i+win_in].flatten()]))\n", + " X = np.vstack(X_list)\n", + " Y = np.vstack(Y_list)\n", + " return X, Y" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "In this example, we do a maximum likelihood estimate for the model hyper-parameters. In MXFusion, Gaussian process regression model is available as a module, which includes a dediated inference algorithm." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 8, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "import mxnet as mx\n", + "from mxfusion import Model, Variable\n", + "from mxfusion.components.variables import PositiveTransformation\n", + "from mxfusion.components.distributions.gp.kernels import RBF\n", + "from mxfusion.modules.gp_modules import GPRegression\n", + "from mxfusion.inference import GradBasedInference, MAP" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# Define and fit the model" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "metadata": { + "slideshow": { + "slide_type": "fragment" + } + }, + "outputs": [], + "source": [ + "def fit_model(state_list, action_list, win_in, verbose=True):\n", + " \"\"\"\n", + " Fits a Gaussian Process model to the state / action pairs passed in. \n", + " This creates a model of the environment which is used during\n", + " policy optimization instead of querying the environment directly.\n", + " \n", + " See mxfusion.gp_modules for additional types of GP models to fit,\n", + " including Sparse GP and Stochastic Varitional Inference Sparse GP.\n", + " \"\"\"\n", + " X, Y = prepare_data(state_list, action_list, win_in)\n", + "\n", + " m = Model()\n", + " m.N = Variable()\n", + " m.X = Variable(shape=(m.N, X.shape[-1]))\n", + " m.noise_var = Variable(shape=(1,), transformation=PositiveTransformation(),\n", + " initial_value=0.01)\n", + " m.kernel = RBF(input_dim=X.shape[-1], variance=1, lengthscale=1, ARD=True)\n", + " m.Y = GPRegression.define_variable(\n", + " X=m.X, kernel=m.kernel, noise_var=m.noise_var,\n", + " shape=(m.N, Y.shape[-1]))\n", + " m.Y.factor.gp_log_pdf.jitter = 1e-6\n", + "\n", + " infr = GradBasedInference(\n", + " inference_algorithm=MAP(model=m, observed=[m.X, m.Y]))\n", + " infr.run(X=mx.nd.array(X, dtype='float64'),\n", + " Y=mx.nd.array(Y, 
dtype='float64'),\n", + " max_iter=1000, learning_rate=0.1, verbose=verbose)\n", + " return m, infr, X, Y" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "notes" + } + }, + "source": [ + "### Policy Optimization\n", + "\n", + "PILCO computes the expected reward of a policy given the dynamics model. First, we need to define the parametric form of the policy. In this example, we use a neural network with one hidden layer. As the action space is [-2, 2], we apply a tanh transformation and multiply the come with two. This enforces the returned actions stay within the range." + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# Policy\n", + "\n", + "We define a neural network with one hidden layer and and output constrained between [-2,2] for the policy." + ] + }, + { + "cell_type": "code", + "execution_count": 10, + "metadata": { + "slideshow": { + "slide_type": "fragment" + } + }, + "outputs": [], + "source": [ + "from mxnet.gluon import HybridBlock\n", + "from mxnet.gluon.nn import Dense\n", + "\n", + "class NNController(HybridBlock):\n", + " def __init__(self, prefix=None, params=None):\n", + " super(NNController, self).__init__(prefix=prefix, params=params)\n", + " self.dense1 = Dense(100, in_units=len(env.observation_space.high), dtype='float64', activation='relu')\n", + " self.dense2 = Dense(1, in_units=100, dtype='float64', activation='tanh')\n", + " def hybrid_forward(self, F, x):\n", + " out = self.dense2(self.dense1(x))*2\n", + " return out \n", + " \n", + "policy = NNController()\n", + "policy.collect_params().initialize(mx.initializer.Xavier(magnitude=1))" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "To compute the expected reward, we also need to define a reward function. This reward function is defined by us according to the task. 
The main component is the height of the pendulum. We also penalize the force and the angular momentum. " + ] + }, + { + "cell_type": "code", + "execution_count": 11, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "class CostFunction(mx.gluon.HybridBlock):\n", + " \"\"\"\n", + " The goal is to get the pendulum upright and stable as quickly as possible.\n", + " Taken from the code for Pendulum.\n", + " \"\"\"\n", + " def hybrid_forward(self, F, state, action):\n", + " \"\"\"\n", + " :param state: [np.cos(theta), np.sin(theta), ~ momentum(theta)]\n", + " a -> 0 when pendulum is upright, largest when pendulum is hanging down completely.\n", + " b -> penalty for taking action\n", + " c -> penalty for pendulum momentum\n", + " \"\"\"\n", + " a_scale = 2.\n", + " b_scale = .001\n", + " c_scale = .1\n", + " a = F.sum(a_scale * (state[:,:,0:1] -1) ** 2, axis=-1)\n", + " b = F.sum(b_scale * action ** 2, axis=-1)\n", + " c = F.sum(c_scale * state[:,:,2:3] ** 2, axis=-1)\n", + " return (a + c + b)\n", + " \n", + "cost = CostFunction()" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "The expected reward function can be written as\n", + "$$R = \\mathbb{E}_{p(y_T, \\ldots, y_0)}\\left(\\sum_{t=0}^T r(y_t)\\right)$$\n", + "where $r(\\cdot)$ is the reward function. $p(y_T, \\ldots, y_0)$ is the joint distribution when applying the policy to the dynamics model:\n", + "$$p(y_T, \\ldots, y_0) = p(y_0) \\prod_{t=1}^T p(y_t|y_{t-1}, a_{t-1}),$$\n", + "where $a_{t-1} = \\pi(y_{t-1})$ is the action taken at the time $t-1$, which is the outcome of the policy $\\pi(\\cdot)$.\n", + "\n", + "The expected reward function is implemented as follows." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "# Obtaining the policy gradients" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "from mxfusion.inference.inference_alg import SamplingAlgorithm\n", + "\n", + "class PILCOAlgorithm(SamplingAlgorithm):\n", + "\n", + " def compute(self, F, variables):\n", + " s_0 = self.initial_state_generator(self.num_samples)\n", + " a_0 = self.policy(s_0)\n", + " a_t_plus_1 = a_0\n", + " x_t = F.expand_dims(F.concat(s_0, a_0, dim=1), axis=1)\n", + " cost = 0\n", + " for t in range(self.n_time_steps):\n", + " variables[self.model.X] = x_t\n", + " res = self.model.Y.factor.predict(F, variables, targets=[self.model.Y], num_samples=self.num_samples)[0]\n", + " s_t_plus_1 = res[0]\n", + "\n", + " cost = cost + self.cost_function(s_t_plus_1, a_t_plus_1)\n", + "\n", + " a_t_plus_1 = mx.nd.expand_dims(self.policy(s_t_plus_1), axis=2)\n", + " x_t = mx.nd.concat(s_t_plus_1, a_t_plus_1, dim=2)\n", + " total_cost = F.sum(cost)\n", + " return total_cost, total_cost" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "We optimize the policy with respect to the expected reward by using a gradient optimizer." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 13, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "from mxfusion.inference import GradTransferInference\n", + "from mxfusion.inference.pilco_alg import PILCOAlgorithm\n", + "\n", + "def optimize_policy(policy, cost_func, model, infr, model_data_X, model_data_Y,\n", + " initial_state_generator, num_grad_steps,\n", + " learning_rate=1e-2, num_time_steps=100, \n", + " num_samples=10, verbose=True):\n", + " \"\"\"\n", + " Takes as primary inputs a policy, cost function, and trained model.\n", + " Optimizes the policy for num_grad_steps number of iterations.\n", + " \"\"\"\n", + " mb_alg = PILCOAlgorithm(\n", + " model=model, observed=[model.X, model.Y], cost_function=cost_func,\n", + " policy=policy, n_time_steps=num_time_steps,\n", + " initial_state_generator=initial_state_generator,\n", + " num_samples=num_samples)\n", + "\n", + " infr_pred = GradTransferInference(\n", + " mb_alg, infr_params=infr.params, train_params=policy.collect_params())\n", + " infr_pred.run(\n", + " max_iter=num_grad_steps,\n", + " X=mx.nd.array(model_data_X, dtype='float64'),\n", + " Y=mx.nd.array(model_data_Y, dtype='float64'),\n", + " verbose=verbose, learning_rate=learning_rate)\n", + " return policy" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "source": [ + "## The Loop\n", + "\n", + "We need to define a function that provides random initial states." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 14, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "def initial_state_generator(num_initial_states):\n", + " \"\"\"\n", + " Starts from valid states by drawing theta and momentum\n", + " then computing np.cos(theta) and np.sin(theta) for state[0:2].s\n", + " \"\"\"\n", + " return mx.nd.array(\n", + " [env.observation_space.sample() for i in range(num_initial_states)],\n", + " dtype='float64')" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# The Loop" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": { + "slideshow": { + "slide_type": "skip" + } + }, + "outputs": [], + "source": [ + "num_episode = 20 # how many model fit + policy optimization episodes to run\n", + "num_samples = 100 # how many sample trajectories the policy optimization loop uses\n", + "num_grad_steps = 1000 # how many gradient steps the optimizer takes per episode\n", + "num_time_steps = 100 # how far to roll out each sample trajectory\n", + "learning_rate = 1e-3 # learning rate for the policy optimization\n", + "\n", + "all_states = []\n", + "all_actions = []" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": { + "slideshow": { + "slide_type": "fragment" + } + }, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Episode finished after 100 timesteps because Max timesteps reached\n", + "\r", + "Iteration 1 loss: -147.44275894783664\t\t\t\t\r", + "Iteration 2 loss: -175.90537081675882\t\t\t\t\r", + "Iteration 3 loss: -203.08783994173928\t\t\t\t\r", + "Iteration 4 loss: -228.99512346183315\t\t\t\t\r", + "Iteration 5 loss: -253.65195491239757\t\t\t\t\r", + "Iteration 6 loss: -277.0934209762746\t\t\t\t\r", + "Iteration 7 loss: -299.3600581570322\t\t\t\t\r", + "Iteration 8 loss: -320.49807202924745\t\t\t\t\r", + "Iteration 9 loss: 
-340.56099060736454\t\t\t\t\r", + "Iteration 10 loss: -359.61034033457696\t\t\t\t\r", + "Iteration 11 loss: -377.71565978816744\t\t\t\t\r", + "Iteration 12 loss: -394.9550799167229\t\t\t\t\r", + "Iteration 13 loss: -411.41695470306104\t\t\t\t\r", + "Iteration 14 loss: -427.20182567104433\t\t\t\t\r", + "Iteration 15 loss: -442.42316993901187\t\t\t\t\r", + "Iteration 16 loss: -457.20516453335017\t\t\t\t\r", + "Iteration 17 loss: -471.67636880459304\t\t\t\t\r", + "Iteration 18 loss: -485.9597423546608\t\t\t\t\r", + "Iteration 19 loss: -500.16127762489947\t\t\t\t\r", + "Iteration 20 loss: -514.360528573828\t\t\t\t\r", + "Iteration 21 loss: -528.6057260995374\t\t\t\t\r", + "Iteration 22 loss: -542.9143027944718\t\t\t\t\r", + "Iteration 23 loss: -557.2778368461522\t\t\t\t\r", + "Iteration 24 loss: -571.6694615828881\t\t\t\t" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/Users/erimeiss/workspace/mxfusion/mxfusion/inference/inference_parameters.py:71: UserWarning: InferenceParameters has already been initialized. The existing one will be overwritten.\n", + " warnings.warn(\"InferenceParameters has already been initialized. 
The existing one will be overwritten.\")\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Iteration 100 loss: -1365.1946754085056\t\t\t\t\n", + "Iteration 200 loss: -1469.3300788900378\t\t\t\t\n", + "Iteration 300 loss: -1491.5593095277768\t\t\t\t\n", + "Iteration 400 loss: -1503.8953879089897\t\t\t\t\n", + "Iteration 500 loss: -1512.5250343137307\t\t\t\t\n", + "Iteration 600 loss: -1519.2204316627003\t\t\t\t\n", + "Iteration 700 loss: -1524.6930473364855\t\t\t\t\n", + "Iteration 800 loss: -1529.294080558935\t\t\t\t\t\n", + "Iteration 900 loss: -1533.229205462882\t\t\t\t\t\n", + "Iteration 1000 loss: -1536.6377727466997\t\t\t\t\n", + "Iteration 2 loss: 7108711.122901848\t\t\t\t" + ] + }, + { + "ename": "KeyboardInterrupt", + "evalue": "", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mKeyboardInterrupt\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m()\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0minitial_state_generator\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum_grad_steps\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnum_grad_steps\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0mnum_samples\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mlearning_rate\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mlearning_rate\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 21\u001b[0;31m num_time_steps=num_time_steps)\n\u001b[0m", + "\u001b[0;32m\u001b[0m in \u001b[0;36moptimize_policy\u001b[0;34m(policy, cost_func, model, infr, model_data_X, model_data_Y, initial_state_generator, num_grad_steps, learning_rate, num_time_steps, num_samples, verbose)\u001b[0m\n\u001b[1;32m 22\u001b[0m 
\u001b[0mX\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel_data_X\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'float64'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0mY\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmodel_data_Y\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m'float64'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 24\u001b[0;31m verbose=verbose, learning_rate=learning_rate)\n\u001b[0m\u001b[1;32m 25\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mpolicy\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/inference/grad_based_inference.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, optimizer, learning_rate, max_iter, verbose, **kwargs)\u001b[0m\n\u001b[1;32m 102\u001b[0m \u001b[0minfr_executor\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0minfr\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mparam_dict\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mparam_dict\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 103\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmxnet_context\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0moptimizer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0moptimizer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 104\u001b[0;31m learning_rate=learning_rate, max_iter=max_iter, 
verbose=verbose)\n\u001b[0m\u001b[1;32m 105\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 106\u001b[0m \u001b[0;32mclass\u001b[0m \u001b[0mGradTransferInference\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mGradBasedInference\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/inference/batch_loop.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, infr_executor, data, param_dict, ctx, optimizer, learning_rate, max_iter, n_prints, verbose)\u001b[0m\n\u001b[1;32m 51\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmax_iter\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 52\u001b[0m \u001b[0;32mwith\u001b[0m \u001b[0mmx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrecord\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 53\u001b[0;31m \u001b[0mloss\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mloss_for_gradient\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0minfr_executor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmx\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mctx\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mctx\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 54\u001b[0m \u001b[0mloss_for_gradient\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 55\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mxnet/gluon/block.py\u001b[0m in 
\u001b[0;36m__call__\u001b[0;34m(self, *args)\u001b[0m\n\u001b[1;32m 538\u001b[0m \u001b[0mhook\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 539\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 540\u001b[0;31m \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mforward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 541\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 542\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mhook\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_forward_hooks\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalues\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mxnet/gluon/block.py\u001b[0m in \u001b[0;36mforward\u001b[0;34m(self, x, *args)\u001b[0m\n\u001b[1;32m 915\u001b[0m \u001b[0mparams\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0;34m{\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mj\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mctx\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_reg_params\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m}\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 916\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 917\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhybrid_forward\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mndarray\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mparams\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 918\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 919\u001b[0m \u001b[0;32massert\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mx\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mSymbol\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;31m \u001b[0m\u001b[0;31m\\\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/inference/inference_alg.py\u001b[0m in \u001b[0;36mhybrid_forward\u001b[0;34m(self, F, x, *args, **kw)\u001b[0m\n\u001b[1;32m 81\u001b[0m \u001b[0madd_sample_dimension_to_arrays\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkw\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mvariables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[0madd_sample_dimension_to_arrays\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_constants\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mvariables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 83\u001b[0;31m \u001b[0mobj\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_infr_method\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mvariables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 84\u001b[0m \u001b[0;32mwith\u001b[0m 
\u001b[0mautograd\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpause\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 85\u001b[0m \u001b[0;31m# An inference algorithm may directly set the value of a parameter instead of computing its gradient.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/inference/pilco_alg.py\u001b[0m in \u001b[0;36mcompute\u001b[0;34m(self, F, variables)\u001b[0m\n\u001b[1;32m 82\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mt\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mn_time_steps\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 83\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mX\u001b[0m\u001b[0;34m]\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mx_t\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 84\u001b[0;31m \u001b[0mres\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mY\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mfactor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mpredict\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtargets\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmodel\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mY\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mnum_samples\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 85\u001b[0m 
\u001b[0ms_t_plus_1\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mres\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 86\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/modules/module.py\u001b[0m in \u001b[0;36mpredict\u001b[0;34m(self, F, variables, num_samples, targets)\u001b[0m\n\u001b[1;32m 362\u001b[0m \u001b[0malg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnum_samples\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnum_samples\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 363\u001b[0m \u001b[0malg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mtarget_variables\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtargets\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 364\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0malg\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcompute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 365\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 366\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m_get_algorithm_for_target_conditional_pair\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0malgorithms\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtargets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvariables\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mexact_match\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;32mFalse\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/modules/gp_modules/gp_regression.py\u001b[0m in \u001b[0;36mcompute\u001b[0;34m(self, F, variables)\u001b[0m\n\u001b[1;32m 167\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 168\u001b[0m X, noise_var, X_cond, L, LinvY, kern_params = arrays_as_samples(\n\u001b[0;32m--> 169\u001b[0;31m F, [X, noise_var, X_cond, L, LinvY, kern_params])\n\u001b[0m\u001b[1;32m 
170\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 171\u001b[0m \u001b[0mKxt\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mkern\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mK\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX_cond\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mX\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkern_params\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/components/variables/runtime_variable.py\u001b[0m in \u001b[0;36marrays_as_samples\u001b[0;34m(F, arrays)\u001b[0m\n\u001b[1;32m 114\u001b[0m \u001b[0mmax_num_samples\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmax_num_samples\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 116\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mas_samples\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_num_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m}\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mas_samples\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_num_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ma\u001b[0m 
\u001b[0;32min\u001b[0m \u001b[0marrays\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 117\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0marrays\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/components/variables/runtime_variable.py\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 114\u001b[0m \u001b[0mmax_num_samples\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 115\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mmax_num_samples\u001b[0m \u001b[0;34m>\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 116\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m{\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mas_samples\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_num_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mk\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mv\u001b[0m \u001b[0;32min\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mitems\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m}\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdict\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32melse\u001b[0m \u001b[0mas_samples\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mF\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ma\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmax_num_samples\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ma\u001b[0m \u001b[0;32min\u001b[0m \u001b[0marrays\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 117\u001b[0m 
\u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 118\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0marrays\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/workspace/mxfusion/mxfusion/components/variables/runtime_variable.py\u001b[0m in \u001b[0;36mas_samples\u001b[0;34m(F, array, num_samples)\u001b[0m\n\u001b[1;32m 97\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0marray\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 98\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 99\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mbroadcast_axis\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marray\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0msize\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mnum_samples\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 100\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 101\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mxnet/ndarray/register.py\u001b[0m in \u001b[0;36mbroadcast_axis\u001b[0;34m(data, axis, size, out, name, **kwargs)\u001b[0m\n", + "\u001b[0;32m~/.pyenv/versions/anaconda3-5.3.1/lib/python3.7/site-packages/mxnet/_ctypes/ndarray.py\u001b[0m in \u001b[0;36m_imperative_invoke\u001b[0;34m(handle, ndargs, keys, vals, out)\u001b[0m\n\u001b[1;32m 90\u001b[0m \u001b[0mc_str_array\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mkeys\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 91\u001b[0m \u001b[0mc_str_array\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0ms\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m 
\u001b[0mvals\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 92\u001b[0;31m ctypes.byref(out_stypes)))\n\u001b[0m\u001b[1;32m 93\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 94\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0moriginal_output\u001b[0m \u001b[0;32mis\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mKeyboardInterrupt\u001b[0m: " + ] + } + ], + "source": [ + "for i_ep in range(num_episode):\n", + " # Run an episode and collect data.\n", + " if i_ep == 0:\n", + " policy_func = lambda x: env.action_space.sample()\n", + " else:\n", + " policy_func = lambda x: policy(mx.nd.expand_dims(mx.nd.array(x, dtype='float64'), axis=0)).asnumpy()[0]\n", + " total_reward, states, actions = run_one_episode(\n", + " env, policy_func, max_steps=num_time_steps)\n", + " all_states.append(states)\n", + " all_actions.append(actions)\n", + "\n", + " # Fit a model.\n", + " model, infr, model_data_X, model_data_Y = fit_model(\n", + " all_states, all_actions, win_in=1, verbose=True)\n", + "\n", + " # Optimize the policy.\n", + " policy = optimize_policy(\n", + " policy, cost, model, infr, model_data_X, model_data_Y,\n", + " initial_state_generator, num_grad_steps=num_grad_steps,\n", + " num_samples=num_samples, learning_rate=learning_rate,\n", + " num_time_steps=num_time_steps)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "Policy after the first episode (random exploration):" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": { + "slideshow": { + "slide_type": "fragment" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "
\n", + " \n", + "
\n", + " \n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " Once \n", + " Loop \n", + " Reflect \n", + "
\n", + "
\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 17, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(filename=\"pilco/animation_policy_iter_0.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "Policy after the 5th episode:" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": { + "slideshow": { + "slide_type": "fragment" + } + }, + "outputs": [ + { + "data": { + "text/html": [ + "\n", + "\n", + "\n", + "\n", + "
\n", + " \n", + "
\n", + " \n", + "
\n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + " \n", + "
\n", + " Once \n", + " Loop \n", + " Reflect \n", + "
\n", + "
\n", + "\n", + "\n", + "\n" + ], + "text/plain": [ + "" + ] + }, + "execution_count": 18, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "HTML(filename=\"pilco/animation_policy_iter_4.html\")" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "slide" + } + }, + "source": [ + "# Conclusion\n", + "\n", + "* Modular probabilistic programming library\n", + "* Flexibly pair specialized models/inference algorithms with a wide range of probabilistic models\n", + "\n", + "## Join us!\n", + "\n", + "[github.com/amzn/mxfusion](github.com/amzn/mxfusion)" + ] + }, + { + "cell_type": "markdown", + "metadata": { + "slideshow": { + "slide_type": "fragment" + } + }, + "source": [ + "## Reference\n", + "\n", + "M. P. Deisenroth, C. E. Rasmussen. 2011. \"PILCO: A Model-Based and Data-Efficient Approach to Policy Search\". _in\n", + "Proceedings of the 28th International Conference on Machine Learning._ [http://mlg.eng.cam.ac.uk/pub/pdf/DeiRas11.pdf](http://mlg.eng.cam.ac.uk/pub/pdf/DeiRas11.pdf)\n" + ] + } + ], + "metadata": { + "celltoolbar": "Slideshow", + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/notebooks/svgp_regression.ipynb b/examples/notebooks/svgp_regression.ipynb new file mode 100644 index 0000000..46f51c1 --- /dev/null +++ b/examples/notebooks/svgp_regression.ipynb @@ -0,0 +1,383 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Stochastic Variational Gaussian Process Regression\n", + "\n", + "**Zhenwen Dai (2019-05-29)**" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 
Introduction\n", + "\n", + "Gaussian process (GP) is computationally expensive. A popular approach to scale up GP regression on large data is to use stochastic variational inference with mini-batch training (Hensman et al., 2013). SVGP regression with Gaussian noise has been implemented as a module in MXFusion." + ] + }, + { + "cell_type": "code", + "execution_count": 32, + "metadata": {}, + "outputs": [], + "source": [ + "import warnings\n", + "warnings.filterwarnings('ignore')\n", + "import os\n", + "os.environ['MXNET_ENGINE_TYPE'] = 'NaiveEngine'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Toy data\n", + "\n", + "We generate some synthetic data for our regression example. The data set is generated from a sine function with some additive Gaussian noise. " + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "%matplotlib inline\n", + "from pylab import *\n", + "\n", + "np.random.seed(0)\n", + "X = np.random.uniform(-3.,3.,(1000,1))\n", + "Y = np.sin(X) + np.random.randn(1000,1)*0.05" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The generated data are visualized as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAX8AAAD8CAYAAACfF6SlAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJztnXt0lNW5/797LklIBERBEVDhICDKnQStrbUjROUqYMG0JagtB4i/s2wXKkJpQMlSNBrWr/zOORy09YZ4AlqgQLEKZCycQ7EJ3hARAUEFpFwERSAkM9m/P77Zvu9MZpIhyWRuz2etd73zXmZmz0zy7Gc/V6W1hiAIgpBaOGI9AEEQBKHlEeEvCIKQgojwFwRBSEFE+AuCIKQgIvwFQRBSEBH+giAIKYgIf0EQhBREhL8gCEIKIsJfEAQhBXHFegDhaN++ve7atWushyEIgpBQbN++/bjWukND98Wt8O/atSsqKipiPQxBEISEQin1eST3idlHEAQhBRHhLwiCkIKI8BcEQUhB4tbmH4rq6mocPHgQlZWVsR5KSpCRkYEuXbrA7XbHeiiCIDQzCSX8Dx48iNatW6Nr165QSsV6OEmN1honTpzAwYMH0a1bt1gPRxCEZiahzD6VlZW49NJLRfC3AEopXHrppbLKEhpPcTHg9Qae83p5Xog5CSX8AYjgb0HkuxYapLgYmDYtUMh7vTy3bx8wcaJ1zevlcU5ObMYqBJBQZh9BEOKI4mLA5QKWLwdKS4G8PMDpBF54AUhLA1avBnr1Am6/ndfeeANYsQLweDgRlJcDM2fG+lOkLAmn+ccTjz76KJ555pl671m9ejU+/vjjqI7j8OHD+OlPf9rgfU888URUxyGkGDk5wIIFwM03A34/hf7ixbw2bx4waxYwfz7wk58AS5cCw4fz2rRpgSsAMQ/FhOQV/nHyB9USwr9Tp054/fXXG7xPhL/QrHg81OS3bAHOnweqq3m+pgYoLAR27gR8PmDzZiA315oASkuB2bOp+ZvVg908NHo0MGJEoHlo2jRudmSCaBLJK/xzcqJib3z88cfRq1cvDBs2DLt37/7+/HPPPYecnBz0798fd911F86ePYutW7dizZo1ePjhhzFgwADs27cv5H3BPProo8jPz8ett96KHj164LnnngPACJyHH34Yffr0Qd++fbF8+XIAwIEDB9CnTx8AwIsvvojx48fjjjvuQI8ePTCzdlk9a9YsnDt3DgMGDMAvfvELnDlzBiNHjkT//v3Rp0+f719LEC4IjwcYM4ZCHqAgr6oCzp4Fxo+nGcjlAv73fwG3m5PE5ZdzRZCTY60eJkwARo0CJk8G/vIXwGETTV4vJ4zly8V/0JxoreNyGzx4sA7m448/rnOuXsrKtG7fXuvCQu7Lyi7s+UFUVFToPn366DNnzuhvvvlGd+/eXT/99NNaa62PHz/+/X1z5szRixYt0lprfc899+jXXnvt+2vh7rMzb9483a9fP3327Fl97Ngx3aVLF33o0CH9+uuv62HDhmmfz6ePHDmir7zySn348GG9f/9+ff3112uttX7hhRd0t27d9KlTp/S5c+f0VVddpb/44guttdZZWVnfv8frr7+up0yZ8v3xqVOnQn7mC/7OheTlqafq/g8VFGgNaJ2WprXLxceA1unpWmdk8PrQodb5vn25d7m0Linha5SUaK2U1u3a8dqgQVqPGqV127Z8btu2vGfUKK2zsrTOz+e+if/PyQqACh2BjE1ezR+gVlJQABQVce/xNOnltmzZgnHjxiEzMxNt2rTBmDFjvr/20Ucf4eabb0bfvn2xbNky7Ny5M+RrRHrfnXfeiVatWqF9+/bweDz4xz/+gf/5n//Bz372MzidTlx++eW45ZZbUF5eXue5Q4cORdu2bZGRkYHrrrsOn39et85T3759sXHjRjzyyCPYsmUL2rZt28hvRUgZglfTvXvTxn/ttdTeXbb4kW7dgK5
def1//xdo1QpITwd27ACuuYYrhQcfBAYPBubOpaZ/8iTQty+wezfw1lvA6dPApk3AuXM0E3m9fK+lS7mqAMTs0wSaRfgrpZ5XSh1VSn0U5rpSSi1SSu1VSn2olBrUHO/bIF4v//gKC7kP9gE0gnDhj/feey/+/d//HTt27MC8efPCxsdHel/w+yilwEm9YdLT079/7HQ64TNLchs9e/bE9u3b0bdvX8yePRvz58+P6LWFFCLYb+bxUAiPHk2BvW8fz3/6KfDIIzTxGPbuBT75xDoeOpS6P8Dn9e5Ngf/uu8CZM3QYd+4M7NnD+xwO+g4AmpGMKWn7dk4Ya9bQTLRvn9j+G0lzaf4vArijnuvDAfSo3aYCWNxM7xseYxNcsYL2xRUrArWWRvDjH/8Yq1atwrlz53D69GmsXbv2+2unT5/GFVdcgerqaixbtuz7861bt8bp06cbvC+YP//5z6isrMSJEyfw9ttvIycnBz/+8Y+xfPly+P1+HDt2DJs3b8aQIUMiHr/b7UZ1rVPu8OHDyMzMxKRJk/DQQw/h3XffvZCvQkhmjNC3a/omdn/BAmrdRUUM3ywpoZD2+SjEAUApywcAAJWVwJtvWsIfAHbtsoS74cgR+gTM1qZN4HXz/F27gO++42TgdFq2f5kELohmifPXWm9WSnWt55Y7Abxca4/appS6WCl1hdb6q+Z4/5CUl1sxxYAVmVBe3mjzz6BBg3D33XdjwIABuPrqq3HzzTd/f62oqAg33HADrr76avTt2/d7gZ+Xl4d//dd/xaJFi/D666+HvS+YIUOGYOTIkfjiiy9QWFiITp06Ydy4cfj73/+O/v37QymF4uJidOzYEQcOHIho/FOnTkW/fv0waNAgTJ48GQ8//DAcDgfcbjcWL47+fCwkCEbojx9PTd+EaKanA9ddx/8jE71z8iRNPPv3W8+3C3mnk1q9iQQqKGBIaKgVr9/PicPsv/02cAVgMEESbjfw/PPMHwAsZU+IjEgcA5FsALoC+CjMtXUAfmQ73gQgu77XaxaHb4Iyb9687x3JsSZVvnMhiLIyrdu00TozU2u3m47YwYO5z8jgNmaM5cgNt6Wn05lrjgcN0trpbPh5QODzgs85HNy73c0W0JEsIM4cvqEM5XUM2EqpqUqpCqVUxbFjx1pgWIIghMTj4Qrg7Flq7d260d7evr1lg1+3ruHXOX+eYrtjRx4b82IkpUPsK4guXaxzgwdbqwG/v9kCOlKNlirvcBDAlbbjLgAOB9+ktX4WwLMAkJ2dHZl3Mwl59NFHYz0EIdWZNo1ROob9+4HWrYHjxxv3eqdOWSYcvx/IyAg0B9lRKlDwt2sHHDxoHZsJxLye2w0sWkThLxNAxLSU5r8GwOTaqJ8bAXyjG2nv1xFGvAhNR77rFCG4OJvXC7zyCgVzVpZ1Xxj/VB3sWr2JAKqsZNbujTcCHTpw5RB8ryH47+7kSetxWhqvZ2RQ8LdtC0yZQufw7bdbjmnJBm6Q5gr1/G8AfwfQSyl1UCn1K6XUdKXU9Npb1gP4DMBeAM8BuL8x75ORkYETJ06IUGoBtGY9/4yMjFgPRYg2OTnMnh071sqmVYpx+yaCpyEcDmrggCW8jbPXXJsxA/j734GjR+lE9vt5j8nmtWf1tmkTGDqqFI/NpGEcxrfcwjDuL7/kJHD//XQol5Zak9nChQwLlWzgAFS8CtLs7GxdUVERcE46ebUs0skrhZg2DVi2zArbBKj1O53AiRONe0232zLrjBkDbN1qRdy5XAzBrq7mJHD+vCX8jT0/PZ2Pq6vrmoKCcbmAfv1oEkpLA371K+C11zjJvPIKMH06k85SoIqoUmq71jq7ofsSqqSz2+2WrlKCEA3ef59atRHWDgft9ED9grdzZ+DQIevYaOg+H18rPZ3njhyxBH9ODhPF5s8H1q9nFq/LxecMGsTksMpKTghZWTz3zjv1j9/vp+DPzwcGDAAeeojCfulSZhS/+iqwapV1v5SUTvL
yDoIgWISrdDtiBKNx7ILfHltfn8ZtF/zmXvtzf/QjCvidO4H33qOw9XiAtWuZJfy3v3GC8PmYO/Dee4zeMY7bmhpGGNmy1tGzZ91xaM0JZM0aYOBAThj793P1sXdvYNLZ/ffTP5DiZiAR/oKQKgTX5pk2DRg3jhrymjWW8zU4qSoU4UI1leLzHQ4gM5PaNUAtv7DQeu/33qM/wefjvSUljC6aPh147DGgooL3p6VZqwmA9376aej3drkYmnrrrQxL7dbNmtAqK7nauO02+gimTOH5FHYCi/AXhFTACLkVKygEJ0+mjb+ykoLyyivr1/Dt1GcG0tqaAIqK2M1r4kRq4+vW8fHcucCcOTTpDB1KAW+um+zeu+/mhPGjH/H8rbdSmw83Md1wA3DZZZaw79yZ0Un5+Tz2+znZbNjAFcaECSlfElqEvyCkAkbrB1i2YelSCsonngA++ojRMpFiF/xut7UKcDiooWtNAbtgAc/by6qYKrsOB00/GzfSFm/G1r07j5cs4XHnzla46d691gpAKU4a5ri8HPjiC77uNdcAhw+zNMV99wEjR1pjVIorjOHDGXZqx4SJpspqIJI04Fhsoco7CILQBOwlG9LSWCohP5/HpnxCVlZkpRfM5nTyOb17s+RCVpZVy7+khD0A7O/fvj1r9LdpE1iOoaws8F47pt5/bq5V2iE9ne+VkaF1x46B4x81ilubNjy+9lpeM/0C7P0GMjL43mVl7BsQPK4EBHFW3kEQhHjA56Nd/JFHGBWzdClNPw4Ht0ji+t1uatOZmXyO1rSht24N/OIXLLQ2eTI1f2NWsVfZ3bjRMgcZH4DHEz7yxucDJk2iySY/nyaj8+epud92GyOJ2rXjvabsxFdf8bOcO8fooZtu4mMTtmwyjZ1OfpZRo/g5Vq9OnSzhSGaIWGyi+QtCMzNkCAuh5edT801LszThK6+sX8O3F1TLzLS05alTuQ0fXldjtmvzobqA1aftB99nOvIZ7byw0FqlmAJzPXuGHnvXruwcZgrUmX1mJgvNmfuGDm2e7znGIELNP6GSvARBaCReLzN4/X6KuqoqK9Im2IlqkrPat2dphawsllfu3JlaMgCsXBlYMj2a47aXah47lnb7VauAWbMYQrp2LVcZGzZw9RGqDEV6OlcLBuMDMDkJAD/3m28mvOYfaZKXmH0EIdkpLma5g9WraZbx+SynbU0N6+TYqamhY3b8eF779ltW0jx5kg1cliyxnLjRxt6Xo7ycn2HVKj5+5x0K/tJSho5260bBb3fuGoIFv9H3fT4rlFQpq8RFCiCavyAkG8XFtLUbDdZo/Xl53IYPDxSGdpTiauAnP2GsvdbAvHkUkiZiqCU0/kgxK4PZs+kLqKri5HXJJcDXX0f+OllZDC0tK+Pz16+P3pijTFKWdxAEoR6M0LcLacAq1FZaSo3ZCP7geP2OHek89ftZcmHoUApUu6BvYje8Zqe8nIJ/wQJg2DBg82YWhTt4kD0A7KWg68M+URYVWY9NOYpkLAURiWMgFps4fAXhAjGOUeOMzcigY7dtWx4PHGg5N01XruDN4eA2dGjidMd66imGg7Zvz31mptY33VTXUV2fQ9tcLyiwXse+T4TvoRZIqKcgpBgeD+30w4ezrHF1NbX8yy4Dfvtb4IMPeJ9SzOo12G3+NTUMe9y4kVq+PRwzXpk5k2apFSu4X7eOiVw9elC0t2nTcMkKU0Po1VeZ7fzQQ/weFyyILzNXMyI2f0FIJrxemj9Mhyu/P1DwmRr7hsxMxv0bE5DDAWRnW1U0E7X6pdfLSeyHP2QUkNtNs1a4TGaT56A1v5+bbwa2bGF9ofnzW3bsTUSifQQhFQiu1Flaaj2urg5skAJYyVAABf7Zs3ysNROoLroI2L07suSreMU4gdet40Q4ahTQqlXgPRddFHh8ySVcNfj9LE2xZQvQqROLwMX7yqeRiPAXhETEtF7cvNkKT1y4EPjjH6npZ2byPnspYyCwB6/RdgE
K/jfeYGTP3Xe3TBhntLCHh86cyXDQ1at5LSODn/m77wKfY76X3r2Bt9/m43/+k3WCRo9OyglAon0EIRHJyQEef9wy4dx+u2XisWv04ejbF9ixg48LCljW+b774i+UszGEW6l8801ga0igbpLbrl3cm/aS77/P79lMholoAguDaP6CkIh4PNRm7V2zampY094VgU63YwcdopmZnDRMk5WWSt5qacrLuaoB+D0Zs4+ZLIMxCWCPP87ewyZ8NolKQIvwF4REpbwcuPPOwIStYDOPHYeDvXQBOkC//JIx7StXJraNPxJmzqQvw+mkictu9tGa5aHt1NTQX/Db33KfDCuiIET4C0Ki4nKxKqfBmDSqqync7SaOrCwK/44d2TXrvvtYedOESCajth9M9+7U/t94g81j7FRV1b1/wwZOAps2Af37W4I/XDvMROsDEEkyQCw2SfISBBvBVTHLygJr73fpUn8Sk9udkAlLzYpJgispYeKbqe4ZSc8Cl8v63uzJdKGOYwwkyUsQkojg/ruzZrE+/WWXUaMPV8YgN5eavs/H56aKlh8KEwXk8zGiyfTxrQ+Hgz6RwYOZ9HXVVZZvxLSkTFSTUCQzRCw20fwFIQh7Xfv09MAa9aG01fx8rg7KyqjtDh8e608QP5iVk+kFEFzmwb6ZTmUA96YHQX4+zxUWxvazBAHR/AUhyXj6adqqi4qA665jlIppWB6My0W7/tq1VgP1BK5U2eyUlzNz9403eNy3L/c1NcD11wfeW1XFEFCXi07jzZuZU/HKK3Qe//73zLlIMET4C0KicPIknZDXXMOm6/VF9tx+u9UUPZVNPeGYOZNmMJ+PZrFJk6yG7jt3Bt5rMqV9Pqvc84MPsrHNfffRLPTSSwmXCCbCXxASAa8X+PhjZqju3Wt15AqFw8HSzPbyy8kYvtlU/H7gmWcYxz9zJvD3vzPD12AvCWFPBDPfe7t2wLhxnBS6dUu4CVYyfAUhESgvB669lo7H556rX+tPS2NzdqF+QpnBWrdmmGzbtiz54HIFftf2HghLl1rXgyfYBCiIJ8JfEOIV05zFNBRxuWhuaAgj+O0NXYTIuOsuTrCLF1PzP3cu8LrWwKWXAidO8NjnY+LcsmVcSSxZUrfvcJwiZh9BiAdCJQ65XCwqZoq3/e1v9b+Gw8HnbN8OLF+emOGHsSYnB3jtNQp0u+Bv08Z6bAS/Yf16oLKSFVUTKPRThL8gxAPBcfxeL2vJT57M8sLnzwNr1oSuQwNQ6NfUsEBbdTVr8se58IlLTFvIjRuBzp15rmdPNrG/6abAe81v4fMBgwYBv/41I7EKChLiuxfhLwjxQKjEoaIiaqGmfo/d3myvyz9oEPDWW4xW2buX4YcffJBw0SdxgekKVlQEHD3KJLn9+7kSuPNO+gIMdof7oUPAokXse5wgPQBE+AtCvODxUGs02uOMGdRCX3mFcehG2LjdwO9+ZxUj27EDeO89Cv6SEqBPn8RpwRiP5OSwfeOTT/J7nTKF+RIrVwKnTwc2yGndmvtDh/j7zJnD32zUqPj/7iPJBIvFJhm+Qsphz+C1Nw/PzdUBzcjT0vT3zcazsrS+9lo2LS8pqft6JhtViBx7HSXzm5jfwOm0Mn/N72G2ggKtp061frsYffeIMMM35kI+3CbCX0gpQhULy8zU+sYbWcIhPV3rNm0oYBwObhkZgfeLoI8OhYUUle3aWYK+detAwa8Uf5P09Jj/JpEK/2Yx+yil7lBK7VZK7VVKzQpx/V6l1DGl1Pu1WwQVlQQhhbC3HjSlga+6CqioYGnmX/4SyMsDXniB4qamhs3JjWNRErmig9dLG35+PjOsDadPW4+NL8Y0ifF4gPvvp79g376WH3OENFn4K6WcAP4DwHAA1wH4mVLquhC3LtdaD6jd/tDU9xWEpMJ00gIoMEaMoFDx+YDbbgP+679oc66s5GRQWChO3Whj4vVnz2YNoIKC0PfZHb8nTjDSavHiQN9AHNIcoxsCYK/
W+jOtdRWAUgB3NsPrCkJqkpfH6J5du5hwtGYNBYxpMv7WWyxKJk7d6GIvAb1iBZO4gnsAh2L7du4zM/lbxinNIfw7A/jSdnyw9lwwdymlPlRKva6UurIZ3lcQkgd7kld5OfCDH/CxESQGt9t6nMw9d+MBsxoz+0OHOAHYf4P6+PnP+dvEaZev5hD+obJOgitOrQXQVWvdD8BGAC+FfCGlpiqlKpRSFceOHWuGoQlCgvCnP1nhgfv2Af/4R+j7Bg8O1PbF1t9yHDtGwe/zAZdfXv+9StH08+KLgY3f42giaA7hfxCAXZPvAuCw/Qat9Qmtteky/RyAwaFeSGv9rNY6W2ud3aFDh2YYmiAkAMXFFOrnzrEU8/btoQu3OZ3AO+8AEyaIth8LLruMv8uwYcA//2mdz8wMvK91a8sPsGsXk+88Hgr+0aM50ccBzSH8ywH0UEp1U0qlAcgDsMZ+g1LqCtvhGAC7muF9BSE5MPVkpk9naYZgU4+hpobCo6xMtP1YcPPN/I02brQyrK+/Hjh7NvA+eyQQALz5JieM0aN57913t8x4G6DJwl9r7QPwbwDeBIX6Cq31TqXUfKXUmNrbHlBK7VRKfQDgAQD3NvV9BSFp8HiA8ePZECRc7Z727alNrl9Pc4LQ8phJetIkhn3m5rLxi3ECX3FF4P0uFyN+qquBTZuAM2es/gFxgNLhGkLEmOzsbF1RURHrYQhC9Cgupn0/L49lBEy55rQ0tg4EAuv5OJ1Ap07AF1/EZrypTnExBfqCBQz7XLiQuRj79wM9erDMhv33UspqAG+OR47kCiKKKzel1HatdXZD90k9f0GIFfv2sQ58aSmQnm6dN4IfoCBxu61evS+FjJUQWgJTedUk4118MfDQQzQFvfCC9Ts5nVanNbvg1xpYtw648kpOJDE23cV3FoIgJBOhavafP09zQKjoNmMCqq6micHt5kQhxAZ7FjZA5+/06cAf/sBS2lOmAFlZvBZsvrMf/+EPVvRPDBHhLwgtRXDN/l69KECMdmgIFhwZGewRu349s3wlqSs22LOwzXHXrqz++dVXwPPPs/+Cw1G3v3JNDX/D9HSuGOIgWkuEvyC0FME1+xcsYBhgMEZwaM068kboA5LUFW/MnEkH7qpVnKSXLOFKLVQi2P79nOxPn6bvIMbx/iL8BaEl8XiA/v1Zs3/4cBZuC0fPnlbxNiP0JakrPvF4gAceoIZvCFXbx+9nb+A5czgBAHQc33ADMG1ai04I4vAVhJbE66UQz8yk/d4uLOy0bg3s2WMJCI8nIVoDpixeLzt5mUgt46Dv0gU4eNC6z+VimKjLBTz2GBvwLF7MVcMnnwCrV7fYkEXzF4SWwlSJzMuj5g+EF/5VVXQmzp0rNv54x+sFxo2jme6vfw2s/nnoUOC9Ph8nBGP+WbyYJqK0NAr+FpzgRfgLQkthokXy8mjv79OH50Mldg0cyISi+fPFxh/vlJcza9cI7wMHWK4DqOv4dTislYC5Vl3N5u8tvLIT4S8I0SBUWOe+fTT1eDysEf/++8All1hCoFs3TgRuN6/Nnk0NUWz88c3MmXT0GuE9bBjw7rt171Mq9ErP5WLE0LRp0R1nECL8BSEaBId1er0U/MuX08G3YAGFxNdf87rLxWiQYcOoCfbvL4I/EfF6uVpr1YqmHJfNrRqqmkLXrvydq6uZ8NeCJj4R/oIQDYLDOidOpFlg1Sp24Ro4kAXCBg2is8/noyDYuJE24/HjRfAnIsYEtG4d8Mgj/F1d9cTVHDjAfVYWV4EtaOIT4S8I0cCE7Jmwzosvphln1iygd29gwwYK/rNnGf7nclEQTJpEW38cZIAKjcCYgACr92+o8tx2evbk38FvftOiE76EegpCNMjJAcaOpWDPzAQ+/5xhfQ4H7b5durB0s8NB84DDAdxyC3vFzp5txfQLiYeJ6ho/Hvjww4bv//RTrvZmzLBCgVtgEhDNXxCihVIU/j/8IbU/I/gBRnwYB6DDQTP
Bxo0U/IWFovknMiaqq1cvYNs2rvQaav346qv0Bdm7fkUZEf6C0NwUFwP33ANcey0rOG7YQHt+TU1gWKfWrABpEri8XjqCi4okvDORMTWAfD6gpISrvr59w/dqAPi3UFjISQNokUxfMfsIQnNQXEyNzePh/uuvgS+/5DWHg5E8QGDEh9tN4b9xI53CixcHVo0UEhtjujl1ykrqM41fTDG/rl3p6/n2W+YGLFzITm3r1kV9eKL5C0JzYA/t9HhY3dEQKrZ7zBiG91VW8npREe2+IviTC68X+P3vqQC43YzsqqnhJOB0WtE+AH1A69bxb8GsBKO4AhDhLwjNQXBo58svswZ/KDIyaPNPS6NZyOEAhg6l5i+lHJIHezmPjRvZy9fpZFln0+wFCCwA53YzDHjhQvb8jaL9X4S/IDQXHg+196IiRnps3Rp43elkPLfPxwzQW24Bzp1jhM/GjdbkIRNAcmAcvyb71+NhrscPf8jrNTXAZZdZK8NLLuFqcNo0dgibP5/no6T9i/AXhOZi2jTg6aep8S9dyg5dDgc309rv3DkK/x49ODnMnm2Zeuylm4XEJ7j5i6GighnATidw9CjPOZ30EynFaq6dOgG7d7Ng3L59URmeCH9BaA68Xqu/7ubNNOeYzE6nk9pb587U8nr0YMu/tWsZ3WPX9KVef/Jir/55772BHdyMkmBMQYcO0XSoNc1GUUCEvyA0B+XlwBNP0J5bUwP8859WHP+TT/Keo0eZ8XnypBUZJJp+6mBKP8ybR8E+ciQ7uTkcNPcEBwZUVvLeKAUBSKinIDQHRlvfvRt44QXW4wco7E3DjpISK4tz4kQrrFMifFID8zdSXMxVn4noGTWK5R2Ccbv59xQlRPMXhEgIVaLZhOLZr/XqZXVxAljJc/FiK30fEI0/1bH7AsJN/rm5XBEsXx61AAAR/oIQCaFKNJtUfHNt4UIWbjOY5bzTyYQve9SG2PYFgH9Hb78deC4jA/jHP2hG1JoKRBQQ4S8IkRCqRLMx25SX03Y7a5blxLPX8fH7GeMt9XqEYEpLGQGWns5cj8xMKgu9ezP8d/VqoHv3qLy12PwFIVLscfyFhTwuLga2bAHeeotC3lTstDftdjrH98UfAAAgAElEQVT5Ty0IocjIoH1/zhwejxrFTm7r10fVJySavyBEitdL+31hoZWNu28f91pbhdvsgh9gJu/cuWLjF+rSvTtLOqxaZZkV3W6WB4lyIIBo/oIQCaEidCZOZJJWVRVt+0oFFm5Tilq/w8GoDdPkQxAMdr+PafxTWGhl90axvr9o/oIQCSZV32hj5eUU/Bs3Ah078lyoHq133MEJIIpRG0ISYIR8ZiYLwXm9gUEFUUA0f0FoDKZTV6dOTOgKhdacLGbMoGNPunMJoTBCfvVqHo8bxwSwtDSagyTJSxBiiAnn/Jd/Ye31iy9mBuYnn4R/jjH3zJghQl8IT/Cq8oEHaP656aao/t2I2UcQIsGEeu7cSYG/bVtoM4+dIUOiFqYnJBH2pC97UMEHH0TVVCjCXxAixZhwDPZM3lDs2CGx/ULk2IMK5s+PeolvEf6CEIy9XIN57PWyZPPixeGbtBhMx6aqqqhlZwpJSLD5J8plQJrF5q+UugPA7wE4AfxBa/1k0PV0AC8DGAzgBIC7tdYHmuO9BaHZMfb9FSv4eNw41uB3Olllce7c+p9fXc1+rP36idlHiJxQ4ZxRTPJqsvBXSjkB/AeAXAAHAZQrpdZorT+23fYrACe11tcopfIAPAXg7qa+tyBEBXsph/79KfjPn+f5uXPZpCUUSrGGf3U1+7E+8YQ4eoW4pTnMPkMA7NVaf6a1rgJQCuDOoHvuBFDb6QKvAxiqlFLN8N6CEB1MKYdNmyj8PR5gwwZq/wAFfTBduvD6DTewD6tk9ApxTHMI/84AvrQdH6w9F/IerbUPwDcALm2G9w6kvrK7gnAhTJsGLFrEevznz1Pwt2rFME+TyesKWjh//TXrsWzbBrz
zjlTtFOKa5hD+oTT44Bi4SO6BUmqqUqpCKVVx7NixCx9JfWV3BSFSvF7g+ecp9AcMYNN1gNUXAQr+du24IgCAvn25P3++5ccqCI2kOYT/QQBX2o67ADgc7h6llAtAWwBfB7+Q1vpZrXW21jq7Q4cOFz6S+sruCkKkPP00MGIEhfmsWUy2CebkSe5zc4GPPuIKIStLonuEhKE5hH85gB5KqW5KqTQAeQDWBN2zBsA9tY9/CqBM64YyZBqBMe+YsrsFBYHnBSESHn4Y2LoVmD6dztsNG3jeFGmzs3Ur8MwzwBtvUOFYuVJq+AgJQZOFf60N/98AvAlgF4AVWuudSqn5Sqkxtbf9EcClSqm9AGYAmBX61ZqIqbeyaBEz5BYt4rGYfYQLwawgX3018PzIkVaDFlOxs6qKzt0VK2gGkvaMQoLQLHH+Wuv1ANYHnZtre1wJYEJzvFeD2Mvqah06KkMQwlFcbCkLpqn2RRcB330HrKld0BYUsHHL8uVcGZSWslyzPTlHEOKc5CrsVl7OKnheb2C3JammKESKCRpQioK9Z09gzx7g0kuBEydYcrdrV0by5OVR8Esil5CAJFd5BxNaZ++2ZD8vCA3h8bBO/7FjtO/v2QOMHs0wToeDqwET4unxUOOXvy8hAUku4e/18h919uzAwkgLF4rTV2gYkyfi8zF6x7RlXLOG9v20NJp85s4Vp66Q8CSX8C8vB37yE+Cxx/jPabS43/6WvVYFIRzFxdToR48GDhxg9E7nzpaD1+cDJkwAli3j31hpqSgUQkKTXMJ/5kzgwQeprY0dSw3tscdYYTEvL9ajE+KZffu4Whw6lObCSy4BDh0KvGfpUjp6b72VIZ0SRSYkMCoa4fbNQXZ2tq6oqGjck71eYNQo2mczM4F168ThK9SP10uFoaqK2btffWVdczop9AGWa27Vii335G9KiEOUUtu11tkN3Zdcmr+htNRKtTeTm6nHLkt1IRQeDwV6TU2g4O/Y0RL8DgcjgHJyRPALCU/yCX+vl3ZZv58OOoeDdtzRozkpyFJdCMd771HzN3TsCBw5Ql9Afj4VibQ0oKJCHL5CwpNcwr+4mJE9LhdQUsLl+Zkz3KqqLLv/woWs3QJI1c9UpbiYK0F7EcBZs6gsuN1AejoFv9NJZ+/KlSzj0KoVcPPNUW2vJwgtQXIJ/5wcoKyMjl6fD7jsMuta165Ar17AbbfRKTxsmFT9TGVycpihO3Ys/w4WLqRJx+kEpkxhkIDbDQwfTv+R388yDqtWUfhLGQchwUk+h68R6AMHBhbk8vut0g9pafwH/uADqfqZithLOIwbx8CAmhpq/W++SaHucgG7dzN7d+ZM/l2Vl0tClxD3pK7Dt7zcEvxmCW8cdqYOe00NOzQVFIjgT0VycqjNv/cekJ1Njd/vZ89dI/gLC2kmNMLe4xHBLyQVyVXbB+A/7oYNDPGsrg68phTrsBsH3sKFlvAXrS518HhY++nBB61SDU4n++6ePs2SDs88w/tE4xeSlOTS/L1eYMECOnsBCn/7BGBMXKb07vz5XPZL2efUwJRvMA5+t5t/C61aWavDTz8FBg3i6nHaNPEJCUlLcgn/8nIK9RkzgLvuss4Hl3U2ERzr13NCyMsT808qsG8fJ3uXC3joISoGDofVnhHg8fbtwB130CEsPiEhSUk+hy9gZfhefDFw2NZR0u3mP7xSFADV1bTtzp/fPIMW4huv1wrxray0zjscVg0fgH8bprjbyy+37BgFoYmkrsPXRPusW0dnnh1jAtKaj3Nzaff3eiXeP1kxph6AGvzjjwcKfiBQ8AMU/Lm50pJRSGqST/gb089777EUrz3WP5itW8Xun+yYyJ6FCyO73+1msEC3bsDatZLMJSQtySf8Z86khrdxIx2/paV1m24DVgjoqVNi909m7JE9vXrR1h+utWdGBvDkk8CkSdT6AUnmEpKW5BP+hvXr6fgFGM0RjMPBRK+iIuDXv2ZHJiF5sJt7fD62Y/z
0U070Tmfo51RWsvdDXp4l9CW+X0hSklf4A5b9/6qrAs+3b8+qnxs2MKxv4ULghhtiM0YhOphevF4vHbh79ljXfL6693fpwn1GBgW+CH0hyUlu4V9ezu5Lu3YFanvHj1uPu3Rh4bcPP2Rct5AceDzA+PGs5vq737FQGxDaBAgABw+yc9dNN7XcGAUhhiS38J85kzHbmZnAr35FH4ARAgC1vDVrWObB4QA2b5aa/8lCcTFt/FVVjON3OPh3YI/sUYq+H8ORI8DDD7f8WAUhBiS38AeY7LVuHW36AwcGrgAqKykAXnqJ+y+/lJr/yUJODiO53G4K/rNnudmdvSbkNyODW6dOjAyS6B4hBUh+4W+if+wJPj16WNe1plCoquJkMG8ezUX2Ou/BZQGE+Ke0lILd7Qauvto6P2JEXdPPww8z/v/YMfbnlegeIQVIvsJu4TAhnzU1dP6ZbF+DSfp67DHguus4CQwbBmzbBsyeTefhihWxG79w4bjdzPPYs8fqyvX++4GmH7ebRdwcDpb6njFDQn6FlCD5NX9D9+40//TrxyW+KeRlp6yMq4Bt27gS+Mtf2MxjwQKp8ZIomFXakiVA794U/EpR8PfsCRw6xPvS0/l3UF1Nn8CZM5zw5TcWUoTUEf7G/PPOO9Tsg1P6AU4IJgywpgb40Y+ApUul7n8iYYq3jRpFZ7/TSdOeUozzB6jlP/EETT2G9HQ2bxGEFCF1hL/B6+U/ubH7u1x1Mz6VAgYPBrZsYR7AokXMBTA2f7H/xx9G48/Lo+/mzTcp9O2NfACGcz79NDBnDje3Gxg6lMJ/+XJx9gopQ2oJf5P0NX8+m7qUlFiaoR1T1nfMGE4U333HsgAul/T9jVdMUhdArd7nC53MdeIE/TrdunGSePJJlgJZvZp/B6WlLTtuQYgRqSX8TdE308wFYKZvsObv91Mj3LSJ9zqdLBGxfr3l+BUzUHxhkrqGDaPwD4XDQYF//jywfz8nf1MCxOPhBNC9e8uNWRBiSHLW848EU/O/Xz9GgNx3H/Dii1Zjj2uuAQ4coPAvLOS5oiKp/x/PeL0U/saf064dV3h2TK3+oUOp8QtCkhFpPf/UCfUMpryc0T/l5Zam+OqrTPT54gtg717agWfPpr3fNPcoKbFqvwixobiYZh7zGxQXU6jv3s19VRXP2wW/w8EVXHU1/TgffMDJQn5HIUVJLbOPHRP9M3Mm7bwjRgBz5wLXXmvF/58/z5DBM2doClq5kklA48ZJHaBYYi/aBlhtGZctAwYMYEinHePXqa5mkb8vvrByN8TBK6Qoqav5B+NwUPhrbZkGAODoUe6VokZ5/DhzAd5/P3ZjTXXKyym8R4+mnf+NN4AhQ6jNHzpkxfIb/H6rjs+LL1qvYS/bLAgpRura/IMx5R+0ppAP9b0oxfNZWezyJEIjNpiIq3btmMSVm8vObVdfzSgtw+DBgcd2B68gJCkt0sNXKXWJUmqDUmpP7b5dmPv8Sqn3a7c1TXnPqOHxsMbL+fOhBT9gNQKZP1/qv8QCE8vv8VDz37OHK7YNGwIFv1L8neyCH2AcvyAIAJpu858FYJPWugeATbXHoTintR5Qu41p4ntGB68X+P3vafKpj5oaCv+cHEn2amlMP94f/IAdtwYPtiJ7tm/nRDB1KjB9emD5jtxc7nfuFBu/INTSVOF/J4CXah+/BGBsE18vNni9bOBeVQVkZ7PAVzi0Zqx4SYkke7U0ph/vtm103m7fbnXgAjgRHDkCPP+8lbvhdjOks6CAqwFJ4hIEAE0X/pdrrb8CgNr9ZWHuy1BKVSiltimlwk4QSqmptfdVHDt2rIlDuwDKy1kW4PHHKVB+9avwfV4Bmob++lc6GwHR/luSjRsZzWM0/oMHrWsuF5vzVFfzcY8efJyWxpWAJHEJwvc0GO2jlNoIoGOIS3Mu4H2u0lofVkr9C4AypdQOrfW+4Ju01s8CeBagw/cCXr9pBPdqffDBhp+jNbXM0aPp/AW4gig
vl96v0eTrr1mgLVQCl4nQ0poa/9GjTMpbuJDRWUuWiJNeEGppUPhrrYeFu6aU+qdS6gqt9VdKqSsAHA3zGodr958ppd4GMBBAHeGfcJgWkKZZjNT8jw7FxazWeegQ8O67tO0HC347WnNlcPfd/G0WLQI++USSugTBRlPNPmsA3FP7+B4Afw6+QSnVTimVXvu4PYAfAvi4ie8bPZYvZyjn5ZeHvycjg8LF5QJefpmOx9GjpeZPtMjJ4e/y1lu05Ycqx21HKfpl9u+ng3jVKpp8JEJLEL6nqcL/SQC5Sqk9AHJrj6GUylZK/aH2nt4AKpRSHwDwAnhSax2fwt/rBT77jNE8p09TqIeispKdodLTWQvo3Xdp/xfB33RMOGcwd9/NBuz27mvhMCG5Gzawh7MpxyHmOEH4niZl+GqtTwAYGuJ8BYAptY+3AujblPdpMUzm6IIFjCqZOzf8vUeOWG0hO3cGXnuNlT+XLGm58SYjpnSDyb51uaxOapdfzt8lHOb3aNWKk3KPHsz+FXOPINQhdWv7hGLmTKvc8+7dfFxQQA2/c2frPtMAvKaGj48e5V6agTQdj4ff/8SJ/D4feogTMkDHbX3U1ACtW1PwDx4MTJlivZb8LoIQgAj/YEzBt+7dWR542TJqjw88YMWK223O5vG6dWz7eM89wP33s1QEQKEzbZqEg14IHg+/63ffZZhmYSF7KZ85w+sOR90eDIazZ5nU9e67XDWYyUTs/YIQiNY6LrfBgwfrmDN1qtZt22pdVsbjUaO0BrR2OLi3bzfdpHWbNlqnpfG4d2/r+W3aWK8hNExZmdbt22tdWMjvz+nkd+p08jcoKbG+Z7OZ38Tl4vNLSrTOypLvXUg5AFToCGSsaP71sWQJI0UmTqT9f+NGav5paTQF2dm6lddMLfldu4CXXqKDMi9PbM6RYkJmjQN97tzAUg3Hj9MHcO+9wMiRVjLegAGBWbwzZjD/QjR+QQiJCP+GMCaIoiJgwgSGeVZWWtU93W7rXhN7buoDnT/PfV5ey4450bBH+JhSy716WT0WsrLYgMXvZ2mHCRM4MZuwz9xc+mgmTKCJzmTxSoSPIIRFhH9DeL3A4sW0O69cSYEycCA1/PHjqWm2aRP4HHvjcLvWal5P7P+BmIJto0fzcWkpm6x37Uo7f8+eLMp2442cCJ5/Hpg8mX6WESMY/792rdXAXQS+IDRMJLahWGxxYfM3tmdjNy4rs2z4gwbRxnzxxXXt/2Zr3577zEzLDm0eC4GUlGitlNbp6fyOlKId3+Wy7P1Op9YdOmg9dCjPud30qxjKyrR+6qnYfQZBiAMgNv9mwJgg7PZ6rWnGyc6mFnrqVPjnHz8O3HQTTRMPPsiwxaIisf+HYsYM2u3Pn+eqyph0/H7LtOb307T2t7/xnM9H8xAgdZUE4QIR4V8fJuzTUF7OMgFLlnBbu5b2fbvd307btnQEX3EFO00NG0ancXDMuZiCGA770UcU+j6fFUJr6vSYVow+H7fqapqJFixgaO3o0fwtUv17FIQIEeF/IQRPBh4P8LOfhS858M03rDe/fz9w0UUsN9C1a2DSkYluSYW+AKFKN0ybRnt/aanVaD0Y4ze54QYrwc7p5OR79dX0yUyezIkgFb5HQWgOIrENxWKLC5t/Q0ydylhypULb/DMzuTfXx4yhH6CkxIpjt/sUkp1gH4r5/jIytC4ooA0/nP/EfI9t22qdn2/5BACt+/ZNre9REOoBYvOPMl4vyw+YsM9QnD3LvdYsN7B1K0sVmLIRRUVWSehUwF66Ye5cVkStqgLuu485EZeF6wVUi9bALbfwecOG0RyUlQXs2MEM4FT5HgWhGRDh31jKy5kANnhw+FIDhksuAT7/3BL8OTlW+OjixalVdyY4byItjd9B9+6s118f119PZ+/o0fSdDB7MUNDcXOCVVxqu/SMIgkUky4NYbAlh9tGapgtTfiBc6QeAZo2sLK2vvbZu+GgqmCyeeoqf0V66ISN
D6xtuaNjcA7CcQ1YWTWnGTGQ3oRUUSBitIGgx+7Qchw/TIdmpE4+DG404ncwKfu45aqknTnAFYEwUHg+Pn366Zcfd0uTkAGPHAuPGMTnu4ov5vb3zTmQ1+quruWqaNAlYv56O8xUrGCK6YgWP162Tcg6CECmRzBCx2BJC8zdarNE+u3ULrbWa1cCNN2o9cmRgwbFU0fy15irJJMgpRQd4Qxq/fcvPj/UnEIS4B6L5twAmCWzGDJZ82L+f55Witm+oqWHnr127gC1b2CnMOD1N45JkcFaGCuW05zB07w5ccw3LLbvdwF//Wr+/JPhaaWlq+UcEIYqI8G8KJu5/4ULG8Buh/8wzdSOAjhxha8jrrguM9hk+PHlMFaYLV7gchhdesGr0A4z0Cf6e7Jhrbjdw1VU0oY0dKxOAIDQDIvybitdLDX7UKGDTJtqjFywI3QC+poZVKQ8cYIRLfj6jVFxN6qYZPwSHctrbMY4eDezdy/scjrp2/iuuCDxWikK/pAR4803g//wffrd5eckzWQpCDFG6Ps0rhmRnZ+uKiopYD6Nhioup2drNNl4vHZvffVe3qqfpBDZpEvDnP9PxefAgQxeTwfQDUPAXFTGUtaKCztgXXqDADyX4DUoFrgTy8xnTLwhCxCiltmutsxu8T4R/lFi4kMXcQtGlCwW+EXa9e7M5SaIXJSsuthqu9+9Pwd+pE30dY8aw9HJlZf2vYb4T04y9pIQ+FUEQIiJS4S9mn2jx7LO0U4fi4EHuzcQ7ZQpNQR06cNVgd5LaHzfkUG1pgsfjcnHCu+YaOncrKyn4AWDNmoYFP8DvZMwYmnwyMoA5c8TGLwhRQIR/tLj1VuCLL6xjR5iv2u1mqefFiyn8TSy8y8WiZ3aHaUMO1WgTLOxzcjjWadN4vHw5P8877zDyqaoq/Oe2k5VlRfY4ncBvfsOOXJMncxMbvyA0O0niaYxDTAlirWnXD1f3327/PnqUAtPpBH73O0bF/PznTADzeAIdqgUFnDBaIkzU+DXM5LNiBc+XlvLzLV9OB/fu3UCrVqy+uWEDnbhffdXw61dW8rOa76y0lCWzk8UHIghxiGj+0aR1a6BbNwr+4IbvweTmMvu3spLbuXPAkCEU8BUVltZtr43TlDDRCzEhGaEPUPCPHWuVYV69GnjgAY4nOxv4xS8o+C++uGHB36oV934/m7g89RQje1auFFOPIEQZEf7Ront3CsIDB1it0jRzBzghBLNpEycLgMLw4ospRAGgRw/gT3+iAF64sHnCRC/EhGRfcXi9zFM4exb49a953RSp27yZj3v2rL/DmaGy0mqE07MnHbvmvcTUIwjRJZI04FhsCVHeoT5MT9oxY7g3tf27dg0sWWB61Ibb7KUhRo3i40GDAstKNLY0hL3IWiSvY3rnulwsteB0Wn10p07V+qqrrHFfdFHdz5KRwcJ25jO1asV9x47cl5Q07nMIgvA9iLC8g4R6RosRIxjf/vLLLOfg8zGuf+tWOjjPnbOKwLVty65f9eF2s0TEkSP0E+Tn02xiCsutX8/7jH2+vNzKPzD9bc15e0ipPSZ//nzrfHD+wrRpVqy+KVaXlUU/xZVX0sTj94fO2HW5+Fmffpp+gfffBz75BPj2W6BvXz53wgTW9F+3Tmz9gtAEIg31jLmGH25LeM1fa6uMsf24pETr4cN5vqCgfq0/1AoA0HrwYO7dbkvzNgQXmyspYSE5UwLZXlBu6lSeGzqUWnlJiTXmkhKt09N5z6hR1grlppsCSzC3aRM4zuCuZubYlFw25ZgBrXNz6471qada9jcShCQDEWr+MRfy4bakEP4NUVYWvv5/pJNBWprWQ4YETjJlZWx32KkThW9uLvcjR1LAmuqabdtax6YdZUEBz2dlWfXz7YIf0PqSSy5srBkZ1oRx4418XWPisU9WIvgFoclEKvzF7BMrwpWACC5xUB+m4fmIEawZNH48a98AwMiRNC2Z18vPZxTNmTM02fTrB9x9N7N
xTejm6NF0TDudDDmdNImO5rNn2TVr+3Y2ov/uuwv7rO3aASdP0nE9ZUrochjB5ihBEBqFlHeId6ZNY7ROTU1kma/hSEtjaWQAuP12+hYcDvoIrr+eAhuwJgrjZxg4kNm3AwYA7dsDN98MfPQRsHQpr3ftykilVq2An/6UY730UuD48QuboAzGtm8mGhH2ghAVIhX+kuQVK7p3p/a9bRuPc3Ot0E7Acu42RFUV8N57dKQah6tZSWzfbtXICS4w9957dMSa93c4WILBcOCAJeTvuw/Yt4/OaqBhwa8UJ43KSmuy+ewzOpTHjePzV69u+LMJghA1RPOPJSNGsAyCxwO89hqTtpYuZRTNmTOW4G4Ip5MC1+cDOneuvxF6ZibNOHbs7+Nw8LX8fr6u329dd7vrb7loSjSkpwO33cbJpEcP4MsvOUmZBjeTJjGDVxCEZkcKuyUC69cD//mfFPwrVjAsdMwYCn5T+rk+lKJg9vsp+Nu0qT+rtkMHCv42baxzwROM1hTybjcfK2Vdr67m5BEOrYEf/IDjWbMGuPFGCvm0NL7e2bO8LoJfEGJOk4S/UmqCUmqnUqpGKRV2plFK3aGU2q2U2quUmtWU90w6TCtI4wCtrqb27vcDgwZxFRAOEz+vFLdvv7UEdaj2iF9/TcH/7bfWueAJRmtOJH4/r9lXhkaAX3993XGY6z4fncMAtf2JE4F587gaGDoU+OADKd0gCHFAUzX/jwCMB7A53A1KKSeA/wAwHMB1AH6mlLquie+bPJhWkIaHH2bETWEhq4Jee23o55mVgcNB4ZuZGSjwQ5nz/H5L8LdrF35MPl/oVUd1NZ3DO3fWvb9nT47B52NVz9xctmwcPpwRRatXs2GNvUyEIAgxo0nCX2u9S2u9u4HbhgDYq7X+TGtdBaAUwJ1Ned+kxdTXWbGCztEJE+i0dToD7zOOWKOdp6Xx/lB1fowNP5iTJ+uWW66vmbrh+HHrsdvNlYnTCXz6KXDJJRT4w4bRoVxYyFDR2bOtCU5q9whCXNASNv/OAL60HR+sPScEYzcBeb30AfTubYVuApbgt/e8raqi7yCUtl+f3yCUySccStF5azDtGOfPZymIzEw6r7t1o4Y/ezavrVtHzd+u6Xs8EuYpCDGmwVBPpdRGAB1DXJqjtf5zBO8RSp0MKWWUUlMBTAWAq8J1wUpm7AKxvBxYu5Zlkz/7jAK+oIDa/gsvMKLHTAjV1QzFtHP99ZZ5xgh1E0UUKa1aMVHM5aI5Z88e61pNDZ3ThYU0U/n9tPV/+CEwfToF/sCBgZq+1OwRhLihQc1faz1Ma90nxBaJ4Aeo6V9pO+4C4HCY93pWa52ttc7u0KFDhC+fpBhfQPfu7If7zDPU7vPyKPSdTgrXJ5+s+9yePSn43W5G+Bjsgj+SDlvnznHv8wWeN2aodevoAPb7OTFVVLAe/2uvUfM3ph3R9AUh7miJJK9yAD2UUt0AHAKQB+DnLfC+ycHMmXVXBJMmWWUcRo/m3l524dNPrVWB281IG3s/AaCuySc4CshgHMsm7FNrvt7ll9PMA7B0xH/+Jx/bNX0R+IIQtzQpyUspNQ7A/wPQAcApAO9rrW9XSnUC8Aet9Yja+0YA+L8AnACe11o/3tBrp0SSV1MZPRr4y1+4Khg4kEljplREWhoFdXU1hbbbTQ3d77fMOAYT629CR83EYL/PJI/l59MUVV3N5zmdNA+tXi1mHUGIA1okyUtrvUpr3UVrna61vlxrfXvt+cNG8Ncer9da99Rad49E8AsR4vdT8M+YwWNjymnXjj4CkzNgYvcdDtrp7YLfnsSVnW21VjQx+4ZTp4CSEmr1Jsu3VSuanZRia0cJ3xSEhEEyfBOZ9estwV9aSoGdn88wzrQ0Ong7d2bMvRHwb7xhPb9vX8sZ3LkzbfZOJ+33BpeL25kzLCB3/jybt5SUsE3lggVsCJOXJ+GbgpBAiPBPFrp3pxB+4w1m0rZqxVDLAQMYc5+fz/uM1u5
wADt2WM7bQ4eo+a9ezRVFZiYF/C9/yS09nYXn8vOZfDZjBss0rFjBFcKSJV8NzpQAAAU7SURBVGLjF4QEQqp6JgumIbs9TyC4guaaNVZ0jlkJ+P0suDZsmFXhs3t31vofOJBC3usFli9n+OjKlazyaU/aElu/ICQcovknC8E1gjweCnATFTRxIrBqFZupmHpABqUo5O1ROnl5fM7ChdzPnUuNf/58Kc8gCEmAlHROBUwzdoANX4YPBzZtYmSQ309fwcCBrMljx+sFRo0C7rqL5iT7qkJCOQUhLpGSzoKFSRgrL6fmv3YtzUEbNtC5W13NLl7B2rzHAzz4IHsMFBQEripE8AtCQiPCP5WYOZPtGUeOpLbv9TIbt6SEiWPB0TpeL7B4MUs4LF4sph5BSCLE7JOqzJ0LFBVRsM+fX/e6vcKoMfXYjwVBiEvE7COEJxKNPpQDWUoxC0LSIJp/qiEavSAkNaL5C6ERjV4QBIjmLwiCkFSI5i8IgiCERYS/IAhCCiLCXxAEIQUR4S8IgpCCiPAXBEFIQeI22kcpdQzA5014ifYAjjfTcGJJsnwOQD5LvJIsnyVZPgfQtM9ytda6Q0M3xa3wbypKqYpIwp3inWT5HIB8lnglWT5LsnwOoGU+i5h9BEEQUhAR/oIgCClIMgv/Z2M9gGYiWT4HIJ8lXkmWz5IsnwNogc+StDZ/QRAEITzJrPkLgiAIYUha4a+UKlJKfaiUel8p9ZZSqlOsx9RYlFJPK6U+qf08q5RSF8d6TI1FKTVBKbVTKVWjlEq4yAyl1B1Kqd1Kqb1KqVmxHk9jUUo9r5Q6qpT6KNZjaSpKqSuVUl6l1K7av61fx3pMjUUplaGU+odS6oPaz/JY1N4rWc0+Sqk2Wutvax8/AOA6rfX0GA+rUSilbgNQprX2KaWeAgCt9SMxHlajUEr1BlADYAmAh7TWCVO6VSnlBPApgFwABwGUA/iZ1vrjmA6sESilfgzgOwAva637xHo8TUEpdQWAK7TW7yqlWgPYDmBsgv4uCkCW1vo7pZQbwP8A+LXWeltzv1fSav5G8NeSBSBhZzmt9Vtaa1/t4TYAXWI5nqagtd6ltd4d63E0kiEA9mqtP9NaVwEoBXBnjMfUKLTWmwF8HetxNAda66+01u/WPj4NYBeAzrEdVePQ5LvaQ3ftFhXZlbTCHwCUUo8rpb4E8AsAc2M9nmbilwDeiPUgUpTOAL60HR9EggqZZEUp1RXAQADvxHYkjUcp5VRKvQ/gKIANWuuofJaEFv5KqY1KqY9CbHcCgNZ6jtb6SgDLAPxbbEdbPw19ltp75gDwgZ8nbonksyQoKsS5hF1RJhtKqYsA/AnAb4JW/gmF1tqvtR4ArvCHKKWiYpZzReNFWwqt9bAIb30VwF8AzIvicJpEQ59FKXUPgFEAhuo4d9RcwO+SaBwEcKXtuAuAwzEai2Cj1j7+JwDLtNYrYz2e5kBrfUop9TaAOwA0u2M+oTX/+lBK9bAdjgHwSazG0lSUUncAeATAGK312ViPJ4UpB9BDKdVNKZUGIA/AmhiPKeWpdZL+EcAurfXCWI+nKSilOphoPqVUKwDDECXZlczRPn8C0AuMLPkcwHSt9aHYjqpxKKX2AkgHcKL21LYEjlwaB+D/AegA4BSA97XWt8d2VJGjlBoB4P8CcAJ4Xmv9eIyH1CiUUv8N4Cdg9ch/Apintf5jTAfVSJRSPwKwBcAO8P8dAH6rtV4fu1E1DqVUPwAvgX9fDgArtNbzo/JeySr8BUEQhPAkrdlHEARBCI8If0EQhBREhL8gCEIKIsJfEAQhBRHhLwiCkIKI8BcEQUhBRPgLgiCkICL8BUEQUpD/D/cklHrjXSULAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot(X, Y, 'rx', label='data points')\n", + "_=legend()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "from mxfusion.common import config\n", + "config.DEFAULT_DTYPE = 'float64'" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The SVGP regression model is created as follow. Two SVGP specific parameters are ```num_inducing``` which specifies the number of inducing points used in the variational sparse GP approximation and ```svgp_log_pdf.jitter``` which the jitter term in the log pdf calculation for numerical robustness. " + ] + }, + { + "cell_type": "code", + "execution_count": 33, + "metadata": {}, + "outputs": [], + "source": [ + "from mxfusion import Model, Variable\n", + "from mxfusion.components.variables import PositiveTransformation\n", + "from mxfusion.components.distributions.gp.kernels import RBF\n", + "from mxfusion.modules.gp_modules import SVGPRegression\n", + "\n", + "m = Model()\n", + "m.N = Variable()\n", + "m.X = Variable(shape=(m.N, 1))\n", + "m.noise_var = Variable(shape=(1,), transformation=PositiveTransformation(), initial_value=0.01)\n", + "m.kernel = RBF(input_dim=1, variance=1, lengthscale=1)\n", + "m.Y = SVGPRegression.define_variable(X=m.X, kernel=m.kernel, noise_var=m.noise_var, shape=(m.N, 1), num_inducing=20)\n", + "m.Y.factor.svgp_log_pdf.jitter = 1e-6" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Inference is done by creating the inference instance from the ```GradBasedInference``` class, in which we use a ```MAP``` inference algorithm as there are no latent variables outside the SVGPRegression module. 
Additionally, we specify ```grad_loop``` to be ```MinibatchInferenceLoop``` in which we set the size of mini-batch and the scaling factor for minibatch training.\n", + "\n", + "Then, training is triggered by calling the ```run``` method." + ] + }, + { + "cell_type": "code", + "execution_count": 38, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "epoch 1 Iteration 100 loss: 933115.0603707978\t\t\tepoch-loss: 10413624.614005275 \n", + "epoch 2 Iteration 100 loss: 524948.7079326594\t\t\tepoch-loss: 686034.5295730559 \n", + "epoch 3 Iteration 100 loss: 345602.4022749258\t\t\tepoch-loss: 427065.8343717841 \n", + "epoch 4 Iteration 100 loss: 277011.3760208657\t\t\tepoch-loss: 297071.493696023 \n", + "epoch 5 Iteration 100 loss: 183347.13021907964\t\t\tepoch-loss: 219808.0871498559 \n", + "epoch 6 Iteration 100 loss: 143763.11007552472\t\t\tepoch-loss: 169486.20729875282 \n", + "epoch 7 Iteration 100 loss: 132031.47695326462\t\t\tepoch-loss: 134765.1471133905 \n", + "epoch 8 Iteration 100 loss: 95632.60561449913\t\t\tepoch-loss: 109798.66321648406 \n", + "epoch 9 Iteration 100 loss: 73957.6220462552\t\t\tepoch-loss: 91257.8705670977 \n", + "epoch 10 Iteration 100 loss: 64840.07207031624\t\t\tepoch-loss: 77084.06942481917 \n", + "epoch 11 Iteration 100 loss: 60780.27278575914\t\t\tepoch-loss: 65962.38163622493 \n", + "epoch 12 Iteration 100 loss: 48546.66342698521\t\t\tepoch-loss: 57037.39009905885 \n", + "epoch 13 Iteration 100 loss: 42676.907263579335\t\t\tepoch-loss: 49725.50869601666 \n", + "epoch 14 Iteration 100 loss: 43266.74759690139\t\t\tepoch-loss: 43635.70855486856 \n", + "epoch 15 Iteration 100 loss: 33139.32033870425\t\t\tepoch-loss: 38501.415430223606 \n", + "epoch 16 Iteration 100 loss: 35129.68003531527\t\t\tepoch-loss: 34139.30892930683 \n", + "epoch 17 Iteration 100 loss: 33309.08869286892\t\t\tepoch-loss: 30414.713307491817 \n", + "epoch 18 Iteration 100 loss: 31058.180286752693\t\t\tepoch-loss: 
27222.957705478882 \n", + "epoch 19 Iteration 100 loss: 22781.668494776342\t\t\tepoch-loss: 24466.753696665117 \n", + "epoch 20 Iteration 100 loss: 16921.53875526696\t\t\tepoch-loss: 22063.866203795988 \n", + "epoch 21 Iteration 100 loss: 16866.27172281184\t\t\tepoch-loss: 19959.435781693166 \n", + "epoch 22 Iteration 100 loss: 18001.39866328793\t\t\tepoch-loss: 18093.70564938978 \n", + "epoch 23 Iteration 100 loss: 19268.435700542395\t\t\tepoch-loss: 16435.61461383947 \n", + "epoch 24 Iteration 100 loss: 13586.70681551015\t\t\tepoch-loss: 14947.197437326102 \n", + "epoch 25 Iteration 100 loss: 11842.634017398044\t\t\tepoch-loss: 13605.954880888436 \n", + "epoch 26 Iteration 100 loss: 12304.581180033452\t\t\tepoch-loss: 12393.880316263208 \n", + "epoch 27 Iteration 100 loss: 12712.095456995734\t\t\tepoch-loss: 11293.27810727986 \n", + "epoch 28 Iteration 100 loss: 12662.540317512301\t\t\tepoch-loss: 10292.698091923068 \n", + "epoch 29 Iteration 100 loss: 9789.253683769626\t\t\tepoch-loss: 9379.934609293405 \n", + "epoch 30 Iteration 100 loss: 10336.484081253366\t\t\tepoch-loss: 8542.778732654882 \n", + "epoch 31 Iteration 100 loss: 8427.615871046397\t\t\tepoch-loss: 7780.101399774407 \n", + "epoch 32 Iteration 100 loss: 6243.338653452632\t\t\tepoch-loss: 7083.3906599663305 \n", + "epoch 33 Iteration 100 loss: 5633.910939630758\t\t\tepoch-loss: 6442.360608787293 \n", + "epoch 34 Iteration 100 loss: 6128.494674105952\t\t\tepoch-loss: 5856.924952855579 \n", + "epoch 35 Iteration 100 loss: 5561.132651568278\t\t\tepoch-loss: 5319.662670742758 \n", + "epoch 36 Iteration 100 loss: 5007.633559342303\t\t\tepoch-loss: 4827.494923733251 \n", + "epoch 37 Iteration 100 loss: 4570.798941667555\t\t\tepoch-loss: 4375.152951451802 \n", + "epoch 38 Iteration 100 loss: 3427.8776815125993\t\t\tepoch-loss: 3958.746627662967 \n", + "epoch 39 Iteration 100 loss: 3145.271868648371\t\t\tepoch-loss: 3574.2727718396727 \n", + "epoch 40 Iteration 100 loss: 3252.388844355417\t\t\tepoch-loss: 
3216.389789008766 \n", + "epoch 41 Iteration 100 loss: 2682.992323506939\t\t\tepoch-loss: 2880.8040627817663 \n", + "epoch 42 Iteration 100 loss: 2776.54316335849\t\t\tepoch-loss: 2563.2893900928902 \n", + "epoch 43 Iteration 100 loss: 2052.181117489573\t\t\tepoch-loss: 2259.124250598867 \n", + "epoch 44 Iteration 100 loss: 1789.3450917418618\t\t\tepoch-loss: 1963.4009524512699 \n", + "epoch 45 Iteration 100 loss: 1637.0460616480382\t\t\tepoch-loss: 1683.301052960261 \n", + "epoch 46 Iteration 100 loss: 1250.3190196168575\t\t\tepoch-loss: 1421.08925599032 \n", + "epoch 47 Iteration 100 loss: 1056.4280170128945\t\t\tepoch-loss: 1181.4882875552755 \n", + "epoch 48 Iteration 100 loss: 934.1323712121834\t\t\tepoch-loss: 972.8920812023131 \n", + "epoch 49 Iteration 100 loss: 743.6854774208032\t\t\tepoch-loss: 794.3919410633861 \n", + "epoch 50 Iteration 100 loss: 592.0162492873271\t\t\tepoch-loss: 643.6129305537779 \n", + "epoch 1 Iteration 100 loss: -617.7115390031664\t\t\tepoch-loss: 122.02590714978953 \n", + "epoch 2 Iteration 100 loss: -1042.9322804366407\t\t\tepoch-loss: -861.8691127743712 \n", + "epoch 3 Iteration 100 loss: -1246.1061590298375\t\t\tepoch-loss: -1142.8551043268158 \n", + "epoch 4 Iteration 100 loss: -1422.4364206976472\t\t\tepoch-loss: -1248.3343954963652 \n", + "epoch 5 Iteration 100 loss: -1364.319275718058\t\t\tepoch-loss: -1319.0632400945233 \n", + "epoch 6 Iteration 100 loss: -1138.6014678286117\t\t\tepoch-loss: -1375.485088640635 \n", + "epoch 7 Iteration 100 loss: -1468.2449906521865\t\t\tepoch-loss: -1415.3387799226973 \n", + "epoch 8 Iteration 100 loss: -1331.0742440765116\t\t\tepoch-loss: -1398.7259993571608 \n", + "epoch 9 Iteration 100 loss: -1023.1218294411456\t\t\tepoch-loss: -1406.2506096944428 \n", + "epoch 10 Iteration 100 loss: -1491.0721525479291\t\t\tepoch-loss: -1425.3786072098467 \n", + "epoch 11 Iteration 100 loss: -1487.9902441406107\t\t\tepoch-loss: -1385.4821177117121 \n", + "epoch 12 Iteration 100 loss: 
-963.575720938497\t\t\tepoch-loss: -1148.7904243974 \n", + "epoch 13 Iteration 100 loss: -1496.8723348964538\t\t\tepoch-loss: -1248.4710558849933 \n", + "epoch 14 Iteration 100 loss: -1189.2469453417261\t\t\tepoch-loss: -1302.58240646708 \n", + "epoch 15 Iteration 100 loss: -1354.0129933002445\t\t\tepoch-loss: -1422.9290660653176 \n", + "epoch 16 Iteration 100 loss: -1375.0688655561046\t\t\tepoch-loss: -1296.0532055882159 \n", + "epoch 17 Iteration 100 loss: -1601.7368685439442\t\t\tepoch-loss: -1432.8777691683824 \n", + "epoch 18 Iteration 100 loss: -1140.428056593764\t\t\tepoch-loss: -1443.8657069101057 \n", + "epoch 19 Iteration 100 loss: -1396.6869254783921\t\t\tepoch-loss: -1421.0467725977735 \n", + "epoch 20 Iteration 100 loss: -1313.511818206805\t\t\tepoch-loss: -1411.19388568273 \n", + "epoch 21 Iteration 100 loss: -1508.1672406497062\t\t\tepoch-loss: -1427.8889874691674 \n", + "epoch 22 Iteration 100 loss: -1249.1642813846483\t\t\tepoch-loss: -1379.492333117903 \n", + "epoch 23 Iteration 100 loss: -1214.1394062603918\t\t\tepoch-loss: -1356.5797617962307 \n", + "epoch 24 Iteration 100 loss: -1554.6263005956837\t\t\tepoch-loss: -1358.5256191991677 \n", + "epoch 25 Iteration 100 loss: -1419.5889498936215\t\t\tepoch-loss: -1405.5467914984783 \n", + "epoch 26 Iteration 100 loss: -1262.3682620336267\t\t\tepoch-loss: -1409.6484860247688 \n", + "epoch 27 Iteration 100 loss: -1327.4752015434606\t\t\tepoch-loss: -1368.1521038967614 \n", + "epoch 28 Iteration 100 loss: -1256.4414309051297\t\t\tepoch-loss: -1351.3528504368003 \n", + "epoch 29 Iteration 100 loss: -1178.4788588168844\t\t\tepoch-loss: -1413.816013007459 \n", + "epoch 30 Iteration 100 loss: -1605.1239164704423\t\t\tepoch-loss: -1426.6550440932342 \n", + "epoch 31 Iteration 100 loss: -1617.1795697144926\t\t\tepoch-loss: -1356.5267725452202 \n", + "epoch 32 Iteration 100 loss: -1590.7237287842681\t\t\tepoch-loss: -1425.2884165221458 \n", + "epoch 33 Iteration 100 loss: -1594.3448025229204\t\t\tepoch-loss: 
-1420.5483351285052 \n", + "epoch 34 Iteration 100 loss: -1576.9397677615486\t\t\tepoch-loss: -1430.2946033617723 \n", + "epoch 35 Iteration 100 loss: -1303.3497394593587\t\t\tepoch-loss: -1380.3330104443605 \n", + "epoch 36 Iteration 100 loss: -1478.0145396344049\t\t\tepoch-loss: -1399.0665992260174 \n", + "epoch 37 Iteration 100 loss: -1555.4119456067176\t\t\tepoch-loss: -1360.9939473244767 \n", + "epoch 38 Iteration 100 loss: -1553.031887368961\t\t\tepoch-loss: -1419.1421503464217 \n", + "epoch 39 Iteration 100 loss: -1427.3431059260865\t\t\tepoch-loss: -1415.0248356293594 \n", + "epoch 40 Iteration 100 loss: -1137.8470272897798\t\t\tepoch-loss: -1398.6618957762776 \n", + "epoch 41 Iteration 100 loss: -1551.999240061582\t\t\tepoch-loss: -1402.3061839927834 \n", + "epoch 42 Iteration 100 loss: -1458.4434735943848\t\t\tepoch-loss: -1425.2654433536431 \n", + "epoch 43 Iteration 100 loss: -1585.6542548185487\t\t\tepoch-loss: -1384.815978968837 \n", + "epoch 44 Iteration 100 loss: -1410.7384899311965\t\t\tepoch-loss: -1400.3690408109871 \n", + "epoch 45 Iteration 100 loss: -1343.7557878846794\t\t\tepoch-loss: -1402.4205821010662 \n", + "epoch 46 Iteration 100 loss: -1309.0681838828461\t\t\tepoch-loss: -1412.2783526889364 \n", + "epoch 47 Iteration 100 loss: -1125.0585501913108\t\t\tepoch-loss: -1391.0496208478644 \n", + "epoch 48 Iteration 100 loss: -1470.087468755146\t\t\tepoch-loss: -1390.1175558545679 \n", + "epoch 49 Iteration 100 loss: -1572.597159674086\t\t\tepoch-loss: -1389.4460105298315 \n", + "epoch 50 Iteration 100 loss: -1113.9894360784558\t\t\tepoch-loss: -1403.3841449208112 \n" + ] + } + ], + "source": [ + "import mxnet as mx\n", + "from mxfusion.inference import GradBasedInference, MAP, MinibatchInferenceLoop\n", + "\n", + "infr = GradBasedInference(inference_algorithm=MAP(model=m, observed=[m.X, m.Y]), \n", + " grad_loop=MinibatchInferenceLoop(batch_size=10, rv_scaling={m.Y: 1000/10}))\n", + "infr.initialize(X=(1000,1), Y=(1000,1))\n", + 
"infr.params[m.Y.factor.inducing_inputs] = mx.nd.array(np.random.randn(20, 1), dtype='float64')\n", + "infr.run(X=mx.nd.array(X, dtype='float64'), Y=mx.nd.array(Y, dtype='float64'), \n", + " max_iter=50, learning_rate=0.1, verbose=True)\n", + "infr.run(X=mx.nd.array(X, dtype='float64'), Y=mx.nd.array(Y, dtype='float64'), \n", + " max_iter=50, learning_rate=0.01, verbose=True)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The learned kernel parameters are as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 44, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "The estimated variance of the RBF kernel is 0.220715.\n", + "The estimated length scale of the RBF kernel is 0.498507.\n", + "The estimated variance of the Gaussian likelihood is 0.003107.\n" + ] + } + ], + "source": [ + "print('The estimated variance of the RBF kernel is %f.' % infr.params[m.kernel.variance].asscalar())\n", + "print('The estimated length scale of the RBF kernel is %f.' % infr.params[m.kernel.lengthscale].asscalar())\n", + "print('The estimated variance of the Gaussian likelihood is %f.' % infr.params[m.noise_var].asscalar())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Prediction\n", + "\n", + "The prediction of a SVGP model can be done by creating a ```TransferInference``` instance." + ] + }, + { + "cell_type": "code", + "execution_count": 45, + "metadata": {}, + "outputs": [], + "source": [ + "from mxfusion.inference import TransferInference, ModulePredictionAlgorithm\n", + "infr_pred = TransferInference(ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y]), \n", + " infr_params=infr.params)\n", + "m.Y.factor.svgp_predict.jitter = 1e-6" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To visualize the fitted model, we make predictions on 100 points evenly spanned from -5 to 5. 
We estimate the mean and variance of the noise-free output $F$." + ] + }, + { + "cell_type": "code", + "execution_count": 46, + "metadata": {}, + "outputs": [], + "source": [ + "xt = np.linspace(-5,5,100)[:, None]\n", + "res = infr_pred.run(X=mx.nd.array(xt, dtype='float64'))[0]\n", + "f_mean, f_var = res[0].asnumpy()[0], res[1].asnumpy()[0]" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The resulting figure is shown as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 47, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAY0AAAEKCAYAAADuEgmxAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAIABJREFUeJzsnXd4lFX2xz93Jo2OEIqUCCIgHYEAiigjNhRQLIiyiGvHdRsCi78VdHVFicLq7rqorIvK6gIWXEAsILEvEhCsgAgiBER6S0ibOb8/ztRkAgGSTLuf55lnZt73nXfuTCbvufeU7zEigsVisVgsFcER6QFYLBaLJXawRsNisVgsFcYaDYvFYrFUGGs0LBaLxVJhrNGwWCwWS4WxRsNisVgsFcYaDYvFYrFUGGs0LBaLxVJhrNGwWCwWS4VJivQAKpv09HRp1apVpIdhsVgsMcWqVat2i0ijYx0Xd0ajVatWrFy5MtLDsFgslpjCGPNjRY6z7imLxWKxVBhrNCwWi8VSYazRsFgsFkuFsUbDYrFYLBXGGg2LxWKxVBhrNCwWi8VSYazRsFgsFkuFsUbDYrEcF/n5MGMG7NoV6ZFYIoE1GhaLpcLs2AEuF9x1F5x7LvxYoXIwSzxhjYbFYqkwI0bA11/DI4/Azp1wwQVQVBTpUVmqk7iTEbFYLFXHU09BQQH07AmXXw4//AApKZEelaU6sSsNi8USnqwsyM4G4J13dEXRaWc2Pd/LAqBLFxg6VA/dvDlCY7RUO9ZoWCwWNRB33OE3EgAkJcGgQey++g4uvRRe/3U2DB8OmZkhL336aWjTRlcdlvjHGg2LxQIbN8KsWXDJJTB9uhqPyZOhqIj6r/+L95MuZPhrw2HePD0+K8v/0sGDweGAv/41QmO3VCvWaFgsFo1wp6RAcTHcc48aj7w8PMaJkxLOL3kPx11jYPVq3ZeU5HdftWgBfxqQTe1/ZHF4YXaIQbHEH9ZoWCwWzaNduBBSU/V5cTHicGA8JRSRjNSoocbgnnvUPTVpkhqO4cPhrruY+P4lNC3ajOP6IPdVtjUg8Yg1GhZLouJdKXz9tdqCvXsBj8e/23g8lOAghRJM3bpQWAhOJ/zvfzB6NPzpT3DKKTBjBo5uXRnD0/y99r3IAJcajDDxD0vsY42GxZKAvPIKvL0nE4YPp9n6bK6d3ofa11yMeFcY4j0uCQ/SrDn8/LNucLuhTx94+WU4eBA2bIDatWHVKg5f8QvGH5qMaXsGDBsGV12lKxhLXGGNhsWSYOzdC3feCbNzXTBvHg3uHE5mrXUkU4IA4vHwc1oGAAZwbM/VFYaPVavg0KHA88OHwRjqLp6Dyc+DjRuRvDxo3966p+KQiBoNY8y/jDE7jTFfl7PfGGP+aoz
53hjzpTGmR3WP0WKJK7Ky+Lb/HZy1P5t770VXAoMG4cw7CKiRMECTgi2hr3O7A4+Li0PcWACIaBwEEMAtDo17fPRRoN7DGpC4INIrjeeBS4+yfxDQ1nu7HZhRDWOyWOKWjQ0y6fztXBY4r6TzrmzNl509G5KTMWlpfqNhTvD8Bih0pOFwFyH5+WosfAFzG9+ICyJqNETkQ2DvUQ65AnhRlOVAfWPMqdUzOoslvhCBW19y8d/U60jjCFx8Mbz5pu70eEJdUCdBmqcA8V1a6tVToap582x8oxQlJSoAuXYtHDkS6dFUnEivNI5Fc2Br0PNc77YQjDG3G2NWGmNW7rJ6zRZLWETg+uuh6e9H4HA49KoFkJ6u7qe8vEp7LycePBjYvh3GjLEGI4h//AOaNYPkZDj1VOjYUSVZfKxapTkG0Uq0CxaGWyVLmQ0izwLPAvTq1avMfovFolXbt98O9JmocQkfu3dX+nsJ4EDIc9al1owZ4HJxONNF7dqV/lYxx88/Q+fOqtrSuDHUqaN/G9AF39Ch2qvE5dLHQ4ZARkZkxxxMtBuNXKBl0PMWwPYIjaXSOHBAVRu2blXF0Ouu0+2bN0ONGtCkSUSHZ4k3srI48PlGspuO4LIDc0hZuVKvTsbo8uMYlD6iGCcpuP37TNAxJuhegJrkwRkdKRk0hFFpC5m+2kXr1pXyqWKKl1/W/+uBA1WdxeHQrz8cc+fCf/+rt7vv1tv998MDD+iC8NAhqF+/WocfiohE9Aa0Ar4uZ9/lwFvob7AvsOJY5+vZs6dEmoICkc2bRT74QGT2bBG3W7c/9JBIw4Yi+p+qt5SUwOuGD9dtzZqJzJgReJ3FclIsWyb5qfVkH3Ul/8LBIg5H6I+wnJsn6HEeaVICUuLdXvoW7jXSqZPeOxxyeOBQWe9oL717ixRPnSYyaFCkv5VqY/VqkbQ0kYsvFvF4ju+169aJPPaYyMcf6/OcHBFjRDp2FLnuOpGHHxZ57TWRXbtOfpzASqnANdtIBWYaVYUx5j/AACAd+Bm4H0gGEJGnjTEG+DuaYZUP/FJEVh7tnL169ZKVK496SLls2ABvv62TMBG16m43/OIX6oNcuRJefVULY48c0Vtengq1NWumLTDvvVdXEsFs366+y//8B95/H844A04/HU47TeOEZ5yhs47PPoPly2H+fPjgA+jbVxVEu3U7oY9jsQD6e76+aTbP7RtGbZOvG4JTaIM42tVAzuyAY/26MquT0q/xrTJKT6S/GD2N51+A6YzDDL5cZUvinAMHtPfIkSMq29W48cmd78cf4fnn4fPP4csvA5L02dkwYMDJndsYs0pEeh3zwIpYlli6ncxKY+7c8JOu//1P98+apSuDOnVEGjcWOe00tfjr1+v+994T+c1vdEUxc6bIu+/qTKG4+PjG4fGIvPiiSHq6yF/+csIfx2IREf1dfkt72d26Z+BHXWq1UXrVUHoF4X/N5ZeLjBp11NWJp/RrfI9btRIPyCFqyf+mLNPBLVsmMnVqZL+gKsLjEbnySpGkpMBKobLZv1/k889FDh06+XMRCyuNquBkVhqFhf7iVozRDESnE9LSKi0b8bjYuxfq1tU0d4vlRLnpJvjDix05U9aWW38RHJsoc4wxKmTo8Wia7rJl+s/iW600baq5o75jvdcU/7lOOUWn3B4PAnxUbzDvj13I5NrTtQBw0aK4zK6aN0/jldOnw+9/H+nRHJuKrjSiPeW2WklNhYYNoUED/Z3XrQu1akXGYICOIylJXVqXXBJbudyWCOOtwpapWZy9fDptzcZyUxGPajBAJdMfflhzRBctgvx8fVyrlv5Ad+yAmjW9Jww1GALIvn3+CnID9M9/h8nf3wjjxmmTcd9444xrroG33oLf/S7SI6lcrNGIAY4cgSVL4K67KpTsYkl0srL0Yj5kCObHzdyxaSKOhqHpNsHGAo5iMIzRcz34ILRrF0j56ddPt9WqpQG9oqLQlxEa6wh+bIqLYfZsJDl
FVxhxWC0uohlSl15afpZUrGKNRgwwaBDcd58GwF5/PdKjsUQ9mZlahX3jjcjTT0Pr1jh27SyzZA5eXYS9rrVrB7fdpq8rKNBIbo0a6lJauVKl0efPV111X6FgqfMHP3af0tA/BgFKikqQSZNCq8XjQKPK44HzzoPnnov0SKoGazRihPvvV9HQKVPsasNyDFyqXuue+wpfS2f47jt1MbndYTOdwuJw6OucTv3xeTzqkkpK0vNfd11gCr1+va44UlLKnMZ3/s/NWXj27Uf8WVuGZNzq6po+XTdNn65aWDG+6pg/Hz7+WL+SeMQGwmOImTO1ovejj+DccyM9Gku0s7rLjZz19Ww8qWk4CgvwoLPEcl1RPpKT9f6MMzQPvWZNbQc7YoRuHx7UKzwnR++TkmCit9K8f3/9kQaxM6MnjbasAsC0a4d89x14x+JISVEj9O9/w+OPw9ixlfDpI4MI9Oihqfhr10YuHnoiVDQQbvNyYohRo6BFC3UnWyxHw3P5YLp//SZ7k9M5pXA3HtRQ+O7D0qGDFgK4XNC8OR//1IaatTfStQskPfNM4Lh589RYTJgQcCvdcQdSsyafNBjKOR/9Gw8OnHgwyclQXEzjLasQYC+n0OC77zDGaPomIMXFmNmzNTYSwwYDNPC9Zg3MmhVbBuO4qEhebizdoqEi3GKJKMuWSUlyqpTgEA9IXtopYau3BbS8GEScTpHUVMm9Z5p4HtW6ie7dddcddxz7/SQ9Xeb3nyY7SZe1p/QVD0ZKzuwUtvYj+Lk7eCxpaSK33171308VMniwyKmnihQVRXokxw8VrNOwMY0Y5KGH4Le/jfQoLFHLY4/x4plTWJp8GQA1CvYBpQLeHTrozF4E2raFW27h4LDRPPVECQ8VTgA01j1+PDzzDLz22lHeLyeHFePm8elHJcwZNo8zJw7DTHsc6dcPwYToUQVna/m2+R3kRUUqvJSdffLfQYQYPx6efDLg4YtLKmJZYumWCCuNX/9aJDlZJDc30iOxRA1Tp+qMX0RXGg3SZVOnwbKTUmJnvirtDh308UUXidSsKbJsmVx9tT4M/l0VFopkZorUr696auUxd67I2WeLHDkStLFPH38FeDhtqjKrnrQ0kTFjEkqXKprArjTil7FjNcNx5sxIj8QSFfjqMoYP11m6y4Xn6mtp9c0i0tlTVk/K49Eo7dCh8O67sGgRRcOGs+e1bCZOhOZBHWtSUmDOnNCi72CKi/WKP3w4fPKJqicAmgn12WccGjAUJ+7wK4ugx4V107WA8IUX4MILVTc8hlJvReDPf9avNe6piGWJpVsirDRERM49V6Rr10iPwhIVeGMKMniwuOvUk/W1e4gHpISjqNnWrevXfCopEbmlzTJ5uP5Uyc8P/xY+xeXiYlVaFVH11q5dRf71rzAvGDRIVw316kmhI1W206TsyqK0XpUxItOm6djq1QusnGKAzz7Tj/PMM5EeyYmDXWnEN8OGqcrlpk2RHokl4uTkQN++yDvv4Dl0iHaHP/c2QfKU/5qiIn89xJYtsLTERZunJ1CjRvjDfU2CZsyA3r21MVBmJuzcWU7/l8WLoVUruO46dv1+Ck3YSXFyoHBBSt0bdALLhAla/zF/fkzpUc2apXWPvt448Yw1GjHKsGFw66220M8CZGYib7+Np7iEJDxHT6sFzQVt1crvzmrdGtat06fH4qab4Ne/1tbi114LX38Nl11WzsETJsAzz9C8cQmbxjxOsqfAvyucm8qAiiD+5jcxZTAKCrTtwVVXaauDeMcW91ksscpll6n/f+xY9tbL4JSDW0NkQcJqSyUna6AiKYnvR07mq9UlDPlwwnErKR8+TMVbt2Znq0WqUQP3gYM4D4Y2nAnRpfKN8ayztMFMDDBnjvZeX7pUO/PFKlblNgEQUTmg/fsjPRJLtZOVpdr548bhvvMu6h7c7nVJlV1l+A3GRRdRlFyTTae5WN3uOl78Vwn/t28Chw4d/9sfV6/vnByYN49P52z
hvfxz2N1vaNmxAZ+3GAqNGml0PSdHg+ExwI4d0KlTTC2OTo6KBD5i6ZYogXARkTVrNPj23HORHoml2lm2TAPGSUkaTE5KKlM4FxJwTkqSktp15XdoAd4Alsnppx89jbayOXxY5IG608KOs8x4nU4t9IuRJk3x0JoZGwiPf7p21Zax8+dHeiSWasflgjfewJOSqrP1MCqzIZSUYDp15MH7Skh9Yx7vPJTDhg36+6kuatWCG3quxxO0FipXQv2WW1TrKspl0/fvD8igJwoJ9FHjD2Pgyiu118bhw5EejaVa8dYwrG94Ttigt39by5b+8mTH56uoc0Emda9wkXLfhIhc6M44A44465BLoBiktIS6APzwgyreBsumRyGDBlUsgSCesEYjxhk2TDtvvv12pEdiqRa8HfnIzMQzcCBnbl2CG0f5yrW1asE77+gF+KyzAqq0EcKc0Ybkh+6nqdnJkZS6ge0EGkMBOhOqX7/sCaKo38bGjbB8eVQvhKoEazRinH79tDXtW29FeiSWaiEzU6e2q1f786199Rhl8iCTk3XGDrBwoWYjTZhQfWMNR2YmqdMfwXH5INKKDobsCnZVCcBPP+lU3qdF5cvCipKr9Msv62r/+usjPZLqxRqNGCcpCd57D554ItIjsVQL3gZLMn48B9GZemn3DsbAmDGqNRMFq4sQvJlUTncx5pxz/KsL383X7wPQepLiYu0jPnlyoI9HFLirRLT9x3nnqQcwkbBGIw446yyoUyfSo7BUC17XTFGtU6jPQTyUatnqdELdulok9/jjugyN9OoiGF8PjvHjISfHP+7Shq8EJ9KuvepkNWyo0s7dukWFwQBYtUobG44cGemRVD/WaMQBhw/DAw/EtKK0paJs3AhDhpB6aI9/Zu5DQI3FuedqM/mzzlI5j2gkJwfatNGlcnJymQK/JNyYb79RbY7DhyE1FT78MGpqNzp0gJdegmuuifRIqh9rNOKAtDSYNg1efTXSI7FUOdu3ay/RoNSn0tXfLF0K994bXW6p0mRmasyiZk245BKM9/P43FT+wPiRI2oIfSXrzz0XFbOjWrXghht0IZdoWKMRByQlaUD8ww8jPRJLlbNzJ25nMuLxhI9lgLqnli6NLrdUaXJyVN3vjTf0ucfjNxzBAXEDGkBwuzW+kZkZcWP45pvw2GOatZiIWKMRJ5x3norH7dkT6ZFYqoysLGjcmBIp599WRPUs9u/XmEE04xUzxOVSg3DmmZCREdLlz4/braqAtWqpSiJENPU2K0uHHtfd+Y6CNRpxwnnn6f1HH0V2HJYqwttoSZZlk+IJTHFLV1Szdq3WZURJwLhCLF4Mt90GmzcDIEEmI+SzdesGjzwSaDgVgdTbdet0RX/bbYlVBR5Mgn7s+CMzE9LT1U1siRN8hXygf+BJk8ir2TDkkDJFfbGqWh0cp3CYMjUnAvDpp/r5HnkkYqm3M2eqzbrppmp/66jBGo04ITUVfv5Z0/MtcYKvkM+XMTR6NLV2bym/X0a7dppyO2dONQ6yknC7oW9fcDoxHg+Fzpr+XXs5JfB59+yJWOptQQE8/7yqMIRtPJUgWKMRRyTqcjlu8RbyMWeOyoC89BKFzpo4Cc0yAlRSfNs2mDpV022jIMPouFi8WBU409LA6STVne/vO3gK+0JXHp98EpHPt2uX2vEoyfqNGPYyE0d8/z306KEubUuc4HLBb38L+flw5Aip7vxASqrDGZgptGmjUiElJWpoojndNhzZ2WrsFi6ErCwMgYuTv5GU77NefLEa0Wo2HC1bqsZbLDdaqgys0YgjTj0VvvoK3n8/0iOxVBrZ2dqYe9QopLgYA7iN1iw4PG7twte3r/7hIVBxHc3ptuHwyovgcsHYsXDGGWU7EHo8FLftEAj0z5lTbRlUX34JW7dWy1tFPRE1GsaYS40x640x3xtjJobZf5MxZpcxZo33dmskxhkr1KoF3bur8qYlDvAJ9F11FZ633qYYJwJ4xFDUpaceU1g
IU6boDD3WVhfB+Iwd6OfetCnsYc4Na7VeY/dueOEFrZCvYkTg9tu1s26s5hlUJhEzGsYYJ/AUMAjoCFxvjOkY5tC5ItLde/tntQ4yBundW3VxPJ5jH2uJcnyz7w8/xOzeRTJu/stQvk7rSfJXq/SY1FSdccfi6iIc2dnaJMZLcHW4f+UhqGLvsRpPVRLvvKNvN25coH4ykYnkSqM38L2IbBKRImAOcEUExxMXZGbCoUOwfn2kR2I5aXyzb+9suohkrmABZxUsV3eNw6EB5FgMfJdHTo4WHdWpowJPYTDinRE5HNrdrwoRUV23006D0aOr9K1ihkgajeZAsJcw17utNFcbY740xrxqjEkwEeLjp18/9WjYlUYc4K3TcJ/eBoAUikMVYdu1C2RYxbJrKpgJEzT99oYb4IcfQqRFgu8B7bVRxXEN3yrjj3/U8JElskYj3EKvtMdwIdBKRLoCS4EXwp7ImNuNMSuNMSt37dpVycOMLdq2hblzVU3CEuNs3AhXXsnPtc6gJNy/qq+zXby4pnyMHw/PPqvGQySk30YICxbArFm6vK4iWZFvv4XTT7erjGAiaTRygeCVQwtge/ABIrJHRHyaCTOBnuFOJCLPikgvEenVqFGjKhlsrGE1qGKcrCytuzhyhKarF5NEmKXj8uUwfXr1j62qcbnUEBQXg0iZVUZId78zztDVxpVXVklQfOxYzZyyq4wAkTQaOUBbY0xrY0wKMAJYEHyAMebUoKdDgbXVOL6Y5aGHNP02UVU444LMTBUSczox4glfAV6/vqrZxiPDhmnzJfBfscsUNAKyfr2uNtzuSn37vXtVtQQ0K9ESIGJGQ0RKgLuBd1BjME9EvjHGPGiMGeo97DfGmG+MMV8AvwFuisxoY4sOHXSS9uWXkR6J5YRxuWDECNyFxeUfIxK9TZZOlsxMTVXq0gWKisp09/NLp3s8ajCczkoNik+eDOefD7m5lXbKuCGidRoislhE2olIGxF52Lttsogs8D6+V0Q6iUg3EXGJyLpIjjdW8Il/xktsNOHwBsClXXsc4g4fBI7n3E9ffcq992q9RvNAfkyZhlOAeDxQr16l/eC/+ELrKe+8E1q0qJRTxhW2IjwOyciAxo1hxYpIj8RyQrz2GgwZQsHUJ4BygsA1a2oTo3gkJ0cNxiOPwIMPqp81IwMAtzN8EwvZurVS+gKIwN13Q4MG+taWslijEYcYExUNzizHi08K/eBByMsjbVcgI73MuqKgoFqHVq1MmBDQ0CopUZmU7dshIwOHCTWflV2g/fLL8PHHWmSfiK1cK0JSpAdgqRruvttmUMUcPin0a69F1qkn1t8rm6CWrsaoDz+e8aUQ+4obU1Nh2zaM2+3PIwvOpjLJyepXOkny8+GCC+CWW076VHGLXWnEKZdeCiNHRnoUluPCV6j3yit4atcN2WVAfSYimk30yCOqbJsIPPOMamuVKvTzYwwUF3OkADxTT65W47bbNCHNthkoH/vVxDFffFEpky9LdZKTA/Xr4zx8sOy+vXs1Mnvjjeq2iaeCvmPhcqmWB4So33rAXwDo3rWXD/IytXalQwe47LIKn37dOnjpJbXJ8ZxjUBlYoxHHDBsGf/5zpEdhOS6SkpDvvw8pYDNB+9i2Ddq3TyyDARrrCdPLOPgC9lNyBiufW43cc49agQsvrNCpReA3v1GX7t69lTTeOMYajTimVy/4/PNIj8JSYbKztQ94ssqDlKkBd7tVxnjy5PgRKKwI2dk6A/K55kotBXzPzihey7jt9+iTadO0nLsC/Pe/sGQJ/OlPgXpCS/lYoxHH9Oihae779kV6JJYKkZND4X0PkVKcx3ra4SRMbcbatZoLmkipcTk5ml78i1/ATTepwBoBF5WvFtznttqT3LTCsukFBWpbOnWCu+6q/KHHIzZ7Ko7p6VXqWr1aM0IsUUhWViA/OjOTebku2vAKZ7M8UPWclKSR2aIizW5ItHhG8GedPl3FDAmk2/pmvr7nDYt
3cOS9T6hB1jG/p2nT4Icf4L331PtnOTZ2pRHH9Oih99ZFFcX40myTkmD4cOr9312cjbZeNIBp3VqNRFERDB0KmzcnlsEIJjtbV1lB6cal5UV81Hh3gX5Xx6BzZ23BbidVFccajTimYUP9P7M551GML8124kTcKakMzp3BVm9bGQ8OnQYbA8nJ0LRp/GpNVQSfm2rJEujZ0++eglDjYQCP08mRdZuPecorroAnnqiS0cYt1mjEOQMG2MrWqMVXAZ6TA40a4dy+DYAMtrGvZguMLxSekgKPPhpfHfpOhAkTtGZj+nTtaUxogV9wptnTrbI4//DicsVv58zRRUsli+MmBNZoxDkbNsDDD6syhSXK8LmmNm+Gn37yX/Q8QIP8XL0AXnSRVkOvXx9fHfpOlOzsEDn40isN3/O+Z0OtnGyWX1222O+777SI7513bIfLE8EajThnwwa47z5YsybSI7GUISdHhQdnzKCgZVv/Zv8/pdOpwn1vvKFT4zlzEjee4SMnR2dBySpcGC6ecaR1B8569Y8sdgxh/Zsb2bYtaN8RuPZatcNz5/pPYzkOrNGIc3wZVN7VvCWa2LgR3xUtbct3FJAS2p3O41FXzJw5tkzZh0/M8OKLQ76TYCXgtB/WYoqLSeUIHo8GugEOHdLksy+/hNmzrez5iWKNRpzTpAk0a2YzqKIKXyxjxAhIS/Nf8NIoCvXNi8CiRfDvf8P8+erPt6jhaN5cvx+C2r+agDaVuN04atSg5s0jaPhVNoUPZVFYqF/jfffBoEGRGXo8YI1GAtCzp11pRBW+WMbq1eqCcib5ffL+uXOw3+TsszXLyqJkZ+vqKy1NW94CbhwYKRWgePBBrmu3mqe3DSH13EwaNoTvv9d2yJYTxxqNBKBHD/jxx/huwRBT+NJsJ0+GPn3ArdXLIb0hiovV/TJwoKpOJnLWVGlycnSVtngx7NuHuegikryZZiEd/SZOxPmHcZgHHwSXC2MSRxi4KjEild3GJLL06tVLVq5cGelhRBWHD2vgzwb9oowbb4TZs0OMhQeD07clLS1QlzF8uBoau+IIxdsa1rN3Pw5Pif+79ODAiQfq1oXzztPc2kSucakAxphVItLrWMfZlUYCULu2NRhRR0aGanE7HIjDQQFpeIwDh++y53CoSuvw4frcptuWxddL/JxzMEEGA8CJR58fPKhxoQoq3lqOjTUaCcL992sLS0sUkJ0Nu3aBx8OuvoNZmnErDtw4xKPulYEDtRahf/+AsXC5bLptaXJyNH92wQJMRgZmzBiMwxFSryEAtWrBWWdFbpxxhjUaCcLKldr/2BIF5OTA4sVIn76kf7qAMze/RQrFeIwTRo2C//1Pj5swwRqLozFhghZGTpumrSpffBHxpuEGiv1M4qkCVzHWaCQIvXqpqnZeXqRHkuAEqdpu7n0tHhxksBUBtl1+u0qFPPSQul1s8PvYLF6s2uYjRqioo9uN2+vk09RlofjdbGt4KxFrNBKEzEytFbOV4dWMrybDR2amNhSaNYuMv43H4fW9G6DlohnQpYsWr9kYxvGTkgJoPMPjcOBJq0UBaSS986YWSVoqBWs0EgRfZbhNLKtmfDUZwYZDBNmwAYc3TXQTrQM1GqtW6WusW+r4yMmBli39T0tMGs6HHySn3oVsSm6PJ0ivynJyWKORIJx6qnYKtQJt1YyvJmP4cK3LGDwYUlPxOLSsLz1TAAAgAElEQVSgz42hDT8EMn/q17dptSdCZiZs3Yoxhk/bjKLAnYRMmkzfoo+4tfgfvPsbm25bWVijkUB89hn8/veRHkUC4nLBmDEaq2jfHnbvxllcyE/OFv6aDANaUyBiYxknwpw5Wl3/+ONk7n6L/3IF5OfjHNCf75q5mDo10gOMH6zRsFiqmuxsmDEDJk1SXW4RBGjqzg097uBBTSG1sYzjp00bVQMuKSFpyCBuZDZm1C9wDujPE1dkk/l+FsuXR3qQ8YE1GgnEmjVw+unw/vuRHkkC4St
AmzdPqyxbtsRbjVG2VanDAbNmqavFcnz40pOTkjAv/VtTl996C5KSuGbecNbXyeSRRyI9yPjAGo0Eonlz7R5qg+HVyGOPaU8Ml0uNwQ8/UEplKkDv3iotYlcaJ0Z2NjzyCDz+OJ7Fb/FBzUHIuHGY/7uXs8a6WLAAvv460oOMfazRSCAaNYLTTrNGo1oZPx4mTtQvfvVq3F5FWwhjOho3VvlzmzV1YuTk6Ipu7Fi4cwznb5nNx6f9AkpK+PWvtTDcxjZOHms0EoxevazRqFZcLrj1VtiyBe65B0e+VleWcU2BSqVbThyfiyo7G8czM1h41iQ6/PgWRd1UFv322+E///Eu9iwnjDUaCUZmpjaM27s30iNJIP7xD2jbNmRTmVWGw2E7A1UGQTGk5CkPcq3MQ7x1MvfcowlWkydHepCxTVKkB2Apy86d2pLyq6/g229h+3b4+WfYvVslzuvWhXr1oFs31bY77zyNsVYElwvuuMP21qhysrLUOo8Yoc9//jnQkY8wRsPj0T+05eTwuahcLi4oghH1XDzRdx5/yMmhucvFuHEq3HnnndCvX9UOJS8Pli3Trplr1sD69ap04vFoq5RTT1Wx41atVAigRw+dWziifCof0X4axphLgScBJ/BPEXm01P5U4EWgJ7AHuE5ENh/tnLHWTyMvT4uAV6zQOorPPoOtWwP7GzXSXsZNm0J6uvbmOXgQ9uxRb0ZRESQlwTXXwJ/+BO3aRe6zWILIzoYrr9Q+Dk4nDBgACxb4jUWIa8oYvVL07Kk/AEulcd99UKcO/OEP+jwvD848U/+XVq7UP01lIqLnfe45FQg9dEj/vO3aQceOUKOGvqfbrXOEH3/U//cS7cNF7drqQj77bOjbV3t0NWlSuWMsj4r200BEInJDDcVG4HQgBfgC6FjqmLuAp72PRwBzj3Xenj17SrSya5fIBx+IPPmkyC9/KdK1q4jDIaI/NZHWrUVGjBCZNk3kvfdEdu48+vny80WWLhX5/e9FatYUcTpFbrlFZNu2o7+upEQkN7fyPpelHG6/XSQ5WcThEA+IB6QYE/iDB98aNxaZOjXSI04I5s7Vr3zGjMo97xdfiAwcqOeuUUNk9Gj9Pz506OivKyoSWbNG5F//EvnVr0R69RJJSgr8NDIyRK65RmTKFJFFi0S2bBHxeCp37CIiwEqpwLX7qCsNY0yGiGw5OftV7rnPBh4QkUu8z+/1GrFHgo55x3vM/4wxScAOoJEcZdDVvdIQUVfPwYOwb5+2Sdi1C3bs0BnEli0aeFu/PjSO0LixSvz36aOZlpmZuu1E+flnzTacMQNOOQX++189dzhuugmWLIHcXJ0FWaqIO+6AF16AwkJAu/KZ4Apw0D+AiC4pd+6MzDjjnMJC/Z9s0UKfiwS66H73HTRseHLn371bVzQzZ6rbeNIkuPlmfXyiHDkS8ED4bsEB/Nq14Ywz9NaqlabTN2sGrVufeJlPRVcax4ppvAH08J7wNRG5+sSGE5bmQJAjhlyg9GXOf4yIlBhjDgANgd2VOA5AL/jXXKM/KI9H793uwK2kRF1DxcX6IzxyRI1FXl5gaVmapCT9oZ52mp67fXvo0AG6d1d/ZmXSpAk88YRmiAwZAuefD//8J/ziF2WPzcyExi9k8fOcTJpeH6RzlJ2tPmGb8nnyZGWpH8JrMHwy3WVITdWucv37V+/4EgiXS79mnzqLMfDXv+r/4S23wGuvnbib6rXXVCFm3z749a81yN6gwcmPuUYNOPdcvfk4cEDrTHzGbsMGfbxwof9nRp8+VHnl+7GMRvA89PRKfu9wc9zS/1UVOQZjzO3A7QAZGRknPKDCQnUtG6O31FT9MTmdagCSk/WWmqp/1LQ0zf2uW1dv9evrhDE9XS/iTZpUvs/0WHTsqLOSa67RotgtW+D//i/0mHPPhVfJpP4dw6HpPH+aor9y2XLyzJoF69ZBSgpSVOTvJldmhVFSonUFVqSwysjM1AlUcXGg7XHnzqq
W/tvfwu9+p0bkeFbdu3bB3Xfrv0vPnvDeexrMrkrq1dPgfekAvogare3by5/AVipH810Bn4d7XBk34GzgnaDn9wL3ljrmHeBs7+MkdIVhjnbeaI5pVCeFhSIjR6pP9IUXQveVlIjUrSsyfcgykfR0kUmT9H7ZssgMNt6YOlWkb1/98pOTxW2cIt6YRkgcY9QokXr1NPZhqTJ8MYycnLL7xo7VfY89VrFzeTwizz8v0rChhqv+/GeNScQDVDCmcawLuxs4CBwCSryPfc8PVuQNjnLuJGAT0JpAILxTqWN+RWggfN6xzmuNRoDCQhGXS3/cH3wQum9mu6lyc+tlajBA75cts8HYymCZ1xifeabfQJQxGGlpajCmTbMGu4rZulW/8iefLLvP7Ra59tqA4SgsLP88X30lcsEFeuw55+jzeKJSjEZV34DLgO/QLKo/erc9CAz1Pk4DXgG+B1YApx/rnNZohLJ3r0j79iINGohs2BDY/vm0ZVJUs65euCZN0vu6de3Fq7JYtiwkBcYDUuIzHsaI1KolMniwGmlrrKucjAyR664Lv+/IEZHLLhN/BuPs2ZrxdPCg/v/MnSty/vm6v149zbpyu6t1+NVCRY1GRIv7RGQxsLjUtslBjwuAa6t7XPHEKafAm29qgOyqqzSHPCVFM7dI9vrVQe9tKlXlkJWlQTBvxytfEM4BmKFD4d139ftu1iyQdGBjGlXKX/9afvJJWhosWgTvvKPakqNGlT2mdWt49FHNimrUqGrHGu3YivAEoE0beP55zap69FGvjEJODl89OJ+6q7I57aGHNE/Q5dLsKXsBOzkyM2HgQESEnxzNaezZQRJuNR6rV6uSLegfxlItXHHF0fcbA5deChdfrG05vv9ek2KcTs14vPji6K/Uri4iWhFeFcRaRXh1csMN8OqrKmvQuTPc2T6bR38YTv2JY7TAwyu/YDlJsrPhoosQt5tikkmimBKSSaYY43DA0qX2e65miot1JdGypcrvWMpS0ToNazsTiCef1LS9W24BT+8+PLlpCNcxj6L7HlSDMWRI+RWBlmOTlaUFfRMnQlYWO+qcQQrFCODErQajRg1tTWqpVoxRGbB//jPSI4l9rNFIIBo1Ut/uihXww+HGpJTk07F4tSpyr14N+fknV5ae6GzcCC+9BGvWIH/4A40ObfIaDHDiUad6SQls26YGxlJtJCXpfOjTTyM9ktjHGo0EY8QIGDwYum9dyI7xjzONcTS//jwYNw4ef1zLSy0nxogR6gQXLdpzEhoIZ9s2uOQSFSW0LV2rnXPO0Qrqw4cjPZLYxhqNBMMYneTm58NjJWNZmXYuLX74SMvEx46N9PBiG5dLo6gEmiyVyUdbuDDQ/tVSrfTrp5JAK1ZEeiSxjTUaCUiHDjB6NAx8YgiZBR/h6dwFPv5YdRWys9Uvb90nJ443zSZEBt3n9jvjjGrSerCUpm9fvbfq8yeHTblNUP7+4xBqyCKKHWmkbPlRu9Lcc49e8GrX9s+YrYhhBfA1XGrfHiZPRgoLcePE6U2zNaAKtj17aupakv23iwT166vQn810PjnsSiNBqXl4J0XOmtzreRj34XzcM57VHR4P3H+/Bsb79FERQ+t/PzqZmTB3rmZNFRWxtXYHnLgB8DicAdXLb75R4zx5ckBy1VKtxEJnvGjHfn2JymefkT9vEQ8ymXyp4b/IMWoU/PGPuupYs8bWblQElwvmzwcRRKDp4e8pIpn/S50Gl1yqK4wlS7Sor1UrjWvk5ER61AnJ2rXqfc3NjfRIYhdb3Jfg7GrQjkb7NgT87w6HX/6CHj20E4ylfHyuKdAuOUuWALCKs0g+rRldd2WrRoU1vFHBypW6MHzlFW0fYAlQWU2YLPFKVhZs3kx6kMEQwPgMhtOpF8Pp0zVwa2Ma4cnMhIcf1mbtBQX+7nw9WA0/roZp0/S4rCz7HUYBXbuq9tpnn1mjcaJY91SikpQEM2Zg6tVje90OeDChP4a0NO0mN26
cDdyWR1aWVnePHAkFBSHd+Qxox5/vv9dKexsXigp8Yp027fbEsUYjUSkp4bs7p3HkYDHNDq7FUbohYl4evP22FvzZFNHw+CrAn30WmjULqc0QYzDFxarp9eCD1j0VRfTpo24q+7M+MazRSEC++gpcb02g/dNjWZB8tb8ATSjVSzclRQv+rFslPCNGBJrIb9/u//4EML5YYWqqV4feEi307g1Nm8JPP0V6JLGJNRoJSG6uSim8dnc2w90vhxSh+XpZA3DkiKaaWMLjcsEvf+l/aoCD1Ak9JinJChRGGTfcoIvEli0jPZLYxDqrE5BBg2BQWrbWYFx6Kbz5ZmCG7D3G/3juXC1as8HwULKyNE7hdvs3CVCPQ6GrNZcLXn9dVyXWRRUV2F5jJ4ddaSQQBw9qiCI/H60TmDcPLrig/Bf4CtEmTbKB3NJs3AgXXujX2g42uH7GjIHly1VrytZlRBUPPaTzJcvxY41GApGVBePHa4ETEybozHfpUhXlcTr9Fz3ffdFHn8Ejj9g6g3CMGKFTVo8nJGUZvN+fMaozddVVdpUWhRQXa0lNXl6kRxJ7WKORIGzbpiUX11+vBcp+Fi+G9HSM213m4pfy9ecq8/3YY9U82igmKyugxzVoUIixDUFEDcWIEdZgRCG9e2sN6+efR3oksYc1GgnCgw/qhPfhh0vtyM7WPphegjOoBJA9e7TewKreKpmZ2pBk82ZdpXljGmWSCECFH+0KLSrxeVut4u3xY41GAnD4sJYTjB4NrVuX2pmTo7KfycnIkKFlfPOmQwfVSpo/3xoO0O9r9Gh4+mkoLAQITbUNPvbUU6t/fJYK0aSJ/uw/+ijSI4k9bPZUArB7N5x3Htx0U5idPtdJZiYOl4vDdU+l9qEdAJSk1cJs3YYjNRXz+eeqwZDoZGaqu65OHTh40G8oPJSagTmd1mEe5dx8s6q/WI4PK1hoCTB4sD/9FgIXQ+NwYlKSNf7hciV2jw2vZhczZvg3lWBI8n1rxkCLFvDzz/r4rbesi8oSE1RUsNC6p+KcPXtg69YKHDh9Orz5pj52BERFHAAeN1x8sYrvdeiQ2FpKXs0uWrQAwA04kUAswxjYu1e1u6ZMsam2UU5xsdp3S8Wx7qk4Z+ZMbY+Rm3sMF/vSpXDmmdCuHSxY4A/q+ov9vNsANR6JOHv2yaBnZMCWLRTWbkDK4b2hAXCPR91S06bZnusxQGam/jkXLIj0SGIHu9KIY0TgxRfhnHMqEJNdvFgLOJo2RYwjfDYQaJOmRL0Y+gQKc3PB4SD18F4gyLgGlxrbznwxQe/e8OGHIYX9lmNgjUYcs2qV2oEbbzy+1xmnIyQbyHejdm147TW9IGZnqy5VomRUZWWpnEpBAQDiLerzxX10o8BFF2mK8tKl1nDEAAMGwIED2qTSUjGs0YhjXnxRRVavvbaCL8jOhrlzMU4n37QZ6t/sr904fFilM4YMUQ2GOXMSJ7aRmanV8ZdfDh5PSGqt/59o6FD49FN49FFVCLZChVHPgAF6//77kRxFbGGNRpwiAq++qtf3+vUr+KKcHLjuOpgyhY4/vqXnKX3MggXqsxeBN95InNhGTo66nxYuLKN453/2wQe6f+xY/W7atKn2YVqOj2bNNIxnjUbFsYHwOMUYlUg4fPg4XuRLoR0yBEdJMUdO70CRqUG9jZ8jQIGjBmmeI3rMeecljsEAXWns36/GkjB1GaBGwveduFyJ9f3EMI8/Dg0aRHoUsYNdacQxTZuqZt5x43bDtGnU+OdT1N32LcU48OAgzXMEj8P7k3n//fjvteHTmYJA5TyBWE9I0yqHA774wsYxYpAhQ6Bfv0iPInawRiMOEVGliyBJqeNj8WJYvx657DI+aP1LHBgceAO/Ho9WO7vd8NxzWhAYr8HwzEy9okyfrvUZ69YBockBfteUx6PHDx9uDUeMIaL/Kx9+GOmRxAbWaMQhq1drEHz79pM7j3E46Lfun2gJW1DNhtvtlwXn7bfjNxi
ekwNdusA992iqrcMRojMVssoYPBiGDdMeJbagL6YwRlsG3HtvpEcSG0TEaBhjGhhjlhhjNnjvTynnOLcxZo33ZstvKsjrr+tiYMiQkzjJM8/AokU4cYf8SPwzaxG9ZWXFr+8+MxO+/VZTaD//XI0k+h0cplaoOGGzZoEeJYkorxLjjBypiW8bN0Z6JNFPpFYaE4H3RKQt8J73eTiOiEh3721oOcdYSvH663D++ZCefpIncrng9DZlCv38F8sWLVRvPTs7/lxUl12mS7b+/VVrIihjSoA6BIkRejzasMQSs4wcqX/if/870iOJfiJlNK4AXvA+fgG4MkLjiDvWrtXbsGGVcLI77sDxw8bwst+gldHz56sfP95cVBdeqG6pt97SeEZ5wp5paVCrFnz8sY1lxDAtWugcafbs8v/UFiVSRqOJiPwE4L1vXM5xacaYlcaY5cYYa1gqwL590KsXXHmy31Z2tvrxPR5KGoT+eUL8+cuXa/VgvPnxS0pUi8vj0ceUqllJS9P74mLtcHXddfH3HSQYo0bBoUMVFPhMYKqsTsMYsxRoGmbXH4/jNBkist0YczqwzBjzlYiU8ToaY24HbgfIyMg4ofHGC+ecU0nXrpwcXbN/+SXJy5f7NwenmwJQs6aqIt58cyW8aRSxcWPI1aPM5y4ogLZtYcMG/fxr10ZgkJbK5IYb9CefnBzpkUQ3VbbSEJELRaRzmNt/gZ+NMacCeO93lnOO7d77TcD7wFnlHPesiPQSkV6NGjWqks8TCxw6dJzFfEdjwgQNhjds6N/kLuWg8gDk56v7ZsSI+IltZGVp6ll+vj43pqxrzunUUuJatbS/hnVNxTwpKWowPB4rYHg0IuWeWgCM9j4eDfy39AHGmFOMManex+lAP+DbahthDDJzJjRuDDvDmuCTQwBHKVER/48nJQVeeSV++mxs3Ki9RXzObZGQJAADelXJztap6eLF1jUVJ2zYAK1aqRqMJTyRMhqPAhcZYzYAF3mfY4zpZYz5p/eYDsBKY8wXQDbwqIhYo3EUXntNhVgblxchOl6ys7Xye9o0TNu2/gumh6CaDdDS8xkz1LcfD+m3I0aE3Rw2PjpihE2zjSNat9YWsH/7mw2Il0dEjIaI7BGRgSLS1nu/17t9pYjc6n38qYh0EZFu3vvnIjHWWGHbNs0zv/rqSjxpTo5Ouc46CzaGZlGFBMO/+w6aN4/tPhvBkiEulwoSBVHGPQVlhAstsU9SkjYtW7ZMJ2GWstiK8Dhh/ny9v+aaSjxpkIAhHg/mnHMQHP4LaElyWuBi+tNP0KdPJb55NeOTDBkyRI3HK6+E7C6TcmyMGhcrGxJ3jBkD3brpHCgv79jHJxrWaMQJr74KHTtqlmilkpMDnTpp+9LOnfEkJbPNaH/s5OICMAbxSYp8+aXqNPlm7bHUpMnlUvfam29qP/Tly8vKhfgIrpq0siFxR1ISPPWUJs+9/HKkRxN9GIkzx12vXr1k5cqVkR5GtfPll7B7N1xwQRW9wR13wJw5FM17A8cXq3H+4Z6yVeJOpz4ZNAg++ig2e27ceKNWeKGfzY3BiZR1Tw0dCu+9p+67WPp8lgqzYoUuQBPFC2mMWSUivY51nF1pxAldu1ahwfBhDCl/n07SfROR5OQQl42nbl3NKHK7NZsoPz8QJI4VsrO1ArxVK0A/2z5KNVowRgv7duxQg2FXGXFL79765167VoPjFsUajTjg8cerofPYM89o4OTdd5HiYkqKTYjrxnHwYOBYj0dv5WQhRRU+V1p2tsYzrr0Wdu3CYzTBOJ09etxFF+kVJDVVVW27d7dZUwnATz+p8bjzTptN5cMajRjn559h4sST6J1xPLhc0L8/BkimiB8cbXEHBcZDSPKKDUR7wZ+vB8acOeqamjEDyctjdo072IyqC3hwwCefqIhhaqou6Wwr14Tg1FNVgmzWLJg6NdKjiQ5su9cY5+WX1SM0alQ1vFl2tnana9MGz+YttHFvQAjT+jQ
5WWflgwfrY19qVzSSkwN9+8ILL/hXESIwMv8ZBDhAPepzAAoLNa14/nx9jV1hJAz3369Z5ffeq50wKzVDMQaxRiOGEdEZUO/emjlVpWRn64x83jwAHJdcgnilFnwBcf+Ko7hYKwz37FFp8Zyc6I1tZGbqVaGkRA0D2nwqSbRTYT0OqDvK49HKSUuVUFxcTG5uLgUFBZEeSljuuw9uu01jG19+Gdv6VGlpabRo0YLkE/wQ1mjEMGvWwFdfwT/+UQ1vlpPjNxgMH4655BLci970S4uUcVFt2wY9e2oPzWbNdFt2dvTN0nNytAzYKzgoAB5PaFaYMfDYY/CnP+myzmcAo+lzxDi5ubnUqVOHVq1aYaI0XaltW5XoOfVUnUfEIiLCnj17yM3NpXXr1id0jhj96BbQ63KbNtUUb/Z1pfMZj4ULOTLgsjKH+WOFKSmwapWqKPrEDKOh70Zw5XdWlooNBinUlun9DWoo3n5b74uKNJ040p8jzigoKKBhw4ZRazBAVxfNm6vBKCmJzcC4MYaGDRue1IrOGo0YZvBgFVg7JWyz3CrCZzyA2pPvwXgVYIP/f8SYQI6iz4fmc21F2k3lC3xnZ+vjWbNCen9DqWK+mjX1fskSOHJEn8+fH/nPEYdEs8EIpqREuwDn5kZ6JCfGyX7P1j0Vo+zcCQ0aBJKUIsLo0YhXATZk9hE8BevZU4vlJk2Kjguty6XGa8gQqFcvkB5MYHUREqPxSb+XlOhxQ4dGx+ewRIykJP3p/Pyz3tetG+kRVS92pRGj3HqrJv1EjOxsTWKnrIChf+XhcKiLatQoVcGNFo0mlwuuukp7ZhQVUZRWN2S1VMbrUFKi1e4OhzaRnj69esdriTpattQaz82b/Y0dEwZrNGKQLVu06HrgwAgOIicHLrnEv9T1hDvGGI1tvP++zu6jQdwvK0sv+q+/rgV7QHLBwRBDUSam4Xvd0qVQo4bKoEb6c1gqnc2bN3PmmWdy66230rlzZ0aOHMnSpUvp168fbdu2ZcWKFeTl5XHzzTfTp08mI0eexZIl/yU3V1/bv39/evToQY8ePfj0008BeP/99xkwYADXXHMNZ555JiNHjiTWpZuseyoGeewxvR7/6lcRHMSECdqsyOnE07gpzu25ZWTTjU9WpGvXgFtozpzIZlAlJWm11tChyDvvhBiIYOl3P8Zoyb1P9n3RosBnsG6qKuF3v9PMwMqke3d44oljH/f999/zyiuv8Oyzz5KZmcnLL7/Mxx9/zIIFC5gyZQodO3bkggsu4F//+hf79++nR4/eXHDBhbRt25glS5aQlpbGhg0buP766/Fp4K1evZpvvvmGZs2a0a9fPz755BPOPffcyv2A1Yg1GjHGjh3aoe/GGyHi7dCfeQbat8c5blzZAr8mTeHnHfp4167A9tdfD6TuRoKSEujbF1m4kEJHDVJL7S6zwrjwwtA+IS6XNRZxTOvWrenSpQsAnTp1YuDAgRhj6NKlC5s3byY3N5cFCxbwuLffSklJAampW3C7mzFmzN2sWbMGp9PJd9995z9n7969adFClaG7d+/O5s2brdGwVB+zZ2vt3MSJkR4J6qJ55BFo3hxHbm5ITMDx8w4KU+qQUnQI8803MHmyxjWqO4MqK0uzpEq9pwikufNDVkdhWbJEP6c1FNVGRVYEVUVqamAa4XA4/M8dDgclJSU4nU5ee+012pcq9Jw06QFq127CF198gcfjIS0tLew5nU4nJTEeBLExjRhj3DiVbG7bNtIjQV0055yjuYcOByYpCWrX9l+EU4oOsbdpJ62GeughlUyvblXY4BRbgFmzkOXLMUiIwSizwnA69WYMXHmljWFYALjkkkv429/+5o9LrF69GoAdOw6QmnoqBw86mD17Nm63O5LDrFKs0Ygh3G69hvXsGemReMnMVCnxvn01SNysGRw+TBHJ/otxgx3fIJs2aQbV7NlaF1Gd+GIpw4dDrVrIunUAeMLILPq3OJ26wliyRLUjRoy
wEugWACZNmkRxcTFdu3alc+fOTJo0CYBx4+5i8eIXOO+8vqxb9x21atWK8EirEBGJq1vPnj0lHtm7V6R1a5GXXor0SIKYOlVk2bLA85YtxY0RD0gxDvGAeEAKk2qKpKaKgEhycuhrqmt8kyaJeMfj9t6L9xb8WEDE4ai+MVpEROTbb7+N9BBOmrw8kZwckU2bIj2SYxPu+wZWSgWusXalESP89reaalvlwoTHQ1B1OACDB/u1qJKCknBTSvKRwsLAMumxx6pnfD7X1JAhyGOPQdu2ZdJpw7qmRKxLynLc1Kypntg9e2Dv3kiPpuqwgfAY4I031LMzebKmDkYtr7yiJbIHDoTfL6Junp49A+KFGzfqvmeeCRx3ssKGwcHvvn2RRYvUlG3Y4FesLddwpKSoBEpRkabW2gC45Tho1kznRvXqRXokVYddaUQ5u3dre+7u3bWmLKpp1AgOHAi5IJeZxbvdKmI4fLiW0774ol6cfbP6kxE29IkRBgW/3d99Hypz4pUMCa5eD4llXHwxXH651pbYRkuW48QYNRxOp/7U4jEebo1GlLN4sU7cX3xRJ8FRTVCla+kU1hCpkXXr4LTTNAW3Rw+dlg0frkupowkbBivU+sjO1o56wcYC4N57kcsuw7NhYyXJmnEAABM8SURBVEhKbbCKbcgYfT3OQQv4PvvMSp9bThgRWL8eNm2KTTXco2GNRpRz443w/ffgrTeKbs47Tx27Dkd4KQ7fNqdTNanq1gWv3AJjxhw7Lbd0+qxvVXLhhQFjMW+exiMmTcJTUESyFLOjRU9ISgrfltbHwYOaBbZ8uY1lWE4aY6BhQ53wbdrkX+DGBdZoRCEiKgq7bJk+9xaTRj9t2sDo0SH/IaUnWQIBhbeDB/W+sFBXHaNGqSBgedK9wemzwauSsWNDta2OHIH8fAxCCQ6a5q4KUZULKxcCcNZZeh6bXmupBBo31v/dfft04hcvriprNKIMj0evgX/+s3pJYoqkJL34A6SnA0dRwA1m505tCvLKKxpPeOQRDeSEc0Xl5ARWJWPGBNxYOTm6SnnoIS2ZBxwGnIQasLCeAmNUsnTVKj2fdUslHFu3bsXlctGhQwc6derEk08+WSnnbdoUWrWCnJw1PP/84nKPa9WqFbt3766U96xqrNGIItasgXPPVRmF3/4Wpk2L9IiOk7lzNfAybZpKjwc1ewkWBaT0Y2M0s8nXDi09XdvEBruipk/XrlNJSfDoo6pQO316QKb8+ec1xczXUMmYEGdyaXdZGVHChx9WP4J1TSUkSUlJTJs2jbVr17J8+XKeeuopvv3220o5d3o6HDiwhpUr1WgUF8f4qqMixRyxdIvV4r4vv9SaskaNRJ5/XsTjifSIToDgYrqpU0Vuv11k6NDwBXSlb8bovdOpX0S9eiLTponUqiXStq3uHzNGJD3df07JyNDtGRkhhXo7SPcXFkqpew9IScP0wHtNmqTvVbeuvt/UqZH9DhOUaCvuGzp0qLz77rtht7/wwgsiIvL000/LDTfcUOaYefPmSadOnaRr167Sv39/KSwslJYtW0p6erp069ZNnnxyjrz//m7p3/8i6dq1u9x22+2SkZEhu3btqvLP5eNkivsifpGv7FusGI39+0XmzBH5y1/0uccj8ve/a+V33LBsmV7kGzcuc/E+qgGpX18NTlqaSNOm4q/SHjxYpG9frS5PTtbtvv1BRqH0e4R9v+Cq72XL1HDcfntkv68EpvRF7Pzzy96eekr35eWF3z9rlu7ftavsvuPhhx9+kJYtW8qBAwfK7NuxY4e0adNGPvzwQ2nbtq3s2bOnzDGdO3eW3NxcERHZt2+fiIjMmjVLfvWrX4mIyOHDIqNG/VruuONPkpMj8te/LhLAbzQKCkTc7uMb8/FiK8JjhGXL4De/0XhrgwYqafTMMxrH8PXHqNZ+31VNTg5ce63GLChbiV1ujGH/fs2wKipSLXjQL2nRIs1uMkZdVE2a6H5H+J9x6TiK/72
SkiA5GVav1jRel0v7ftu6jITn8OHDXH311TzxxBPUDdPHtUmTJjz44IO4XC6mTZtGgwYNyhzTr18/brrpJmbOnBlWuLBWLfjyyw8ZO/YXtG4Nl19+OfXr6z++CHzzDXz+OXzxhWanb9oUqDAX0Q7EPk9uJLAV4UEUF+sfxOOdmvp6CDVooK76Awe0w2lhoSboFBRAXh4MGKA/hE8+gYULtSBvxw4Vf83N1aLnevXg3Xfhn/+Es8/W7KgBA6B//3KvebHPhAlaQ9GhA6xdG7IrXOOjkIwmX0A9HMXFWulYUKDPPZ4QA1H6/KW3UVKi4xo3Thssge2TEWW8/375+2rWPPr+9PSj7y+P4uJirr76akaOHMlVV11V7nFfffUVDRs2ZPv27WH3P/3003z22We8+eabdO/enTXldJRKTjY0bKipuUHhP047Ta8xhYU6b8rL0zwN0OuRL9Tia4yZnKyZWmHsV5VgjUYQ8+fDddeV3b58OfTpA6++qr25S/P119Cpk84O/vIX/dE2bqx9hM85J5Dted99mhVVXkZpXDJ+PFxyiT5u3hy2bStTbFc6OH7UegoIWPMwlDZAYbO1ataEBQs0+yrGextYKgcR4ZZbbqFDhw6MDW66VYoVK1bw1ltvsXr1as4//3wuvvhiWrduHXLMxo0b6dOnD3369GHhwoVs3bqVOnXqcOjQIf8x5513Hi+99BL33Xcfb731Fvv27QMC9R3lYQycfrrOm4qK9N6bLFhtJNLl65h0767JOMbozddSoVUr3e9ywX/+A6mpavnT0nSF4fvN3HUX3H136KwhmNq1q+VjRBc5OeqP69lTy9q9lHEZnSClVxi+baX/BJ669XEWFeh/5LZtOp5WrWx6rQWATz75hNmzZ9OlSxe6ewXepkyZwmWXXeY/prCwkNtuu41Zs2bRrFkzpk2bxs0338yyZcswQf/048ePZ8OGDYgIAwcOpFu3bmRkZPDoo4/SvXt37r33Xu6//36uv/56evTowfnnn09GBdtwOp3Vt6IoDyMRcIwZY64FHgA6AL1FZGU5x10KPAk4gX+KyKPHOnevXr3E15vXEiX4KrdPP119cStW+F1KpVNxg1cJFVl1BP96S68qDOBG88oNaKVVbq6m637yicZIrEsqKli7di0dOnSI9DAShnDftzFmlYj0OtZrI+VN/xq4CviwvAOMMU7gKWAQ0BG43hgTTcLgloqSk6OV1p99BlOmqBPWmLC1G+5S2442pSltIAjz3AF4OnTS5V9urq4w3n1XDUZwHYjFYqkQETEaIrJWRNYf47DewPcisklEioA5wBVVPzpLpePru+FbcUyZEhLYCS68cwAlQT/Lo/XvNqWOCV6ZuB1O/z7num81s6FZM/jxx0DPbysZYrEcN9Gct9Mc2Br0PNe7zRKr+FYc2dkavSuVNmaAIpJY0enWEBeVj9Krj6OtNJweNx5vGiMiGlDati1Uo8pKhlgsx02VGQ1jzFJjzNdhbhVdLYRzZ5cjHWRuN8asNMas3LVr14kP2lK1+C7QS5eqi8rj0WwCp64KBEihhHO+eRYDeDh66my47KhgY+M4dDCw4/BhzXKwKwyL5aSosuwpEbnwJE+RC7QMet4CCJsYLSLPAs+CBsJP8n0tVUlOjuq9f/ih9q8dPRqefhrOPBOzcSNSXBzkbnIgXsHBo6Xplp5d+A2MLy23b1/Nmho3Tp+PHWsD4BbLCRLN7qkcoK0xprUxJgUYASyI8JgsJ8uECVoG/8tfajB682YtsFu7FgYNUoPgTV90BhmM4HvCPA+uyzDBZfXJydqFb9EifZ+lS6viU1ksCUNEjIYxZpgxJhc4G3jTGPOOd3szY8xiABEpAe4G3gHWAvNE5JtIjNdSBfiC44sX68z/rru04G7oUExqqj/vvXTlOIDbuzVstTdoA4PgYpkRI/R+7Fh9P4vlGDzwwAM87lMLKIc33nij0pRwy2P79u1cc801xzxuypQpVTqOYCKVPTV
fRFqISKqINBGRS7zbt4vIZUHHLRaRdiLSRkQejsRYLdXEsmVaof2732mAvFT9kH8VYQxOJJBxFVxe73D44yOIaHptzZrayc+m1sYP5bX9zcqq1mFUh9Fo1qwZr7766jGPi3ujYbGUYd06+Mc/YM6cgKgOwBlnhGZZBRuTmjVVBsS3qvB4Al0D27bVc06erCsNG/iOH8pr+5uZeVKnffjhh2nfvj0XXngh69cHKgJmzpxJZmYm3bp14+qrryY/P59PP/2UBQsWMH78eLp3787GjRvDHleaBx54gFGjRnHBBRfQtm1bZs6cCaiMyfjx4+ncuTNdunRh7ty5AGzevJnOnTsD8Pzzz3PVVVdx6aWX0rZtWyZ4E0smTpzIkSNH6N69OyNHjiQvL4/LL7+cbt260blzZ/+5Ko2KSOHG0i1WpNEt5TB1qkqg16sX6HVRs6ZISkqotHl6eqjE+bRp+hi0/4ZIQJrdJ4FuiVqOu5+G7287aVKl/I1XrlwpnTt3lry8PDlw4IC0adNGHnvsMRER2b17t/+4P/7xj/LXv/5VRERGjx4tr7zyin9feccFc//990vXrl0lPz9fdu3aJS1atJBt27bJq6++KhdeeKGUlJTIjh07pGXLlrJ9+3b54YcfpFOnTiKi8uqtW7eW/fv3y5EjRyQjI0O2bNkiIiK1atXyv8err74qt956q//5/v37y4zDSqNb4ofMTFWInD9fYx4iupIoLXezezdkZOi+1atV3yolRVcYW7dqu1ibXhu/uFzh2/6eIB999BHDhg2jZs2a1K1bl6FDh/r3ff311/Tv358uXbrw0ksv8c034UOrFT3uiiuuoEaNGqSnp+NyuVixYgUff/wx119/PU6nkyZNmnD++eeTE+Z3O3DgQOrVq0daWhodO3bkxx9/LHNMly5dWLp06f+3d7+xVd11HMff39YbbgiKDsyAXVL7hOiyAmukLFnoHFOyARnpk8EMVmJSnmCyGYuIS9OmPFk6o4YgdYsxMbGJZSgjLPEPJhB4gEZbN8kyNYvWrmNGvD6wDyC2+PXBObcttWWHyz3nd+/t5/Wk99xz237Pvcn93N/53fP9ceTIES5fvszKlSvLfFYWptCQ6lK6APDxx6Pbvb2zgZHPz3Z9bGiIlngdGIjapHd0RJPcL78chcfwsC7gq2cXLkTt83t6op8VmLOyRTqNHjhwgBMnTnD16lV6e3u5WWrJX+bj5v8fM8MT9gBctmzZzO3GxkamF+jSvGHDBkZGRmhpaeHo0aP09/cn+ttJKTSkupS+VVW6PT0N27dHE9w7dkSLCzQ3R3MXhw5F+zs7o2Apndt+7bVopKIRRn0qvc6nTkF//+1X+Zepvb2dM2fOcOPGDSYnJzl37tzMvsnJSdauXcvU1BRDQ0Mz989vd77Y4+Y7e/YsN2/epFgscvHiRbZs2UJ7ezvDw8PcunWL69evc+nSJdra2hLXn8vlmIp7pF+7do3ly5ezf/9+uru7GR0dvZun4gOpNbpUt9IooaFhdg2Mkydhz55oe2wsGl3cf390qqKnZzZ0dAFffZo7GoXbT0OW+Zq3trayd+9eNm/eTFNTE9u2bZvZd+zYMbZu3UpTUxMtLS0zQbFv3z66uro4fvw4p0+fXvRx87W1tbFr1y7Gx8fp6elh3bp1dHR0cOXKFTZt2oSZMTAwwJo1axgbG0tU/8GDB9m4cSOtra10dnZy+PBhGhoayOVyDN5pQbMyBGmNnia1Rq9TO3dG61+8+moUHIOD0VKyY2PRQk/PPDN7/9w3FKkJS6U1el9fHytWrKC71J0gkHtpja6RhtSG0kV5q1fPjij6+28/VVFasnXutohUlEJDasf8yc/SZHmFT1WIpKWvry90CfdMoSG14W5GFKX9IlJx+vaU1IY7jSikLtTb/Gq1utfnWSMNqQ0LXWuhEUXdyOfzFItFVq1atej1EnLv3J1isUg+ny/7byg0RCS4QqHAxMQEWkQtffl8nkKhUPb
vKzREJLhcLkdzc3PoMiQBzWmIiEhiCg0REUlMoSEiIonVXRsRM7sO/H+/4Oq3Gvhn6CIypmNeGnTMtaHJ3T/+QQ+qu9CoVWb2uyR9X+qJjnlp0DHXF52eEhGRxBQaIiKSmEKjerwSuoAAdMxLg465jmhOQ0REEtNIQ0REElNoVCEz6zYzN7PVoWtJm5m9ZGZ/NLM/mNkZM/to6JrSYGZPmtmfzOwdM/t66HrSZmbrzeyCmb1tZm+Z2XOha8qKmTWa2e/N7PXQtaRBoVFlzGw98DlgPHQtGTkPPOTuG4E/A0cD11NxZtYIfBd4CngQeNbMHgxbVeqmga+6+6eAR4BDS+CYS54D3g5dRFoUGtXn28DXgCUx2eTuv3T36Xjz10D57TerVxvwjrv/xd3/A/wY2BO4plS5+/vuPhrfniR6E30gbFXpM7MCsAv4fuha0qLQqCJm9jTwnru/GbqWQL4E/Cx0ESl4AHh3zvYES+ANtMTMPgE8DPwmbCWZ+A7Rh77/hi4kLWqNnjEz+xWwZoFdLwDfAHZkW1H67nTM7n42fswLRKc0hrKsLSMLrSq0JEaSZrYC+AnwvLv/O3Q9aTKz3cA/3H3EzD4Tup60KDQy5u6fXeh+M2sBmoE345XLCsCombW5+98zLLHiFjvmEjP7IrAbeMLr8zvgE8D6OdsF4FqgWjJjZjmiwBhy95+GricDjwJPm9lOIA98xMx+5O77A9dVUbpOo0qZ2RjwaXevtaZnd8XMngS+BTzm7nW5bJuZfYhokv8J4D3gt8Dn3f2toIWlyKJPPj8E/uXuz4euJ2vxSKPb3XeHrqXSNKchoZ0APgycN7M3zOx7oQuqtHii/8vAL4gmhE/Vc2DEHgW+AGyPX9c34k/gUuM00hARkcQ00hARkcQUGiIikphCQ0REElNoiIhIYgoNERFJTKEhkrK44+tfzey+ePtj8XZT6NpE7pZCQyRl7v4uMAi8GN/1IvCKu/8tXFUi5dF1GiIZiFtqjAA/ALqAh+OOtyI1Rb2nRDLg7lNmdhj4ObBDgSG1SqenRLLzFPA+8FDoQkTKpdAQyYCZbSZakfER4CtmtjZwSSJlUWiIpCzu+DpItKbEOPAS8M2wVYmUR6Ehkr4uYNzdz8fbJ4FPmtljAWsSKYu+PSUiIolppCEiIokpNEREJDGFhoiIJKbQEBGRxBQaIiKSmEJDREQSU2iIiEhiCg0REUnsfxBV+uRsLmYcAAAAAElFTkSuQmCC\n", + "text/plain": [ + "
" + ] + }, + "metadata": { + "needs_background": "light" + }, + "output_type": "display_data" + } + ], + "source": [ + "plot(xt, f_mean[:,0], 'b-', label='mean')\n", + "plot(xt, f_mean[:,0]-2*np.sqrt(f_var[:, 0]), 'b--', label='2 x std')\n", + "plot(xt, f_mean[:,0]+2*np.sqrt(f_var[:, 0]), 'b--')\n", + "plot(X, Y, 'rx', label='data points')\n", + "ylabel('F')\n", + "xlabel('X')\n", + "_=legend()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.0" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/notebooks/variational_auto_encoder.ipynb b/examples/notebooks/variational_auto_encoder.ipynb index 1736b67..3e0efdc 100644 --- a/examples/notebooks/variational_auto_encoder.ipynb +++ b/examples/notebooks/variational_auto_encoder.ipynb @@ -6,28 +6,25 @@ "source": [ "# Variational Auto-Encoder (VAE)\n", "\n", - "### Zhenwen Dai (2018-8-21)" + "### Zhenwen Dai (2019-05-29)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "```\n", - "# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.\n", - "#\n", - "# Licensed under the Apache License, Version 2.0 (the \"License\").\n", - "# You may not use this file except in compliance with the License.\n", - "# A copy of the License is located at\n", - "#\n", - "# http://www.apache.org/licenses/LICENSE-2.0\n", - "#\n", - "# or in the \"license\" file accompanying this file. This file is distributed\n", - "# on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either\n", - "# express or implied. 
See the License for the specific language governing\n", - "# permissions and limitations under the License.\n", - "# ==============================================================================\n", - "```" + "Variational auto-encoder (VAE) is a latent variable model that uses a latent variable to generate data represented in vector form. Consider a latent variable $x$ and an observed variable $y$. The plain VAE is defined as\n", + "\\begin{align}\n", + "p(x) =& \\mathcal{N}(0, I) \\\\\n", + "p(y|x) =& \\mathcal{N}(f(x), \\sigma^2I)\n", + "\\end{align}\n", + "where $f$ is the deep neural network (DNN), often referred to as the decoder network.\n", + "\n", + "The variational posterior of VAE is defined as \n", + "\\begin{align}\n", + "q(x) = \\mathcal{N}\\left(g_{\\mu}(y), \\sigma^2_x I)\\right)\n", + "\\end{align}\n", + "where $g_{\\mu}$ is the encoder networks that generate the mean of the variational posterior of $x$. For simplicity, we assume that all the data points share the same variance in the variational posteior. This can be extended by generating the variance also from the encoder network." ] }, { @@ -80,7 +77,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Model Defintion" + "## Model Defintion\n", + "\n", + "We first define that the encoder and decoder DNN with MXNet Gluon blocks. Both DNNs have two hidden layers with tanh non-linearity." 
] }, { @@ -101,11 +100,10 @@ "H = 50\n", "encoder = nn.HybridSequential(prefix='encoder_')\n", "with encoder.name_scope():\n", - " encoder.add(nn.Dense(H, activation=\"tanh\"))\n", - " encoder.add(nn.Dense(H, activation=\"tanh\"))\n", - " encoder.add(nn.Dense(Q, flatten=True))\n", - "encoder.initialize(mx.init.Xavier(magnitude=3))\n", - "_=encoder(mx.nd.array(np.random.rand(5,D)))" + " encoder.add(nn.Dense(H, in_units=D, activation=\"tanh\", flatten=False))\n", + " encoder.add(nn.Dense(H, in_units=H, activation=\"tanh\", flatten=False))\n", + " encoder.add(nn.Dense(Q, in_units=H, flatten=False))\n", + "encoder.initialize(mx.init.Xavier(magnitude=3))" ] }, { @@ -117,20 +115,17 @@ "H = 50\n", "decoder = nn.HybridSequential(prefix='decoder_')\n", "with decoder.name_scope():\n", - " decoder.add(nn.Dense(H, activation=\"tanh\"))\n", - " decoder.add(nn.Dense(H, activation=\"tanh\"))\n", - " decoder.add(nn.Dense(D, flatten=True))\n", - "decoder.initialize(mx.init.Xavier(magnitude=3))\n", - "_=decoder(mx.nd.array(np.random.rand(5,Q)))" + " decoder.add(nn.Dense(H, in_units=Q, activation=\"tanh\", flatten=False))\n", + " decoder.add(nn.Dense(H, in_units=H, activation=\"tanh\", flatten=False))\n", + " decoder.add(nn.Dense(D, in_units=H, flatten=False))\n", + "decoder.initialize(mx.init.Xavier(magnitude=3))" ] }, { - "cell_type": "code", - "execution_count": 7, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "from mxfusion.components.variables.var_trans import PositiveTransformation" + "Then, we define the model of VAE in MXFusion. Note that for simplicity in implementation, we use scalar normal distributions defined for individual entries of a Matrix instead of multivariate normal distributions with diagonal covariance matrices." 
] }, { @@ -142,23 +137,42 @@ "name": "stdout", "output_type": "stream", "text": [ - "x ~ Normal(mean=Variable(e909c), variance=Variable(e90bf))\n", - "f = GluonFunctionEvaluation(decoder_input_0=x, decoder_dense0_weight=Variable(0f71b), decoder_dense0_bias=Variable(aee54), decoder_dense1_weight=Variable(8db61), decoder_dense1_bias=Variable(7c56e), decoder_dense2_weight=Variable(85b99), decoder_dense2_bias=Variable(21241))\n", - "y ~ Normal(mean=f, variance=noise_var)\n" + "Model (37a04)\n", + "Variable (b92c2) = BroadcastToOperator(data=Variable noise_var (a50d4))\n", + "Variable (39c2c) = BroadcastToOperator(data=Variable (e1aad))\n", + "Variable (b7150) = BroadcastToOperator(data=Variable (a57d4))\n", + "Variable x (53056) ~ Normal(mean=Variable (b7150), variance=Variable (39c2c))\n", + "Variable f (ad606) = GluonFunctionEvaluation(decoder_input_0=Variable x (53056), decoder_dense0_weight=Variable (b9b70), decoder_dense0_bias=Variable (d95aa), decoder_dense1_weight=Variable (73dc2), decoder_dense1_bias=Variable (b85dd), decoder_dense2_weight=Variable (7a61c), decoder_dense2_bias=Variable (eba91))\n", + "Variable y (23bca) ~ Normal(mean=Variable f (ad606), variance=Variable (b92c2))\n" ] } ], "source": [ - "m = mf.models.Model()\n", - "m.N = mf.components.Variable()\n", - "m.decoder = mf.components.functions.MXFusionGluonFunction(decoder, num_outputs=1,broadcastable=False)\n", - "m.x = mf.components.distributions.Normal.define_variable(mean=mx.nd.array([0]), variance=mx.nd.array([1]), shape=(m.N, Q))\n", + "from mxfusion.components.variables.var_trans import PositiveTransformation\n", + "from mxfusion import Variable, Model, Posterior\n", + "from mxfusion.components.functions import MXFusionGluonFunction\n", + "from mxfusion.components.distributions import Normal\n", + "from mxfusion.components.functions.operators import broadcast_to\n", + "\n", + "m = Model()\n", + "m.N = Variable()\n", + "m.decoder = MXFusionGluonFunction(decoder, 
num_outputs=1,broadcastable=True)\n", + "m.x = Normal.define_variable(mean=broadcast_to(mx.nd.array([0]), (m.N, Q)),\n", + " variance=broadcast_to(mx.nd.array([1]), (m.N, Q)), shape=(m.N, Q))\n", "m.f = m.decoder(m.x)\n", - "m.noise_var = mf.components.Variable(shape=(1,), transformation=PositiveTransformation(), initial_value=mx.nd.array([0.01]))\n", - "m.y = mf.components.distributions.Normal.define_variable(mean=m.f, variance=m.noise_var, shape=(m.N, D))\n", + "m.noise_var = Variable(shape=(1,), transformation=PositiveTransformation(), initial_value=mx.nd.array([0.01]))\n", + "m.y = Normal.define_variable(mean=m.f, variance=broadcast_to(m.noise_var, (m.N, D)), \n", + " shape=(m.N, D))\n", "print(m)" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We also define the variational posterior following the equation above." + ] + }, { "cell_type": "code", "execution_count": 9, @@ -168,17 +182,19 @@ "name": "stdout", "output_type": "stream", "text": [ - "x_mean = GluonFunctionEvaluation(encoder_input_0=y, encoder_dense0_weight=Variable(9768f), encoder_dense0_bias=Variable(9f1a0), encoder_dense1_weight=Variable(18970), encoder_dense1_bias=Variable(bcff4), encoder_dense2_weight=Variable(3d2a8), encoder_dense2_bias=Variable(95031))\n", - "x ~ Normal(mean=x_mean, variance=x_var)\n" + "Posterior (4ec05)\n", + "Variable x_mean (86d22) = GluonFunctionEvaluation(encoder_input_0=Variable y (23bca), encoder_dense0_weight=Variable (51b3d), encoder_dense0_bias=Variable (c0092), encoder_dense1_weight=Variable (ad9ef), encoder_dense1_bias=Variable (83db0), encoder_dense2_weight=Variable (78b82), encoder_dense2_bias=Variable (b856d))\n", + "Variable (6dc84) = BroadcastToOperator(data=Variable x_var (19d07))\n", + "Variable x (53056) ~ Normal(mean=Variable x_mean (86d22), variance=Variable (6dc84))\n" ] } ], "source": [ - "q = mf.models.Posterior(m)\n", - "q.x_var = mf.components.Variable(shape=(1,), transformation=PositiveTransformation(), 
initial_value=mx.nd.array([1e-6]))\n", - "q.encoder = mf.components.functions.MXFusionGluonFunction(encoder, num_outputs=1, broadcastable=False)\n", + "q = Posterior(m)\n", + "q.x_var = Variable(shape=(1,), transformation=PositiveTransformation(), initial_value=mx.nd.array([1e-6]))\n", + "q.encoder = MXFusionGluonFunction(encoder, num_outputs=1, broadcastable=True)\n", "q.x_mean = q.encoder(q.y)\n", - "q.x.set_prior(mf.components.distributions.Normal(mean=q.x_mean, variance=q.x_var))\n", + "q.x.set_prior(Normal(mean=q.x_mean, variance=broadcast_to(q.x_var, q.x.shape)))\n", "print(q)" ] }, @@ -186,50 +202,52 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Variational Inference" + "## Variational Inference\n", + "\n", + "Variational inference is done via creating an inference object and passing in the stochastic variational inference algorithm." ] }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ - "from mxfusion.inference import BatchInferenceLoop, StochasticVariationalInference, GradBasedInference" + "from mxfusion.inference import BatchInferenceLoop, StochasticVariationalInference, GradBasedInference\n", + "\n", + "observed = [m.y]\n", + "alg = StochasticVariationalInference(num_samples=3, model=m, posterior=q, observed=observed)\n", + "infr = GradBasedInference(inference_algorithm=alg, grad_loop=BatchInferenceLoop())" ] }, { - "cell_type": "code", - "execution_count": 11, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "observed = [m.y]\n", - "alg = StochasticVariationalInference(num_samples=3, model=m, posterior=q, observed=observed)\n", - "infr = GradBasedInference(inference_algorithm=alg, grad_loop=BatchInferenceLoop())" + "SVI is a gradient-based algorithm. We can run the algorithm by providing the data and specifying the parameters for the gradient optimizer (the default gradient optimizer is Adam)." 
] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "metadata": { - "scrolled": true + "scrolled": false }, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Iteration 201 loss: 1715.0395507812525\n", - "Iteration 401 loss: 599.87670898437525\n", - "Iteration 601 loss: 149.24291992187538\n", - "Iteration 801 loss: -44.793395996093755\n", - "Iteration 1001 loss: -202.39929199218755\n", - "Iteration 1201 loss: -314.48220825195315\n", - "Iteration 1401 loss: -301.41076660156255\n", - "Iteration 1601 loss: -585.94531250937585\n", - "Iteration 1801 loss: -702.51806640625525\n", - "Iteration 2000 loss: -775.11627197265625" + "Iteration 200 loss: 1720.556396484375\t\t\t\t\t\n", + "Iteration 400 loss: 601.11962890625\t\t\t\t\t\t\t\n", + "Iteration 600 loss: 168.620849609375\t\t\t\t\t\t\n", + "Iteration 800 loss: -48.67474365234375\t\t\t\t\t\n", + "Iteration 1000 loss: -207.34835815429688\t\t\t\t\n", + "Iteration 1200 loss: -354.17742919921875\t\t\t\t\n", + "Iteration 1400 loss: -356.26409912109375\t\t\t\t\n", + "Iteration 1600 loss: -561.263427734375\t\t\t\t\t\t\n", + "Iteration 1800 loss: -697.8665161132812\t\t\t\t\t\n", + "Iteration 2000 loss: -753.83203125\t\t\t\t8\t\t\t\t\t\n" ] } ], @@ -241,31 +259,37 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Plot the training data in the latent space" + "## Plot the training data in the latent space\n", + "\n", + "Finally, we may be interested in visualizing the latent space of our dataset. We can do that by calling encoder network." 
] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ + "from mxfusion.inference import TransferInference\n", + "\n", "q_x_mean = q.encoder.gluon_block(mx.nd.array(Y)).asnumpy()" ] }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 18, "metadata": {}, "outputs": [ { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXYAAAD8CAYAAABjAo9vAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAFlBJREFUeJzt3X+I3Hedx/HXeye7nKLUYpMoTbZrq4g/7hjbMVeRcFJ7sXc0EevJ6R1Szjb55wQFj3peQe4EuVNROM6Cl01FD0pFsGpalbZixQjGdLbMmcacR1oakqtkq5dWpYezu/O+PzYTNpvZnV+f7/fzmc/3+YCy3WYy89lJ85rPvL/vz3vM3QUAyMdU7AUAAMIi2AEgMwQ7AGSGYAeAzBDsAJAZgh0AMkOwA0BmCHYAyAzBDgCZ2RLjQa+66iqfm5uL8dAAMLEWFhZ+5e5b+90uSrDPzc2p2WzGeGgAmFhmdnqQ21GKAYDMEOwAkBmCHQAyQ7ADQGbGDnYz+wMzO2Zm/2lmJ8zsn0IsDAAwmhBdMb+XdJO7/87MpiX92My+5+5HA9w3AGBIY+/YfdXvLnw7feEfPpYJmFCtxZYOHT+k1mIr9lIwoiB97GZWk7Qg6bWS7nH3n/a4zQFJByRpdnY2xMMCCKy12NL+R/arvdLWTG1G83vmVd9Wj70sDCnIxVN3X3H3uqQdknaZ2Zt73OaguzfcvbF1a9+DUwAiaJ5rqr3SVkcdLXWW1DzHQcJJFLQrxt2fl/RDSbeEvF8A5Whsb2imNqOa1TQ9Na3G9kbsJWEEY5dizGyrpCV3f97MXiLpZkmfGXtlAEpX31bX/J55Nc811djeoAwzoULU2F8t6asX6uxTkr7u7g8FuF8AEdS31Qn0CTd2sLv7zyS9JcBaAAABcPIUADJDsANAZgh2AMgMwQ4AmSHYASAzBDsAZIZgB4DMEOwAkBmCHavOHJOOfH71K4CJFmRsLybcmWPSV/dJK22pNiPdfljauSv2qgCMiB07pGeOrIa6r6x+feZI7BUBGAPBDmlu9+pO3WqrX+d2x14RgDFQisFq2eX2w6s79bndlGGACUewY9XOXQQ6kAlKMQCQGYIdADJDsANAZgh2AMgMwQ4AmSHYASAzBDsAZIZgB4DMEOwAkBmCHQAyQ7ADQGYIdgDIDMEOAJkh2AEgMwQ7AGRm7GA3s51m9piZnTSzE2b2kRALAwCMJsQHbSxL+pi7P2FmL5e0YGaPuvvPA9w3ENzC6fM6+vSvdeO1r9QN11wZezlAcGMHu7v/UtIvL/z7b83spKSrJRHsSM7C6fP660NH1V7uaGbLlO6780bCHdkJWmM3szlJb5H005D3C4Ry9Olfq73cUcelpeWOjj7969hLqoTWYkuHjh9Sa7EVeymVEOwzT83sZZK+Iemj7v6bHr9+QNIBSZqdnQ31sMBQbrz2lZrZMqWl5Y6mt0zpxmtfGXtJ2WsttrT/kf1qr7Q1U5vR/J551bfVYy8ra0GC3cymtRrq97n7A71u4+4HJR2UpEaj4SEeFxjWDddcqfvuvJEae4ma55pqr7TVUUdLnSU1zzUJ9oKNHexmZpLulXTS3b8w/pK
AYt1wzZUEeoka2xuaqc1oqbOk6alpNbY3Yi8peyF27G+X9EFJx82sW0D7B3f/boD7BjDh6tvqmt8zr+a5phrbG+zWSxCiK+bHkizAWgBkqr6tTqCXiJOnAKKgU6Y4wbpigBRw+Ggy0ClTLIId2eDw0eSgU6ZYlGKQDQ4fTY5up0zNanTKFIAdO7LB4aPJQadMscy9/LNCjUbDm81m6Y+L/FFjR87MbMHd+769YceOrHD4CKDGDgDZIdgBIDMEOwBkhmAHgMwQ7MjawunzuuexU1o4fT72UoDS0BWDbHESFVXFjh3Z4iQqqopgR7a6J1FrJk6iolIoxSBbfAweqopgR9Y4iYoqohQDFISOHMTCjh0oAB05iIkdO1AAOnIQE8EOFICOHMREKQYoAB05iIlgRxhnjknPHJHmdks7d8VeTRLoyEEsBDvGd+aY9NV90kpbqs1Itx8m3IGIqLFjfM8cWQ11X1n9+syR2CsCStdabOnQ8UNqLbZiL4UdOwKY2726U+/u2Od2x14RUKrWYkv7H9mv9kpbM7UZze+Zj/oB3QQ7xrdz12r5JaMaOx+KjWE0zzXVXmmro46WOktqnmsS7MjAzl1ZBLrE4SIMr7G9oZnajJY6S5qemlZjeyPqegh2YJ1eh4vWBzs7eqxV31bX/J55Nc811djeiLpblwIFu5l9WdKtkhbd/c0h7hOJqGAbY/dw0dJy5+LhorVBLokdPS5T31aPHuhdoXbsX5H0RUn/Eej+kIKKtjGuP1wkXRrkt12/o++OHogpSLC7+4/MbC7EfSEhvdoYKxDs0qWHi+557NQlQW7SZTt6ICXU2LEx2hglXV6aue36Hbrt+h3U2JEsc/cwd7S6Y39ooxq7mR2QdECSZmdnbzh9+nSQx0XBKlhj73VhNJWLpamsA3GY2YK79225KW3H7u4HJR2UpEajEebVBMXrhnn3NGnm4b5Rq2MKc19ow8SgGCmAzXUvoP7g06tfzxyLvaJCpTxHPeW1IS1Bgt3M7pf0E0mvN7OzZnZHiPtFAio2ByblOeopr60KBp0Fk8LMmFBdMR8IcT9IUMUuoIaYo15UHZwZ7/EMOgsmlZkxdMVgcxnOgellfRiPGppF18G799UtwxDu5dhsFkxrsXXxxGkqM2MIdvSX0RyYXhZOn9cH5o9ebGe8f//oYTzIOIJx18oF1PJtNAtm/Q79rrfelcTMGIIdlffAE2fVXu5IktrLHT3wxNmRw7LXOIKQin7hQG8bzYJZv0N/of1CEjNjCHZU3vre23F6cYuugxf9woGN9ZoF02snn8LMmGAHlIbRaDS82WyW/rjoo4KHkaQLpZiDP9HSimu6Zrr/wNuS3gVzSCkta2vsRQf6oAeUCPaUlRm0FR341UVYYhIkd/IUQyozaM8ck374z9Ly7yV1KjfwS1K0k6W8oKAIBHuqypqsePEF5EKoa6oS/epl2Sy46XBBUQj2VJV1MOjiC0hHsinp2ndI7/hEpXbrRekX3HS4oCgEe6rKOhi0/gWEUA+mX3DT4VI9ZV1oJdhTVsbBoFFeQCraPTOsfsHNiIBqKXPcAMGO4V5AKtg9M+oFzkGCO4VxwChHmeMGCHYMp2IflzfuBU6CG10bjSUoAsFOWWE4FZv2yAVOhLLRWIIiVDvYK1hWGFtFpj12FXWBk/71aipr3EC1g71iZYW+Bn33kvm0x7WKuMBJ/zqKVu1gr1hZYVO8e9lQvzr5sLtvyjsoWrWDvYiywqTW7Hn3MpD1IT7K7pv+dRSt2sEuhS0rTPKul3cvffUK8VF23/Svo2gE+zD67cYnedc7QRdFY1147BXio+6+aYNEkQj2QQ2yG5/0Xe8g714il5piXnjsFeLsvpEign1Qg+zGU9n1FhW+CZSaYl543CjE2X0jNQT7oAbdjcduBRw2fId5EUig1NTdNbeXOzIzXfnSmVIfnxDHJJiKvYCJ0d2N33R32hdFe4XvRrovAj/49OrXM8c2v+/ui5vVopW
abrjmSn3y1jdpykwrHdenHjqhhdPnS18HkDJ27MOIvRsfxDB1/mF34ImUms6/2FbHXa60+sA5TYpUEOy5GSZ8R7nYm8CLW4p94JwmRUoI9hSNe/Fz0PBNZAc+rBQ7UThNipQQ7Kkpu/Nk2B14IidrU7uImeK7CFQXwZ6aBDpPNpRAu2OqUnwXgeoi2FPTq+6dyC456RedBKT2LgLVFSTYzewWSf8qqSbpkLv/S4j7raT1dW8pnV3ypJ+sjYRuGZRt7GA3s5qkeyT9qaSzkh43s8Pu/vNx77uy1ta9j3w+nV3yhF5sjYluGcQQYse+S9Ipd39akszsa5LeLYlgDyG1XXKR7Y6plJwColsGMYQI9qslnVnz/VlJf7z+RmZ2QNIBSZqdnQ3wsBVRlV1yphdmr3zpjKbMJDndMihNiJEC1uO/+WX/wf2guzfcvbF169YAD1shO3dJuz+WRdBtaJhRCBNi4fR5feqhE1rpuKbM9Mlb33TJbn3h9Hnd89gpRiIguBA79rOSdq75foekZwPc72gyfDtfCamVnALolmFckrvr/Ivti79G7R1FChHsj0t6nZm9RtL/SHq/pL8KcL/Dy/TtfOlivDhmWHK6OIly6fJJlNTeUaSxSzHuvizpw5IelnRS0tfd/cS49zuSDN/Ol27YiY8hZVZyujiJcsrU8UsnUXZDv2ai9o7ggvSxu/t3JX03xH2NJcO386ULcQgp43LYsD3p3UmU63fmnFRFkfI6eZrh2/nSjfvimHE5bJS6+GYzZCb9pGprsaXmuaYa2xuqb6vHXg7WyCvYpSTGyk60cV8cEx07EOL05yh18Vx35q3Flu54+A4tdZY0PTWte991L+GekPyCHeMb58UxwXJYqA6UUSc4TvrOvJfDTx1Wu7Pa5dPutHX4qcMEe0IIdoSVYDksVAdKrrvvUdi64yvrv0dckx/sGV+om1i9dvwR/5xCzkrPcfc9ir3X7dW3Tn3rYilm73V7Yy8Ja5j7ZYdEC9doNLzZbI5/RxlfqCtMjIBN4M+JCYvh9bp4ygXVYpnZgrs3+t1usnfsiV6oS1asgE3gz4mddnj1bfVLwru12NL+R/arvdLWTG1G83vmCfdIQsyKiad7oc5qyVyoG8uZY6tjeos6FBTrAFduf04V01ps6dDxQ2ottja9XfNcU+2VtjrqaKmzpOa5AO/KMZLJ3rEneKFuZGXspmN1rGTy51TFcs4wu/DG9oZmajMX6+6N7X0rBijIZAe7lE/fehnlipgBm8Cf0zjBXNWhXb124RsFe31bXfN75qmxJ2Dygz0XZe2mEwjYGMYN5qoO7eruwtsrbZlMV8xcsent19fdEcdk19hz0t1N33R3dbt7CrzG0CuYh1HVoV31bXXd9da7NGVT6nhHn338s31r7YiPHXtKKrqbllT4NYZxe9mrfDjphfYL6nhnoHJMP7RDlmOygp3DSMWJ/dwWfI0hRDBXtWVynIuia4NcEu2QJZmcYE/gkEu2UnhuS7jGUNVgHteoF0XXd9Tsu27fwBdiMZ7JCfYEDrlkK4XnNrGWyCq2Nm5mmIui3V36s7979pIgdzntkCWZnGBPcGpgNlJ5bhO5xlDV1sYQ1u7St0xt0ZapLVrxFU1PTWvfdfu077p91NhLMDnBntiOLis8t5eoamtjCGv73ld8Re993Xv16pe9+pIgJ9CLNznBLiWzo8sSz+1FIadBVs36C617r9tLkEcw2dMdgYJUscYeqhWxez9XzFyhF9ovUHYJqBrTHYGCVK2DJuRkxu7vo7UxHk6eYmNFT5tEMkJPZmTSY1zs2NFbCr3tKM2wh5D6lW2Y9BgXwY7eUuhtR2mGOYQ0SNmGSY9xEewbyPri2SDjA1LpbUdpBj2ENOgoXyY9xkOw95D1AZVBSyz0tmMDlFnSR7D3kPUBlWFKLPS2owfKLOmjK6aHrGdv8/mjKMign42K4rFj7yHr2duUWDCmXhdPJfrWUzJWsJvZ+yT9o6Q3SNr
l7tk0q2Z9QIUSC8awUY86I3nTMe6O/UlJt0n69wBrATABNrp4utEFVT41qXxBZsWY2Q8l/d2gO3ZmxQCTrdc8GEmXBXjIUQVgVgyAAm00D+bOP7zzktsN2vOOsPp2xZjZ983syR7/vHuYBzKzA2bWNLPmc889N/qKASRhkHkw3bJNzWr0vJeo747d3W8O8UDuflDSQWm1FBPiPgHEM8hBpW7P+4NPPSgXf+3LQikGwEiGOah0+KnDaq+09eBTD1JnL8G47Y7vkfRvkrZK+o6Ztdz9XUFWBiB5g8yDoc5evrGC3d2/KembgdYCIEPMlikfpZiAYk+EjP34QC/MlikfwR5I7ImQsR8f2AwjfMtFsAcSayJkd5f+7PP/l+9ESiSLU6VpIthHtL7s0Z0IubTcKW0i5Npd+pYp05balFZWynt8VBunStNFsI9go7JH2RMh175LWOm4/nLXTl39ipdQY0cp6HZJF8E+go3KLmVPhFz/LuG91+8g0FEaul3SRbCPIEbZpZes58YjeXS7pCvIdMdh5TDdkdZCAGVjumPBsv4gDgATjc88BYDMEOwAkBmCHQAyQ7ADQGYIdgDIDMEOAJkh2AEgMwQ7AGSGYAeAzBDsAJAZgh0AMkOwAwVqLbZ06PghtRZbsZeCCmEIGFAQPmEIsbBjBwrS6xOGgDIQ7EBBup8wVLManzCEUlGKAQqy/hOGJOnQ8UN82hAKR7ADBapvq6u+rU69HaUi2IESbFRv5/NCUQSCHShBt96+1FnS9NS0rpi5gh08CkOwAyVYX2/vtYMn2BHKWMFuZp+TtFdSW9JTkv7G3Z8PsTAgN916e9faHTwdMwjJ3H3032y2R9IP3H3ZzD4jSe7+8X6/r9FoeLNJTy+qrbXYosaOoZjZgrv33QWMtWN390fWfHtU0l+Mc39AlazfwQOhhDyg9CFJ3wt4fwCAEfTdsZvZ9yW9qscv3e3u375wm7slLUu6b5P7OSDpgCTNzs6OtFiUa9hSAaUFIA19g93db97s183sdkm3Snqnb1Kwd/eDkg5KqzX2IdeJkg17oKbf7Qn9wfFcYVzjdsXcIunjkv7E3V8MsySkYNh2vM1uz6nLwfFcIYRxa+xflPRySY+aWcvMvhRgTUjAsAOsNrs9Uw4Hx3OFEMbtinltqIUgLesP1PTbNW52+/WnLunZ3hjPFUIYq499VPSxxxGzdkvdeHA8V9jIoH3sBHtFULsFJt+gwc4HbVQEtVugOgj2iuDTfIDqYLpjRQx7MRTA5CLYK4TZJEA1UIoBgMwQ7ACQGYIdADJDsANAZgh2AMgMwQ4AmYkyUsDMnpN0uuSHvUrSr0p+zJRU/eeXeA6q/vNLk/8cXOPuW/vdKEqwx2BmzUFmLOSq6j+/xHNQ9Z9fqs5zQCkGADJDsANAZqoU7AdjLyCyqv/8Es9B1X9+qSLPQWVq7ABQFVXasQNAJVQq2M3sc2b2X2b2MzP7ppm9IvaaymRm7zOzE2bWMbPsOwO6zOwWM/uFmZ0ys7+PvZ6ymdmXzWzRzJ6MvZZYzGynmT1mZicv/B34SOw1FalSwS7pUUlvdvc/kvTfkj4ReT1le1LSbZJ+FHshZTGzmqR7JP2ZpDdK+oCZvTHuqkr3FUm3xF5EZMuSPubub5B0o6S/zfn/g0oFu7s/4u7LF749KmlHzPWUzd1PuvsvYq+jZLsknXL3p929Lelrkt4deU2lcvcfSfrf2OuIyd1/6e5PXPj330o6KenquKsqTqWCfZ0PSfpe7EWgcFdLOrPm+7PK+C80+jOzOUlvkfTTuCspTnafoGRm35f0qh6/dLe7f/vCbe7W6luz+8pcWxkG+fkrxnr8N1rBKsrMXibpG5I+6u6/ib2eomQX7O5+82a/bma3S7pV0js9w17Pfj9/BZ2VtHPN9zskPRtpLYjIzKa1Gur3ufsDsddTpEqVYszsFkkfl7TP3V+MvR6U4nFJrzOz15jZjKT3SzoceU0omZmZpHslnXT
3L8ReT9EqFeySvijp5ZIeNbOWmX0p9oLKZGbvMbOzkt4m6Ttm9nDsNRXtwsXyD0t6WKsXzL7u7ifirqpcZna/pJ9Ier2ZnTWzO2KvKYK3S/qgpJsu/N1vmdmfx15UUTh5CgCZqdqOHQCyR7ADQGYIdgDIDMEOAJkh2AEgMwQ7AGSGYAeAzBDsAJCZ/wfwkPjuAc1V6gAAAABJRU5ErkJggg==\n", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXYAAAD8CAYAAABjAo9vAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAAFiZJREFUeJzt3X2MXFd5x/Hfs5OdFglkosRrotiOSaAVNKqGZDBGKCoKaQgocUQqJEiFogK2KpUKJKrQNFJf/mobBGolIrXeNYJKKQg1EEwhyosUhJHqOLPRFDuYFyeKazfRrkONASFldnee/jE7Zjye3Zk7c+89d879fiRrM97J3DO7ye+eee5zzjV3FwAgHjOhBwAASBfBDgCRIdgBIDIEOwBEhmAHgMgQ7AAQGYIdACJDsANAZAh2AIjMZSEOeuWVV/quXbtCHBoAptbi4uIr7r512POCBPuuXbvUaDRCHBoAppaZnRrleZRiACAyBDsARIZgB4DIEOwAEBmCHQAiQ7ADQGQmDnYz+20zO2pm/21mz5nZ36UxMAD5ay43tXBsQc3lZuihYAJp9LG/Kulmd/+Vmc1K+r6ZPeruR1J4bQA5aS43te/xfWqttVStVDV/67xqc7XQw8IYJp6xe8ev1h/Orv/hRqrAlGksNdRaa6mttlbaK2ossYhwWqVSYzezipk1JS1LesLdnx7wnP1m1jCzxtmzZ9M4LIAU1bfVVa1UVbGKZmdmVd9WDz0kjMnc05tcm9nrJX1D0p+7+/GNnlev150tBYDiaS431VhqqL6tThmmgMxs0d2HnnFT3SvG3X9uZt+VdJukDYMdQDHV5moEegTS6IrZuj5Tl5m9RtItkn406esCAMaTxoz9KklfNrOKOieKr7n7f6bwugCAMUwc7O7+A0lvS2EsAIAUsPIUACJDsANAZAh2AIgMwQ4AkSHYASAyBDsARIZgB4DIEOwAEBmCHYOdPiod/lznK4CpkuomYIjE6aPSl/dKay2pUpXuOSTt2B16VABGxIwdl3rxcCfUfa3z9cXDoUcEIAGCHZfadVNnpm6VztddN4UeEYAEKMXgUjt2d8ovLx7uhDplGGCqEOwYbMduAh2YUpRiACAyBDsARIZgB4DIEOwAEBmCHQAiQ7ADQGQIdgCIDMEOAJEh2AEgMgQ7AESGYAeAyBDsABAZgh0AIkOwA0BkCHYAiMzEwW5mO8zsKTM7YWbPmdkn0xgYAGA8adxoY1XSp939WTN7naRFM3vC3X+YwmsDmVo8dU5HXviZ9lx7hW685vLQwwFSMXGwu/vLkl5e/+dfmtkJSVdLIthRaIunzumPF46otdpW9bIZPfTxPYQ7opBqjd3Mdkl6m6SnB3xvv5k1zKxx9uzZNA8LjOXICz9Ta7Wttksrq20deeFnoYcEpCK1YDez10p6WNKn3P0X/d939wPuXnf3+tatW9M6LDC2PddeoeplM6qYNHvZjPZce0XoIUWludzUwrEFNZeboYdSOqnczNrMZtUJ9Yfc/etpvCaQtRuvuVwPfXwPNfYMNJeb2vf4PrXWWqpWqpq/dV61uVroYZXGxMFuZibpoKQT7v75yYcE5OfGay4n0DPQWGqotdZSW22ttFfUWGoQ7DlKoxTzLkkfkXSzmTXX/7w/hdcFMKXq2+qqVqqqWEWzM7Oqb6uHHlKppNEV831JlsJYAESiNlfT/K3zaiw1VN9WZ7aes1Rq7ADQrzZXI9ADYUsBRGvx1D
k9+NRJLZ46F3oo6EG3TPaYsSNKLD4qJrpl8sGMHVFi8VExDeqWQfoIdkSJxUfFRLdMPszdcz9ovV73RoMzNbLFBl/F1Fxu0i0zJjNbdPehZ0OCHQCmxKjBTikGACJDsANAZAh2AIgMwQ4AkSHYURqsREVZsPIUpcBKVJQJM3aUAitRUSYEO0qBlagoE0oxKAVug4cyIdhRGtwGD2VBKQYAIkOwAzmh3RJ5oRQD5IB2S+SJGTuQA9otkSeCHdk4fVQ6/LnOV9BuiVxRikH6Th+VvrxXWmtJlap0zyFpx+7QowqKdkvkiWBH+l483Al1X+t8ffFw6YNdot0S+aEUg/TtuqkzU7dK5+uum0KPCMhcc7mphWMLai43Qw+FGTsysGN3p/zy4uFOqDNbR+Say03te3yfWmstVStVzd86H/R+rgQ7srFj99QHOjfDxqgaSw211lpqq62V9ooaSw2CHSga+s6RRH1bXdVKVSvtFc3OzKq+bej9pjNFsGN0p4+WprwyqO98ULAzq4ck1eZqmr91Xo2lhurb6kFn61JKwW5mX5R0u6Rld78+jddEwZSshbHbd76y2r6o77w3yCUxq8cFtbla8EDvSmvG/iVJX5D0bym9HoqmZC2Mg/rO+8szd92wfaRZPZC3VILd3b9nZrvSeC0UVLeFsTtjL0ELY3/feX95xqSBs3ogtNxq7Ga2X9J+Sdq5c2deh0VaaGG8pDxz1w3bddcN26mxo3ByC3Z3PyDpgCTV63XP67jAuPovjG60LQCBjqKhKwajKdnF043aHdkWANOALQUwmkEXTyNW5G12uWFH8YXeXiCtdsevSHq3pCvN7Iykv3H3g2m8NgqiZBdPN2p3DI2FU2E1l5tDe9WLsL1AWl0xH07jdVBgJbt4WtRtdkddOIX0bRbYvYFfhO0FqLFjdBHs/7KZf3/6f/To8Zf1vuuv0t3v2DlRPT2rFalF/SRRBhsFdn/g3/v2e4NvL0CwA+qE+l9945gk6fBPX5Ek3f2O8dpysyyXFPWTRBlstB9Mf+Cfb50Pvr0AwY6LlWg/mF6PHn/5ksfjBjvlkjhttB/MoMAPvb0AwY7fKFlLY6/3XX/VhZl69/G4siyXcPE0rEGBXbQNwCSCvfjymkGfPip99++l1VcltUuxH0yv7uy8t8Y+rizLJXwaKKbQM/R+BHuR5TWDvnCc9VDXTClaGvvd/Y6dEwV6r6wWMnHxFKMg2Issrx0VLxynLdmMdO27pXffV5rZ+jTh4ilGQbAXWV6LgvqPQ6inIquWR7Y1wDAEe5HltSgo6XFK2jmTxKgXObkDU7mMsnI1DQR70eW1KGjU45S4cyaJUS5y0uFSLnluNcAmYEimZJuBjat7kbNi2vAiZ5E3GkP6Bq1czQoz9l6UGIYr2WZg0njlklEuctLhUi4brVzNgrnnf8+Ler3ujUZ2Z6uxUGIYXYlOgFmXS6ixl8ukNXYzW3T3oWcEZuxdJbtZ80CjBnbkm4H1ynpBEB0u5ZLXQiaCvauEJYaL8IllIMolmEYEe1fJ9hu/BJ9YBspqQRAlGGSJYO+VZolh2urQZf/Esom0yyW0OSJrBHtSowT2NJY1puwTS6gZ76DjJh0LG3khawR7EqMG9rSWNZIsUgp4Agg14x10XEmJx0LdHlkj2JMYNbBjLmsU4NNIqBnvRguKko6FjbyQNYI9iVEDO3RZI8sZdQE+jey59gpdVunMeCuV/Ga8G820x5l90+aILBHsSSQJ7FC93kln1ElPAkX5NNJdWJfjAruNZtrMvlE0BHtSRV+ck2RGPU5ZJfSnEXVKIqttl0taa3uuFx8HzbSZfaNoCPbYJJlRj1tWCXxy4+IjsDmCPTZJZtRFKaskVMSLjyw4QpGwCVhR5XkT6yTHmbaFVzlgwRHywiZg0yzPlsIkZZUCtDoWEQuOUDTcaKOIBtW+Tx+VDn+u87VI48JIN9UA8sSMvYj6a9+vuaIYM+UprclnrYg1f5RbKsFuZr
dJ+mdJFUkL7v4PabxuafVfAC3AoqCB46IMcwEtjyiSiYPdzCqSHpT0h5LOSHrGzA65+w8nfe1S6699F2WmnGWrY6QXZumYQd7SmLHvlnTS3V+QJDP7qqQ7JRHsaSnDTDnSC7N0zCCENC6eXi3pdM/jM+t/dxEz229mDTNrnD17NoXDlsyO3dJNn44i7AaK9MLsRhuHAVlKI9htwN9d0hzv7gfcve7u9a1bt6Zw2A0UoXsEyXUvzFolfLkpRXTMIIQ0SjFnJO3oebxd0kspvG5ykX6cz12IWneE5aZubf2vb/89nft1ixo7cpNGsD8j6c1m9kZJ/yvpQ5LuTuF1kytK98g0C3lyLPoGawkMq61zQRVZmjjY3X3VzD4h6TF12h2/6O7PTTyycdBnPbk0To6Rdrck0Vtbf3WlrYefPXPRrfS4oIospdLH7u7fkfSdNF5rIhF+nM/dpCdHymGS1m8GMmNqrXW2F/6PxTP6oxu268ZrLmcLAmQuvi0FYu8eyVr35Hjz/eOFcqTdLVJnpv3gUye1eOrc0OfeeM3l+mB9x4XOgrW133TExHJBtbnc1MKxBTWXm6GHgj5sKYBLTVLrLmA5LI169jjlk7tu2K6Hnz1zyb7xMWxB0Fxu6mOPfUwr7RXNzszq4HsPqjZXCz0srCPYka6ClcPSqmePUz7ZLMCnfQuCQ88fUqvdkiS12i0dev4QwV4g0x/sXKgrno1m/AF+V2nVs8e9a9O0B/hGrG/5Sv9jhDXdwc6FumRCngQD/a7Suo1eDOWTNN1x3R165OQjF0oxd1x3R+ghocd0Bzt966MLfRIM9LtKM5BjnX2PozZX08H3HlRjqaH6trpqczU1l5sXPUY40x3sBbxQV1ihT4IBf1cEcjZqc7ULAd5cbmrf4/vUWmupWqlq/tZ5wj2g6Q72gl2om0jWZZLQJ8GYflclNGw23lhqqLXWUlttrbRX1FhqEOwBTXewS3EsQ8+jTFKEYI3hd1VCo8zG69vqqlaqF2ru9W1D77eMDE1/sMcgrzJJkYM1408sk/ayl3lvl1Fm47W5muZvnafGXhAEexGELpOElvEnlkl72cu+t0t3Nt5aa8lk2lLdMvB5vTV3hDVdWwrEutf6pMv4p13G2xBMerOLst8sozZX071vv1czNqO2t/XAMw+wjUDBTc+MPXS7XtZCl0lC9rhn/Ill0l72tHrhp9n51nm1vT3xxVFaIvMxPcEeul0vZqFPmhlf2J20l53FSeNfHO0Nckm0ROZkeoK97HXoLBXhpJnxJ5ZJe9nL3guf9OJoc7mpbz3/LT1y8hGttldVrVS197q9tETmZHqCvQjterHipIkRjHpxtNse+eraq/L12x+vtFfkcloiczI9wS6Fr0PHipPmRcrc2piGbntkN9RNptmZWe29bq/2XreXGnsOpivYkR1OmpJobUxDbz2+YhXd+aY7tfe6vReCnEDPHsEO9OC2dZPrrcdvqW7R+db50EMqHYId6FH21sa02hG7/y5dMGEQ7NhYCW9iUubWxrR3aGRjsHAIdgwWurc9oLK2NqYdxGwMFg7BXkajzMSL0NuOXCUN4mFlGzYGC4dgL5tRZ+L0tpdOkiAetWzDxmBhEOxlM+pMnN72Uho1iKmfFxvBPkDUC1SSzMTpbccGqJ8Xm7l77get1+veaDRyP+4oSrFApYTdLkjfoBo7uzdmy8wW3X3oWZQZe59SLFBhJo4UdIO7sfSbSRp968VAsPcp+wIVYFT9F1DZvbE4Jgp2M/ugpL+V9BZJu929mPWVBMq8QAVIov8CKrs3FsekM/bjku6S9K8pjKUwyrpABUiif7MvSbr37ffqfOv8JTV2au/5mijY3f2EJJlZOqMBMDW6fe/dG2o8/JOHB9bW096qAMPldjNrM9tvZg0za5w9ezavwwLIUG2upqtee5VW26sX1dZ7Dep5R7aGBruZPWlmxwf8uTPJgdz9gLvX3b2+devW8UcMoFC6JZmKVQbW1n
u/X7GKXvrVS2ouNwONthxS6WM3s+9K+otRL54WuY8dQHLDauiD7oFKSSY5+tgB5GbYVgS1uZoaS41LSjYEezYmqrGb2QfM7Iykd0r6tpk9ls6wAMRmWMkG6WFLgYhEvccNokDb42QoxeQsdKiWYo8bTD228c0HwZ6C0KG6eOqc/unJn8S/xw2AkeTWxx6zQRuH5aV7Uvn+T19R26UZE3vcIHfN5aYWji3QxlgQzNgTGlRyCblxWPek4uqcpd/1piv1qVt+h9k6csPK0uIh2BPYqOQScuOw/pMKoY68cTel4iHYE9hsr/ZQG4exGyVC425KxUOwJ1DUvdrZjRIhJbkJNvJBH3tCodsaAZQXfewZYXYMoOhodwSAyBDsABAZgh0AIkOwA0BkCHYAiAzBDgCRIdgBIDIEOwBEhmAHgMgQ7AAQGYIdyBA3oEAI7BUDZIQbUCAUZuxARgbdgALIA8EOZKR7A4qKVTQ7M6st1S2UZZALSjFARnpvQLGlukUPPPMAZRnkgmAHMlSbq6k2V9PCsYWBZRnuOoQsEOxADvrvC7qluoULq8gMwQ7koP++oIMurBLsSAvBDuSkW5bp6p3B17cNvY0lMLKJgt3MPivpDkktSc9L+hN3/3kaAwNi1j+DZ7aONE06Y39C0n3uvmpm/yjpPkmfmXxYQPz6Z/BAWibqY3f3x919df3hEUnbJx8SAGASaS5Q+qikR1N8PQDAGIaWYszsSUlvGPCt+939m+vPuV/SqqSHNnmd/ZL2S9LOnTvHGizy01xuJqr/Jn0+gOwMDXZ3v2Wz75vZPZJul/Qed/dNXueApAOSVK/XN3wewku6eRWbXaWLkyQmNWlXzG3qXCz9A3f/dTpDQmhJe6yHPZ+gGh0nSaRh0q6YL0j6LUlPmJkkHXH3P514VAiqf5XksB7rzZ5PUCXDwiWkYaJgd/c3pTUQFEfSHuvNnk9QJZP0pAoMwsrTkkhaDknaY73R8wmqZFi4hDTYJtc7M1Ov173R4KYDeQldDqHGDqTDzBbdfejsiBl7CYQuh7DCEsgXd1Aqgf47+VAOAeLGjL0EqNsC5UKwlwTlEKA8KMUAQGQIdgCIDMEOAJEh2AEgMgQ7AESGYAeAyATZUsDMzko6lfuBpSslvRLguEVR9vcv8TMo+/uXpvtncI27bx32pCDBHoqZNUbZZyFWZX//Ej+Dsr9/qRw/A0oxABAZgh0AIlO2YD8QegCBlf39S/wMyv7+pRL8DEpVYweAMijbjB0Aole6YDezz5rZj8zsB2b2DTN7fegx5cnMPmhmz5lZ28yi7gzoZWa3mdmPzeykmf1l6PHkzcy+aGbLZnY89FhCMLMdZvaUmZ1Y/+//k6HHlKXSBbukJyRd7+6/L+knku4LPJ68HZd0l6TvhR5IXsysIulBSe+T9FZJHzazt4YdVe6+JOm20IMIaFXSp939LZL2SPqzmP8bKF2wu/vj7r66/vCIpO0hx5M3dz/h7j8OPY6c7ZZ00t1fcPeWpK9KujPwmHLl7t+T9H+hxxGKu7/s7s+u//MvJZ2QdHXYUWWndMHe56OSHg09CGTuakmnex6fUcT/U2NzZrZL0tskPR12JNmJ8g5KZvakpDcM+Nb97v7N9efcr87Hs4fyHFseRnn/JWMD/o52sBIys9dKeljSp9z9F6HHk5Uog93db9ns+2Z2j6TbJb3HI+z3HPb+S+iMpB09j7dLeinQWBCImc2qE+oPufvXQ48nS6UrxZjZbZI+I2mvu/869HiQi2ckvdnM3mhmVUkfknQo8JiQIzMzSQclnXD3z4ceT9ZKF+ySviDpdZKeMLOmmf1L6AHlycw+YGZnJL1T0rfN7LHQY8ra+sXyT0h6TJ2LZl9z9+fCjipfZvYVSf8l6XfN7IyZfSz0mHL2LkkfkXTz+v/3TTN7f+hBZYWVpwAQmTLO2AEgagQ7AESGYAeAyBDsABAZgh0AIkOwA0BkCHYAiAzBDgCR+X8tA7ZLJcL0Mg
AAAABJRU5ErkJggg==\n", "text/plain": [ "
" ] }, - "metadata": {}, + "metadata": { + "needs_background": "light" + }, "output_type": "display_data" } ], diff --git a/mxfusion/__version__.py b/mxfusion/__version__.py index 8473f95..0a2127a 100644 --- a/mxfusion/__version__.py +++ b/mxfusion/__version__.py @@ -13,4 +13,4 @@ # ============================================================================== -__version__ = '0.3.0' +__version__ = '0.3.1' diff --git a/mxfusion/common/exceptions.py b/mxfusion/common/exceptions.py index 2ea8690..4a082cf 100644 --- a/mxfusion/common/exceptions.py +++ b/mxfusion/common/exceptions.py @@ -16,8 +16,10 @@ class ModelSpecificationError(Exception): pass + class InferenceError(Exception): pass + class SerializationError(Exception): pass diff --git a/mxfusion/components/distributions/bernoulli.py b/mxfusion/components/distributions/bernoulli.py index fa9aa77..d61c0f0 100644 --- a/mxfusion/components/distributions/bernoulli.py +++ b/mxfusion/components/distributions/bernoulli.py @@ -45,7 +45,8 @@ def replicate_self(self, attribute_map=None): """ This functions as a copy constructor for the object. In order to do a copy constructor we first call ``__new__`` on the class which creates a blank object. - We then initialize that object using the methods standard init procedures, and do any extra copying of attributes. + We then initialize that object using the methods standard init procedures, and do any extra copying of + attributes. Replicates this Factor, using new inputs, outputs, and a new uuid. Used during model replication to functionally replicate a factor into a new graph. @@ -85,7 +86,7 @@ def draw_samples_impl(self, prob_true, rv_shape, num_samples=1, F=None): :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). 
:returns: a set samples of the Bernoulli distribution :rtypes: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/distributions/beta.py b/mxfusion/components/distributions/beta.py index 760f544..0ba9121 100644 --- a/mxfusion/components/distributions/beta.py +++ b/mxfusion/components/distributions/beta.py @@ -81,7 +81,7 @@ def draw_samples_impl(self, alpha, beta, rv_shape, num_samples=1, F=None): :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the beta distribution. :rtypes: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/distributions/categorical.py b/mxfusion/components/distributions/categorical.py index b937ce8..9f56a86 100644 --- a/mxfusion/components/distributions/categorical.py +++ b/mxfusion/components/distributions/categorical.py @@ -51,7 +51,8 @@ def __init__(self, log_prob, num_classes, one_hot_encoding=False, rand_gen=rand_gen, dtype=dtype, ctx=ctx) if axis != -1: - raise NotImplementedError("The Categorical distribution currently only supports the last dimension to be the class label dimension, i.e., axis == -1.") + raise NotImplementedError("The Categorical distribution currently only supports the last dimension to be " + "the class label dimension, i.e., axis == -1.") self.axis = axis self.normalization = normalization self.one_hot_encoding = one_hot_encoding @@ -61,7 +62,8 @@ def replicate_self(self, attribute_map=None): """ This functions as a copy constructor for the object. In order to do a copy constructor we first call ``__new__`` on the class which creates a blank object. - We then initialize that object using the methods standard init procedures, and do any extra copying of attributes. 
+ We then initialize that object using the methods standard init procedures, and do any extra copying of + attributes. Replicates this Factor, using new inputs, outputs, and a new uuid. Used during model replication to functionally replicate a factor into a new graph. @@ -112,7 +114,7 @@ def draw_samples_impl(self, log_prob, rv_shape, num_samples=1, F=None): :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the Categorical distribution :rtypes: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/distributions/distribution.py b/mxfusion/components/distributions/distribution.py index 6c3a44b..8914d60 100644 --- a/mxfusion/components/distributions/distribution.py +++ b/mxfusion/components/distributions/distribution.py @@ -93,14 +93,16 @@ def log_cdf(self, F=None, **kwargs): def draw_samples(self, F, variables, num_samples=1, targets=None, always_return_tuple=False): """ - Draw a number of samples from the distribution. All the dependent variables are automatically collected from a dictionary of variables according to the UUIDs of the dependent variables. + Draw a number of samples from the distribution. All the dependent variables are automatically collected from a + dictionary of variables according to the UUIDs of the dependent variables. :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :param variables: the set of variables where the dependent variables are collected from. :type variables: {str(UUID): MXNet NDArray or Symbol} :param num_samples: the number of drawn samples (default: one). - :int num_samples: int - :param always_return_tuple: return the samples in a tuple of shape one. This allows easy programming when there are potentially multiple output variables. 
+ :type num_samples: int + :param always_return_tuple: return the samples in a tuple of shape one. This allows easy programming when there + are potentially multiple output variables. :type always_return_tuple: boolean :returns: a set samples of the distribution. :rtypes: MXNet NDArray or MXNet Symbol or [MXNet NDArray or MXNet Symbol] @@ -122,7 +124,7 @@ def draw_samples_impl(self, rv_shape, num_samples=1, F=None, **kwargs): :param rv_shape: the shape of each sample. :type rv_shape: tuple, [tuple] :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the distribution. :rtypes: MXNet NDArray or MXNet Symbol or [MXNet NDArray or MXNet Symbol] diff --git a/mxfusion/components/distributions/gamma.py b/mxfusion/components/distributions/gamma.py index 414fe9b..66e1ea8 100644 --- a/mxfusion/components/distributions/gamma.py +++ b/mxfusion/components/distributions/gamma.py @@ -63,8 +63,8 @@ def draw_samples_impl(self, alpha, beta, rv_shape, num_samples=1, F=None): Draw samples from the Gamma distribution. :param rv_shape: the shape of each sample. :type rv_shape: tuple - :param nSamples: the number of drawn samples (default: one). - :int nSamples: int + :param num_samples: the number of drawn samples (default: one). + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the Gamma distribution. :rtypes: MXNet NDArray or MXNet Symbol @@ -79,6 +79,10 @@ def define_variable(alpha=0., beta=1., shape=None, rand_gen=None, """ Creates and returns a random variable drawn from a Gamma distribution parameterized with a and b parameters. 
+ :param alpha: beta parameter of the Gamma random variable (also known as rate) + :type alpha: float + :param beta: alpha parameter of the Gamma random variable (also known as shape) + :type beta: float :param shape: the shape of the random variable(s). :type shape: tuple or [tuple] :param rand_gen: the random generator (default: MXNetRandomGenerator). @@ -137,6 +141,10 @@ def log_pdf_impl(self, mean, variance, random_variable, F=None): """ Computes the logarithm of the probability density function (PDF) of the Gamma distribution. + :param mean: mean of the Gamma random variable (alpha / beta) + :type mean: float + :param variance: variance of the Gamma random variable (alpha / beta**2) + :type variance: float :param random_variable: the random variable of the Gamma distribution. :type random_variable: MXNet NDArray or MXNet Symbol :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). @@ -150,14 +158,13 @@ def log_pdf_impl(self, mean, variance, random_variable, F=None): p1 = (alpha - 1.) * F.log(random_variable) return (p1 - beta * random_variable) - (g_alpha - alpha * F.log(beta)) - def draw_samples_impl(self, mean, variance, rv_shape, num_samples=1, - F=None): + def draw_samples_impl(self, mean, variance, rv_shape, num_samples=1, F=None): """ Draw samples from the Gamma distribution. :param rv_shape: the shape of each sample. :type rv_shape: tuple - :param nSamples: the number of drawn samples (default: one). - :int nSamples: int + :param num_samples: the number of drawn samples (default: one). + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the Gamma distribution. 
:rtypes: MXNet NDArray or MXNet Symbol @@ -168,8 +175,7 @@ def draw_samples_impl(self, mean, variance, rv_shape, num_samples=1, ctx=self.ctx) @staticmethod - def define_variable(mean=0., variance=1., shape=None, rand_gen=None, - dtype=None, ctx=None): + def define_variable(mean=0., variance=1., shape=None, rand_gen=None, dtype=None, ctx=None): """ Creates and returns a random variable drawn from a Gamma distribution parameterized with mean and variance. diff --git a/mxfusion/components/distributions/gp/cond_gp.py b/mxfusion/components/distributions/gp/cond_gp.py index da7674d..6176a87 100644 --- a/mxfusion/components/distributions/gp/cond_gp.py +++ b/mxfusion/components/distributions/gp/cond_gp.py @@ -15,11 +15,9 @@ import numpy as np from ....common.config import get_default_MXNet_mode -from ....common.exceptions import InferenceError +from ....common.exceptions import ModelSpecificationError from ...variables.variable import Variable -from ....util.customop import broadcast_to_w_samples from ..distribution import Distribution -from ...variables.runtime_variable import get_num_samples class ConditionalGaussianProcess(Distribution): @@ -46,8 +44,10 @@ class ConditionalGaussianProcess(Distribution): :type Y_cond: Variable :param kernel: the kernel of Gaussian process. :type kernel: Kernel - :param mean_func: the mean function of Gaussian process. - :type mean_func: N/A + :param mean: the mean of Gaussian process. + :type mean: Variable + :param mean_cond: the mean of the conditional output variable under the same mean function. + :type mean_cond: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator). :type rand_gen: RandomGenerator :param dtype: the data type for float point numbers. @@ -55,23 +55,40 @@ class ConditionalGaussianProcess(Distribution): :param ctx: the mxnet context (default: None/current context). 
:type ctx: None or mxnet.cpu or mxnet.gpu """ - def __init__(self, X, X_cond, Y_cond, kernel, mean_func=None, + def __init__(self, X, X_cond, Y_cond, kernel, mean=None, mean_cond=None, rand_gen=None, dtype=None, ctx=None): + if (mean is None) and (mean_cond is not None): + raise ModelSpecificationError("The argument mean and mean_cond need to be both specified.") inputs = [('X', X), ('X_cond', X_cond), ('Y_cond', Y_cond)] + \ [(k, v) for k, v in kernel.parameters.items()] input_names = [k for k, _ in inputs] + if mean is not None: + inputs.append(('mean', mean)) + input_names.append('mean') + self._has_mean = True + else: + self._has_mean = False + if mean_cond is not None: + inputs.append(('mean_cond', mean_cond)) + input_names.append('mean_cond') + self._has_mean_cond = True + else: + self._has_mean_cond = False output_names = ['random_variable'] super(ConditionalGaussianProcess, self).__init__( inputs=inputs, outputs=None, input_names=input_names, output_names=output_names, rand_gen=rand_gen, dtype=dtype, ctx=ctx) - self.mean_func = mean_func self.kernel = kernel + @property + def has_mean(self): + return self._has_mean + @staticmethod - def define_variable(X, X_cond, Y_cond, kernel, shape=None, mean_func=None, - rand_gen=None, minibatch_ratio=1., dtype=None, - ctx=None): + def define_variable(X, X_cond, Y_cond, kernel, shape=None, mean=None, + mean_cond=None, rand_gen=None, minibatch_ratio=1., + dtype=None, ctx=None): """ Creates and returns a set of random variable drawn from a Gaussian process. @@ -85,8 +102,10 @@ def define_variable(X, X_cond, Y_cond, kernel, shape=None, mean_func=None, :type kernel: Kernel :param shape: the shape of the random variable(s) (the default shape is the same shape as *X* but the last dimension is changed to one.) :type shape: tuple or [tuple] - :param mean_func: the mean function of Gaussian process. - :type mean_func: N/A + :param mean: the mean of Gaussian process. 
+ :type mean: Variable + :param mean_cond: the mean of the conditional output variable under the same mean function. + :type mean_cond: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator). :type rand_gen: RandomGenerator :param dtype: the data type for float point numbers. @@ -95,15 +114,15 @@ def define_variable(X, X_cond, Y_cond, kernel, shape=None, mean_func=None, :type ctx: None or mxnet.cpu or mxnet.gpu """ gp = ConditionalGaussianProcess( - X=X, X_cond=X_cond, Y_cond=Y_cond, kernel=kernel, - mean_func=mean_func, rand_gen=rand_gen, dtype=dtype, ctx=ctx) + X=X, X_cond=X_cond, Y_cond=Y_cond, kernel=kernel, mean=mean, + mean_cond=mean_cond, rand_gen=rand_gen, dtype=dtype, ctx=ctx) gp.outputs = [('random_variable', Variable(value=gp, shape=X.shape[:-1] + (1,) if shape is None else shape))] return gp.random_variable def log_pdf_impl(self, X, X_cond, Y_cond, random_variable, F=None, - **kernel_params): + **kernel_params): """ Computes the logarithm of the probability density function (PDF) of the conditional Gaussian process. @@ -127,6 +146,12 @@ def log_pdf_impl(self, X, X_cond, Y_cond, random_variable, F=None, :returns: log pdf of the distribution. 
:rtypes: MXNet NDArray or MXNet Symbol """ + if self._has_mean: + mean = kernel_params['mean'] + del kernel_params['mean'] + if self._has_mean_cond: + mean_cond = kernel_params['mean_cond'] + del kernel_params['mean_cond'] D = random_variable.shape[-1] F = get_default_MXNet_mode() if F is None else F K = self.kernel.K(F, X, **kernel_params) @@ -136,9 +161,10 @@ def log_pdf_impl(self, X, X_cond, Y_cond, random_variable, F=None, LccInvKc = F.linalg.trsm(Lcc, Kc) cov = K - F.linalg.syrk(LccInvKc, transpose=True) L = F.linalg.potrf(cov) - if self.mean_func is not None: - random_variable = random_variable - self.mean_func(F, X) - Y_cond = Y_cond - self.mean_func(F, X_cond) + if self._has_mean: + random_variable = random_variable - mean + if self._has_mean_cond: + Y_cond = Y_cond - mean_cond LccInvY = F.linalg.trsm(Lcc, Y_cond) rv_mean = F.linalg.gemm2(LccInvKc, LccInvY, True, False) LinvY = F.sum(F.linalg.trsm(L, random_variable - rv_mean), axis=-1) @@ -161,13 +187,19 @@ def draw_samples_impl(self, X, X_cond, Y_cond, rv_shape, num_samples=1, :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :param **kernel_params: the set of kernel parameters, provided as keyword arguments. :type **kernel_params: {str: MXNet NDArray or MXNet Symbol} :returns: a set samples of the distribution. 
:rtypes: MXNet NDArray or MXNet Symbol """ + if self._has_mean: + mean = kernel_params['mean'] + del kernel_params['mean'] + if self._has_mean_cond: + mean_cond = kernel_params['mean_cond'] + del kernel_params['mean_cond'] F = get_default_MXNet_mode() if F is None else F K = self.kernel.K(F, X, **kernel_params) Kc = self.kernel.K(F, X_cond, X, **kernel_params) @@ -176,19 +208,18 @@ def draw_samples_impl(self, X, X_cond, Y_cond, rv_shape, num_samples=1, LccInvKc = F.linalg.trsm(Lcc, Kc) cov = K - F.linalg.syrk(LccInvKc, transpose=True) L = F.linalg.potrf(cov) - if self.mean_func is not None: - Y_cond = Y_cond - self.mean_func(F, X_cond) + if self._has_mean_cond: + Y_cond = Y_cond - mean_cond LccInvY = F.linalg.trsm(Lcc, Y_cond) rv_mean = F.linalg.gemm2(LccInvKc, LccInvY, True, False) out_shape = (num_samples,) + rv_shape - L = broadcast_to_w_samples(F, L, out_shape[:-1] + out_shape[-2:-1]) die = self._rand_gen.sample_normal( shape=out_shape, dtype=self.dtype, ctx=self.ctx) rv = F.linalg.trmm(L, die) + rv_mean - if self.mean_func is not None: - rv = rv + self.mean_func(F, X) + if self._has_mean: + rv = rv + mean return rv def replicate_self(self, attribute_map=None): @@ -197,7 +228,7 @@ def replicate_self(self, attribute_map=None): """ replicant = super(ConditionalGaussianProcess, self).replicate_self(attribute_map) - replicant.mean_func = self.mean_func.replicate_self(attribute_map) \ - if self.mean_func is not None else None + replicant._has_mean = self._has_mean + replicant._has_mean_cond = self._has_mean_cond replicant.kernel = self.kernel.replicate_self(attribute_map) return replicant diff --git a/mxfusion/components/distributions/gp/gp.py b/mxfusion/components/distributions/gp/gp.py index 7c9ebdd..95c55b3 100644 --- a/mxfusion/components/distributions/gp/gp.py +++ b/mxfusion/components/distributions/gp/gp.py @@ -15,27 +15,25 @@ import numpy as np from ....common.config import get_default_MXNet_mode -from ....common.exceptions import InferenceError from 
...variables import Variable -from ....util.customop import broadcast_to_w_samples from ..distribution import Distribution -from ...variables.runtime_variable import get_num_samples class GaussianProcess(Distribution): """ The Gaussian process distribution. - A Gaussian process consists of a kernel function and a mean function (optional). A collection of GP random variables follows a multi-variate - normal distribution, where the mean is computed from the mean function (zero, if not given) and the covariance matrix is computed from the kernel - function, both of which are computed given a collection of inputs. + A Gaussian process consists of a kernel function and a mean function (optional). A collection of GP random + variables follows a multi-variate normal distribution, where the mean is computed from the mean function + (zero, if not given) and the covariance matrix is computed from the kernel function, both of which are computed + given a collection of inputs. :param X: the input variables on which the random variables are conditioned. :type X: Variable :param kernel: the kernel of Gaussian process. :type kernel: Kernel - :param mean_func: the mean function of Gaussian process. - :type mean_func: N/A + :param mean: the mean of Gaussian process. + :type mean: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator). :type rand_gen: RandomGenerator :param dtype: the data type for float point numbers. @@ -43,21 +41,31 @@ class GaussianProcess(Distribution): :param ctx: the mxnet context (default: None/current context). 
:type ctx: None or mxnet.cpu or mxnet.gpu """ - def __init__(self, X, kernel, mean_func=None, rand_gen=None, dtype=None, + def __init__(self, X, kernel, mean=None, rand_gen=None, dtype=None, ctx=None): inputs = [('X', X)] + [(k, v) for k, v in kernel.parameters.items()] input_names = [k for k, _ in inputs] + if mean is not None: + inputs.append(('mean', mean)) + input_names.append('mean') + self._has_mean = True + else: + self._has_mean = False output_names = ['random_variable'] super(GaussianProcess, self).__init__( inputs=inputs, outputs=None, input_names=input_names, output_names=output_names, rand_gen=rand_gen, dtype=dtype, ctx=ctx) - self.mean_func = mean_func self.kernel = kernel + @property + def has_mean(self): + return self._has_mean + @staticmethod - def define_variable(X, kernel, shape=None, mean_func=None, rand_gen=None, dtype=None, ctx=None): + def define_variable(X, kernel, shape=None, mean=None, rand_gen=None, + dtype=None, ctx=None): """ Creates and returns a set of random variables drawn from a Gaussian process. @@ -65,10 +73,11 @@ def define_variable(X, kernel, shape=None, mean_func=None, rand_gen=None, dtype= :type X: Variable :param kernel: the kernel of Gaussian process. :type kernel: Kernel - :param shape: the shape of the random variable(s) (the default shape is the same shape as *X* but the last dimension is changed to one). + :param shape: the shape of the random variable(s) (the default shape is the same shape as *X* but the last + dimension is changed to one). :type shape: tuple or [tuple] - :param mean_func: the mean function of Gaussian process. - :type mean_func: N/A + :param mean: the mean of Gaussian process. + :type mean: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator). :type rand_gen: RandomGenerator :param dtype: the data type for float point numbers. 
@@ -76,8 +85,8 @@ def define_variable(X, kernel, shape=None, mean_func=None, rand_gen=None, dtype= :param ctx: the mxnet context (default: None/current context). :type ctx: None or mxnet.cpu or mxnet.gpu """ - gp = GaussianProcess(X=X, kernel=kernel, mean_func=mean_func, - rand_gen=rand_gen, dtype=dtype, ctx=ctx) + gp = GaussianProcess(X=X, kernel=kernel, mean=mean, rand_gen=rand_gen, + dtype=dtype, ctx=ctx) gp.outputs = [('random_variable', Variable(value=gp, shape=X.shape[:-1] + (1,) if shape is None else shape))] @@ -92,26 +101,27 @@ def log_pdf_impl(self, X, random_variable, F=None, **kernel_params): :param random_variable: the random_variable of which log-PDF is computed. :type random_variable: MXNet NDArray or MXNet Symbol :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray) - :param **kernel_params: the set of kernel parameters, provided as keyword arguments. - :type **kernel_params: {str: MXNet NDArray or MXNet Symbol} + :param kernel_params: the set of kernel parameters, provided as keyword arguments. + :type kernel_params: {str: MXNet NDArray or MXNet Symbol} :returns: log pdf of the distribution. :rtypes: MXNet NDArray or MXNet Symbol """ + if self._has_mean: + mean = kernel_params['mean'] + del kernel_params['mean'] D = random_variable.shape[-1] F = get_default_MXNet_mode() if F is None else F K = self.kernel.K(F, X, **kernel_params) L = F.linalg.potrf(K) - if self.mean_func is not None: - mean = self.mean_func(F, X) + if self._has_mean: random_variable = random_variable - mean LinvY = F.linalg.trsm(L, random_variable) logdet_l = F.linalg.sumlogdiag(F.abs(L)) return (- logdet_l * D - F.sum(F.sum(F.square(LinvY) + np.log(2. * np.pi), axis=-1), axis=-1) / 2) * self.log_pdf_scaling - def draw_samples_impl(self, X, rv_shape, num_samples=1, F=None, - **kernel_params): + def draw_samples_impl(self, X, rv_shape, num_samples=1, F=None, **kernel_params): """ Draw a number of samples from the Gaussian process. 
@@ -120,25 +130,25 @@ def draw_samples_impl(self, X, rv_shape, num_samples=1, F=None, :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). - :param **kernel_params: the set of kernel parameters, provided as keyword arguments. - :type **kernel_params: {str: MXNet NDArray or MXNet Symbol} + :param kernel_params: the set of kernel parameters, provided as keyword arguments. + :type kernel_params: {str: MXNet NDArray or MXNet Symbol} :returns: a set samples of the distribution. :rtypes: MXNet NDArray or MXNet Symbol """ + if self._has_mean: + mean = kernel_params['mean'] + del kernel_params['mean'] F = get_default_MXNet_mode() if F is None else F K = self.kernel.K(F, X, **kernel_params) L = F.linalg.potrf(K) out_shape = (num_samples,) + rv_shape - L = broadcast_to_w_samples(F, L, out_shape[:-1] + out_shape[-2:-1]) - die = self._rand_gen.sample_normal( shape=out_shape, dtype=self.dtype, ctx=self.ctx) rv = F.linalg.trmm(L, die) - if self.mean_func is not None: - mean = self.mean_func(F, X) + if self._has_mean: rv = rv + mean return rv @@ -147,7 +157,6 @@ def replicate_self(self, attribute_map=None): The copy constructor for a Gaussian process distribution. 
""" replicant = super(GaussianProcess, self).replicate_self(attribute_map) - replicant.mean_func = self.mean_func.replicate_self(attribute_map) \ - if self.mean_func is not None else None + replicant._has_mean = self._has_mean replicant.kernel = self.kernel.replicate_self(attribute_map) return replicant diff --git a/mxfusion/components/distributions/gp/kernels/add_kernel.py b/mxfusion/components/distributions/gp/kernels/add_kernel.py index 30b5114..d6decdd 100644 --- a/mxfusion/components/distributions/gp/kernels/add_kernel.py +++ b/mxfusion/components/distributions/gp/kernels/add_kernel.py @@ -53,7 +53,8 @@ def _compute_K(self, F, X, X2=None, **kernel_params): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol :param **kernel_params: the set of kernel parameters, provided as keyword arguments. @@ -70,8 +71,8 @@ def _compute_Kdiag(self, F, X, **kernel_params): """ The internal interface for the actual computation for the diagonal of the covariance matrix. - This function takes as an assumption: The prefix in the keys of *kernel_params* that corresponds to the name of the kernel has been - removed. The dimensions of *X* has been sliced according to *active_dims*. + This function takes as an assumption: The prefix in the keys of *kernel_params* that corresponds to the name of + the kernel has been removed. The dimensions of *X* has been sliced according to *active_dims*. :param F: MXNet computation type . :param X: the first set of inputs to the kernel. 
diff --git a/mxfusion/components/distributions/gp/kernels/kernel.py b/mxfusion/components/distributions/gp/kernels/kernel.py index 25d837c..e5c082e 100644 --- a/mxfusion/components/distributions/gp/kernels/kernel.py +++ b/mxfusion/components/distributions/gp/kernels/kernel.py @@ -24,13 +24,15 @@ class Kernel(MXFusionFunction): """ - The base class for a Gaussian process kernel: a positive definite function which forms of a covariance function (kernel). + The base class for a Gaussian process kernel: a positive definite function which forms of a covariance function + (kernel). :param input_dim: the number of dimensions of the kernel. (The total number of active dimensions). :type input_dim: int :param name: the name of the kernel. The name is also used as the prefix for the kernel parameters. :type name: str - :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. (default: None, taking all the dimensions). + :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. + (default: None, taking all the dimensions). :type active_dims: [int] or None :param dtype: the data type for float point numbers. :type dtype: numpy.float32 or numpy.float64 @@ -60,12 +62,12 @@ def __setattr__(self, name, value): @property def local_parameters(self): """ - The kernel parameters in the current kernel, which does not include kernel parameters that belongs to the sub-kernels of a compositional - kernel. The keys of the returned dictionary are the name of the kernel parameters (without the prefix) and the values are the corresponding - variables. + The kernel parameters in the current kernel, which does not include kernel parameters that belongs to the + sub-kernels of a compositional kernel. The keys of the returned dictionary are the name of the kernel + parameters (without the prefix) and the values are the corresponding variables. 
- :return: a dictionary of local kernel parameters, in which the keys are the name of individual parameters, including the kernel in front, and - the values are the corresponding Variables. + :return: a dictionary of local kernel parameters, in which the keys are the name of individual parameters, + including the kernel in front, and the values are the corresponding Variables. :rtype: {str: Variable} """ return {getattr(self, n) for n in self._parameter_names} @@ -73,11 +75,12 @@ def local_parameters(self): @property def parameters(self): """ - All the kernel parameters including the kernel parameters that belongs to the sub-kernels. The keys of the returned dictionary are the name of - the kernel parameters with a prefix (the name of the kernel plus '_') and the values are the corresponding variables. + All the kernel parameters including the kernel parameters that belongs to the sub-kernels. The keys of the + returned dictionary are the name of the kernel parameters with a prefix (the name of the kernel plus '_') and + the values are the corresponding variables. - :return: a dictionary of all the kernel parameters, in which the keys are the name of individual parameters, including the kernel in front, - and the values are the corresponding Variables. + :return: a dictionary of all the kernel parameters, in which the keys are the name of individual parameters, + including the kernel in front, and the values are the corresponding Variables. :rtype: {str: Variable} """ raise NotImplementedError @@ -100,11 +103,11 @@ def K(self, F, X, X2=None, **kernel_params): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. 
If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol - :param **kernel_params: the set of kernel parameters, provided as keyword arguments. - :type **kernel_params: {str: MXNet NDArray or MXNet Symbol} + :param kernel_params: the set of kernel parameters, provided as keyword arguments. + :type kernel_params: {str: MXNet NDArray or MXNet Symbol} :return: The covariance matrix :rtype: MXNet NDArray or MXNet Symbol """ @@ -129,8 +132,8 @@ def Kdiag(self, F, X, **kernel_params): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param **kernel_params: the set of kernel parameters, provided as keyword arguments. - :type **kernel_params: {str: MXNet NDArray or MXNet Symbol} + :param kernel_params: the set of kernel parameters, provided as keyword arguments. + :type kernel_params: {str: MXNet NDArray or MXNet Symbol} :return: The diagonal of the covariance matrix. :rtype: MXNet NDArray or MXNet Symbol """ @@ -149,6 +152,8 @@ def add(self, other, name='add'): :param other: the other kernel to be added. :type other: Kernel + :param name: The name of the kernel + :type name: str :return: the kernel which is the sum of the current kernel with the specified kernel. :rtype: Kernel """ @@ -170,6 +175,8 @@ def multiply(self, other, name='mul'): :param other: the other kernel to be added. :type other: Kernel + :param name: The name of the kernel + :type name: str :return: the kernel which is the sum of the current kernel with the specified kernel. 
:rtype: Kernel """ @@ -177,8 +184,7 @@ def multiply(self, other, name='mul'): raise ModelSpecificationError( "Only a Gaussian Process Kernel can be multiplied with a Gaussian Process Kernel.") from .multiply_kernel import MultiplyKernel - return MultiplyKernel([self, other], name=name, ctx=self.ctx, - dtype=self.dtype) + return MultiplyKernel([self, other], name=name, ctx=self.ctx, dtype=self.dtype) def __mul__(self, other): """ @@ -190,8 +196,8 @@ def _compute_K(self, F, X, X2=None, **kernel_params): """ The internal interface for the actual covariance matrix computation. - This function takes as an assumption: The prefix in the keys of *kernel_params* that corresponds to the name of the kernel has been - removed. The dimensions of *X* and *X2* have been sliced according to *active_dims*. + This function takes as an assumption: The prefix in the keys of *kernel_params* that corresponds to the name of + the kernel has been removed. The dimensions of *X* and *X2* have been sliced according to *active_dims*. :param F: MXNet computation type . :param X: the first set of inputs to the kernel. @@ -210,8 +216,8 @@ def _compute_Kdiag(self, F, X, **kernel_params): """ The internal interface for the actual computation for the diagonal of the covariance matrix. - This function takes as an assumption: The prefix in the keys of *kernel_params* that corresponds to the name of the kernel has been - removed. The dimensions of *X* has been sliced according to *active_dims*. + This function takes as an assumption: The prefix in the keys of *kernel_params* that corresponds to the name of + the kernel has been removed. The dimensions of *X* has been sliced according to *active_dims*. :param F: MXNet computation type . :param X: the first set of inputs to the kernel. 
@@ -225,13 +231,15 @@ def _compute_Kdiag(self, F, X, **kernel_params): def fetch_parameters(self, params): """ - The helper function to fetch the kernel parameters from a set of variables according to the UUIDs of the kernel parameters. It returns a - dictionary of kernel parameters, where the keys are the name of the kernel parameters and the values are the MXNet array at runtime. The - returned dict can be directly passed into *K* and *Kdiag* as *kernel_params*. + The helper function to fetch the kernel parameters from a set of variables according to the UUIDs of the kernel + parameters. It returns a dictionary of kernel parameters, where the keys are the name of the kernel parameters + and the values are the MXNet array at runtime. The returned dict can be directly passed into *K* and *Kdiag* as + *kernel_params*. :param params: the set of parameters where the kernel parameters are fetched from. :type params: {str (UUID): MXNet NDArray or MXNet Symbol} - :return: a dict of the kernel parameters, where the keys are the name of the kernel parameters and the values are the MXNet array at runtime. + :return: a dict of the kernel parameters, where the keys are the name of the kernel parameters and the values + are the MXNet array at runtime. :rtype: {str (kernel name): MXNet NDArray or MXNet Symbol} """ return {n: params[v.uuid] for n, v in self.parameters.items()} @@ -241,10 +249,10 @@ def eval(self, F, X, X2=None, **kernel_params): The method handling the execution of the function. :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray) - :param **input_kws: the dict of inputs to the functions. The key in the + :param input_kws: the dict of inputs to the functions. The key in the dict should match with the name of inputs specified in the inputs of FunctionEvaluation. 
- :type **input_kws: {variable name: MXNet NDArray or MXNet Symbol} + :type input_kws: {variable name: MXNet NDArray or MXNet Symbol} :returns: the return value of the function :rtypes: MXNet NDArray or MXNet Symbol """ @@ -273,7 +281,8 @@ class NativeKernel(Kernel): :type input_dim: int :param name: the name of the kernel. The name is used to access kernel parameters. :type name: str - :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. (default: None, taking all the dimensions). + :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. + (default: None, taking all the dimensions). :type active_dims: [int] or None :param dtype: the data type for float point numbers. :type dtype: numpy.float32 or numpy.float64 @@ -289,11 +298,12 @@ def __init__(self, input_dim, name, active_dims=None, dtype=None, @property def parameters(self): """ - All the kernel parameters including the kernel parameters that belongs to the sub-kernels. The keys of the returned dictionary are the name of - the kernel parameters with a prefix (the name of the kernel plus '_') and the values are the corresponding variables. + All the kernel parameters including the kernel parameters that belongs to the sub-kernels. The keys of the + returned dictionary are the name of the kernel parameters with a prefix (the name of the kernel plus '_') + and the values are the corresponding variables. - :return: a dictionary of all the kernel parameters, in which the keys are the name of individual parameters, including the kernel in front, - and the values are the corresponding Variables. + :return: a dictionary of all the kernel parameters, in which the keys are the name of individual parameters, + including the kernel in front, and the values are the corresponding Variables. 
:rtype: {str: Variable} """ return {self.name + '_' + n: getattr(self, n) for n in @@ -306,7 +316,8 @@ def parameter_names(self): class CombinationKernel(Kernel): """ - The base class for combination kernels: the covariance matrix is computed by combining the covariance matrix from multiple sub-kernels. + The base class for combination kernels: the covariance matrix is computed by combining the covariance matrix from + multiple sub-kernels. :param sub_kernels: a list of kernels that are combined to compute a covariance matrix. :type sub_kernels: [Kernel] @@ -331,11 +342,12 @@ def __init__(self, sub_kernels, name, dtype=None, ctx=None): @property def parameters(self): """ - All the kernel parameters including the kernel parameters that belongs to the sub-kernels. The keys of the returned dictionary are the name of - the kernel parameters with a prefix (the name of the kernel plus '_') and the values are the corresponding variables. + All the kernel parameters including the kernel parameters that belongs to the sub-kernels. The keys of the + returned dictionary are the name of the kernel parameters with a prefix (the name of the kernel plus '_') and + the values are the corresponding variables. - :return: a dictionary of all the kernel parameters, in which the keys are the name of individual parameters, including the kernel in front, - and the values are the corresponding Variables. + :return: a dictionary of all the kernel parameters, in which the keys are the name of individual parameters, + including the kernel in front, and the values are the corresponding Variables. :rtype: {str: Variable} """ p = {} @@ -354,10 +366,8 @@ def replicate_self(self, attribute_map=None): """ The copy constructor for a kernel. 
""" - replicant = super(CombinationKernel, self).replicate_self( - attribute_map) - replicant.sub_kernels = [k.replicate_self(attribute_map) for k in - self.sub_kernels] + replicant = super(CombinationKernel, self).replicate_self(attribute_map) + replicant.sub_kernels = [k.replicate_self(attribute_map) for k in self.sub_kernels] for k in replicant.sub_kernels: setattr(replicant, k.name, k) return replicant diff --git a/mxfusion/components/distributions/gp/kernels/linear.py b/mxfusion/components/distributions/gp/kernels/linear.py index c6ec55e..53d371a 100644 --- a/mxfusion/components/distributions/gp/kernels/linear.py +++ b/mxfusion/components/distributions/gp/kernels/linear.py @@ -27,14 +27,15 @@ class Linear(NativeKernel): :param input_dim: the number of dimensions of the kernel. (The total number of active dimensions) . :type input_dim: int - :param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided by a lengthscale for individual - dimensions. + :param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided + by a lengthscale for individual dimensions. :type ARD: boolean :param variances: the initial value for the variances parameter, which scales the input dimensions. :type variances: float or MXNet NDArray :param name: the name of the kernel. The name is used to access kernel parameters. :type name: str - :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. (default: None, taking all the dimensions). + :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. + (default: None, taking all the dimensions). :type active_dims: [int] or None :param dtype: the data type for float point numbers. :type dtype: numpy.float32 or numpy.float64 @@ -62,13 +63,11 @@ def _compute_K(self, F, X, variances, X2=None): :param F: MXNet computation type . 
:param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol :param variances: the variances parameter, which scales the input dimensions. :type variances: MXNet NDArray or MXNet Symbol - :param lengthscale: the lengthscale parameter. - :type lengthscale: MXNet NDArray or MXNet Symbol :return: The covariance matrix. :rtype: MXNet NDArray or MXNet Symbol """ @@ -101,8 +100,7 @@ def _compute_Kdiag(self, F, X, variances): :rtype: MXNet NDArray or MXNet Symbol """ X2 = F.square(X) - return F.sum(X2 * F.expand_dims(variances, axis=-2), - axis=-1) + return F.sum(X2 * F.expand_dims(variances, axis=-2), axis=-1) def replicate_self(self, attribute_map=None): """ diff --git a/mxfusion/components/distributions/gp/kernels/matern.py b/mxfusion/components/distributions/gp/kernels/matern.py index 845cbe8..0d277b7 100644 --- a/mxfusion/components/distributions/gp/kernels/matern.py +++ b/mxfusion/components/distributions/gp/kernels/matern.py @@ -27,8 +27,8 @@ class Matern(StationaryKernel): :param input_dim: the number of dimensions of the kernel. (The total number of active dimensions) :type input_dim: int - :param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided by a lengthscale for individual - dimensions. + :param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided + by a lengthscale for individual dimensions. :type ARD: boolean :param variance: the initial value for the variance parameter (scalar), which scales the whole covariance matrix. 
:type variance: float or MXNet NDArray @@ -36,7 +36,8 @@ class Matern(StationaryKernel): :type lengthscale: float or MXNet NDArray :param name: the name of the kernel. The name is used to access kernel parameters. :type name: str - :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. (default: None, taking all the dimensions). + :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. + (default: None, taking all the dimensions). :type active_dims: [int] or None :param dtype: the data type for float point numbers. :type dtype: numpy.float32 or numpy.float64 @@ -70,8 +71,8 @@ def _compute_K(self, F, X, lengthscale, variance, X2=None): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol :param variance: the variance parameter (scalar), which scales the whole covariance matrix. :type variance: MXNet NDArray or MXNet Symbol @@ -102,8 +103,8 @@ def _compute_K(self, F, X, lengthscale, variance, X2=None): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. 
:type X2: MXNet NDArray or MXNet Symbol :param variance: the variance parameter (scalar), which scales the whole covariance matrix. :type variance: MXNet NDArray or MXNet Symbol @@ -134,8 +135,8 @@ def _compute_K(self, F, X, lengthscale, variance, X2=None): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol :param variance: the variance parameter (scalar), which scales the whole covariance matrix. :type variance: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/distributions/gp/kernels/multiply_kernel.py b/mxfusion/components/distributions/gp/kernels/multiply_kernel.py index 30fcd15..3769850 100644 --- a/mxfusion/components/distributions/gp/kernels/multiply_kernel.py +++ b/mxfusion/components/distributions/gp/kernels/multiply_kernel.py @@ -53,8 +53,8 @@ def _compute_K(self, F, X, X2=None, **kernel_params): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol :param **kernel_params: the set of kernel parameters, provided as keyword arguments. 
:type **kernel_params: {str: MXNet NDArray or MXNet Symbol} @@ -70,8 +70,8 @@ def _compute_Kdiag(self, F, X, **kernel_params): """ The internal interface for the actual computation for the diagonal of the covariance matrix. - This function takes as an assumption: The prefix in the keys of *kernel_params* that corresponds to the name of the kernel has been - removed. The dimensions of *X* has been sliced according to *active_dims*. + This function takes as an assumption: The prefix in the keys of *kernel_params* that corresponds to the name + of the kernel has been removed. The dimensions of *X* has been sliced according to *active_dims*. :param F: MXNet computation type . :param X: the first set of inputs to the kernel. diff --git a/mxfusion/components/distributions/gp/kernels/rbf.py b/mxfusion/components/distributions/gp/kernels/rbf.py index 46aefe1..2719d70 100644 --- a/mxfusion/components/distributions/gp/kernels/rbf.py +++ b/mxfusion/components/distributions/gp/kernels/rbf.py @@ -25,8 +25,8 @@ class RBF(StationaryKernel): :param input_dim: the number of dimensions of the kernel. (The total number of active dimensions) :type input_dim: int - :param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided by a lengthscale for individual - dimensions. + :param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided + by a lengthscale for individual dimensions. :type ARD: boolean :param variance: the initial value for the variance parameter (scalar), which scales the whole covariance matrix. :type variance: float or MXNet NDArray @@ -34,7 +34,8 @@ class RBF(StationaryKernel): :type lengthscale: float or MXNet NDArray :param name: the name of the kernel. The name is used to access kernel parameters. :type name: str - :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. (default: None, taking all the dimensions). 
+ :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. + (default: None, taking all the dimensions). :type active_dims: [int] or None :param dtype: the data type for float point numbers. :type dtype: numpy.float32 or numpy.float64 @@ -57,8 +58,8 @@ def _compute_K(self, F, X, lengthscale, variance, X2=None): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol :param variance: the variance parameter (scalar), which scales the whole covariance matrix. :type variance: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/distributions/gp/kernels/static.py b/mxfusion/components/distributions/gp/kernels/static.py index 3782b39..790e0d6 100644 --- a/mxfusion/components/distributions/gp/kernels/static.py +++ b/mxfusion/components/distributions/gp/kernels/static.py @@ -32,7 +32,8 @@ class Bias(NativeKernel): :type variance: float or MXNet NDArray :param name: the name of the kernel. The name is used to access kernel parameters. :type name: str - :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. (default: None, taking all the dimensions). + :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. + (default: None, taking all the dimensions). :type active_dims: [int] or None :param dtype: the data type for float point numbers. 
:type dtype: numpy.float32 or numpy.float64 @@ -59,8 +60,8 @@ def _compute_K(self, F, X, variance, X2=None): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, + this computes a square covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol :param variance: the variance parameter. :type variance: MXNet NDArray or MXNet Symbol @@ -100,7 +101,8 @@ class White(NativeKernel): :type variance: float or MXNet NDArray :param name: the name of the kernel. The name is used to access kernel parameters. :type name: str - :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. (default: None, taking all the dimensions). + :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. + (default: None, taking all the dimensions). :type active_dims: [int] or None :param dtype: the data type for float point numbers. :type dtype: numpy.float32 or numpy.float64 @@ -127,8 +129,8 @@ def _compute_K(self, F, X, variance, X2=None): :param F: MXNet computation type :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol :param variance: the variance parameter. 
:type variance: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/distributions/gp/kernels/stationary.py b/mxfusion/components/distributions/gp/kernels/stationary.py index c95b312..75e21ad 100644 --- a/mxfusion/components/distributions/gp/kernels/stationary.py +++ b/mxfusion/components/distributions/gp/kernels/stationary.py @@ -31,13 +31,14 @@ class StationaryKernel(NativeKernel): In this implementation, r is scaled by the lengthscales parameter(s): .. math:: r2(x, x') = \\sum_{q=1}^Q \\frac{(x_q - x'_q)^2}{\\ell_q^2}. - By default, there's only one lengthscale: separate lengthscales for each dimension can be enables by setting ARD=True. + By default, there's only one lengthscale: separate lengthscales for each dimension can be enables by setting + ARD=True. :param input_dim: the number of dimensions of the kernel. (The total number of active dimensions). :type input_dim: int - :param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided by a lengthscale for individual - dimensions. + :param ARD: a binary switch for Automatic Relevance Determination (ARD). If true, the squared distance is divided + by a lengthscale for individual dimensions. :type ARD: boolean :param variance: the initial value for the variance parameter (scalar), which scales the whole covariance matrix. :type variance: float or MXNet NDArray @@ -45,7 +46,8 @@ class StationaryKernel(NativeKernel): :type lengthscale: float or MXNet NDArray :param name: the name of the kernel. The name is used to access kernel parameters. :type name: str - :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. (default: None, taking all the dimensions). + :param active_dims: The dimensions of the inputs that are taken for the covariance matrix computation. + (default: None, taking all the dimensions). :type active_dims: [int] or None :param dtype: the data type for float point numbers. 
:type dtype: numpy.float32 or numpy.float64 @@ -81,8 +83,8 @@ def _compute_R2(self, F, X, lengthscale, variance, X2=None): :param F: MXNet computation type . :param X: the first set of inputs to the kernel. :type X: MXNet NDArray or MXNet Symbol - :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square covariance matrix of X. In other words, - X2 is internally treated as X. + :param X2: (optional) the second set of arguments to the kernel. If X2 is None, this computes a square + covariance matrix of X. In other words, X2 is internally treated as X. :type X2: MXNet NDArray or MXNet Symbol :return: The squared distance. :rtype: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/distributions/laplace.py b/mxfusion/components/distributions/laplace.py index 3063a2f..7fbc03f 100644 --- a/mxfusion/components/distributions/laplace.py +++ b/mxfusion/components/distributions/laplace.py @@ -54,8 +54,7 @@ def log_pdf_impl(self, location, scale, random_variable, F=None): F.abs(F.broadcast_minus(random_variable, location)), scale)) * self.log_pdf_scaling return logL - def draw_samples_impl(self, location, scale, rv_shape, num_samples=1, - F=None): + def draw_samples_impl(self, location, scale, rv_shape, num_samples=1, F=None): """ Draw samples from the Laplace distribution. @@ -66,7 +65,7 @@ def draw_samples_impl(self, location, scale, rv_shape, num_samples=1, :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the Laplace distribution. 
:rtypes: MXNet NDArray or MXNet Symbol @@ -78,8 +77,7 @@ def draw_samples_impl(self, location, scale, rv_shape, num_samples=1, scale), location) @staticmethod - def define_variable(location=0., scale=1., shape=None, rand_gen=None, - dtype=None, ctx=None): + def define_variable(location=0., scale=1., shape=None, rand_gen=None, dtype=None, ctx=None): """ Creates and returns a random variable drawn from a Laplace distribution. diff --git a/mxfusion/components/distributions/normal.py b/mxfusion/components/distributions/normal.py index 3fb7393..1d62fb4 100644 --- a/mxfusion/components/distributions/normal.py +++ b/mxfusion/components/distributions/normal.py @@ -25,8 +25,9 @@ class Normal(UnivariateDistribution): """ - The one-dimensional normal distribution. The normal distribution can be defined over a scalar random variable or an array of random variables. In case - of an array of random variables, the mean and variance are broadcasted to the shape of the output random variable (array). + The one-dimensional normal distribution. The normal distribution can be defined over a scalar random variable or an + array of random variables. In case of an array of random variables, the mean and variance are broadcasted to the + shape of the output random variable (array). :param mean: Mean of the normal distribution. :type mean: Variable @@ -68,8 +69,7 @@ def log_pdf_impl(self, mean, variance, random_variable, F=None): F.broadcast_minus(random_variable, mean)), -2 * variance)) * self.log_pdf_scaling return logL - def draw_samples_impl(self, mean, variance, rv_shape, num_samples=1, - F=None): + def draw_samples_impl(self, mean, variance, rv_shape, num_samples=1, F=None): """ Draw samples from the normal distribution. @@ -80,7 +80,7 @@ def draw_samples_impl(self, mean, variance, rv_shape, num_samples=1, :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). 
- :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the normal distribution. :rtypes: MXNet NDArray or MXNet Symbol @@ -177,8 +177,7 @@ def log_pdf_impl(self, mean, covariance, random_variable, F=None): sqnorm_z = - F.sum(F.square(zvec), axis=-1) return (0.5 * (sqnorm_z - (N * np.log(2 * np.pi))) + logdetl)* self.log_pdf_scaling - def draw_samples_impl(self, mean, covariance, rv_shape, num_samples=1, - F=None): + def draw_samples_impl(self, mean, covariance, rv_shape, num_samples=1, F=None): """ Draw a number of samples from the normal distribution. @@ -189,7 +188,7 @@ def draw_samples_impl(self, mean, covariance, rv_shape, num_samples=1, :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the normal distribution :rtypes: MXNet NDArray or MXNet Symbol @@ -203,8 +202,7 @@ def draw_samples_impl(self, mean, covariance, rv_shape, num_samples=1, return F.broadcast_add(lmat_eps.sum(-1), mean) @staticmethod - def define_variable(shape, mean=0., covariance=None, rand_gen=None, - minibatch_ratio=1., dtype=None, ctx=None): + def define_variable(shape, mean=0., covariance=None, rand_gen=None, minibatch_ratio=1., dtype=None, ctx=None): """ Creates and returns a random variable drawn from a normal distribution. @@ -285,8 +283,7 @@ def log_pdf_impl(self, mean, precision, random_variable, F=None): F.broadcast_minus(random_variable, mean)), -precision / 2)) * self.log_pdf_scaling return logL - def draw_samples_impl(self, mean, precision, rv_shape, num_samples=1, - F=None): + def draw_samples_impl(self, mean, precision, rv_shape, num_samples=1, F=None): """ Draw samples from the normal distribution. 
@@ -297,7 +294,7 @@ def draw_samples_impl(self, mean, precision, rv_shape, num_samples=1, :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the normal distribution. :rtypes: MXNet NDArray or MXNet Symbol @@ -396,8 +393,7 @@ def log_pdf_impl(self, mean, precision, random_variable, F=None): return -0.5 * (sqnorm_z + c + logdetl) * self.log_pdf_scaling - def draw_samples_impl(self, mean, precision, rv_shape, num_samples=1, - F=None): + def draw_samples_impl(self, mean, precision, rv_shape, num_samples=1, F=None): """ Draw a number of samples from the normal distribution. @@ -408,7 +404,7 @@ def draw_samples_impl(self, mean, precision, rv_shape, num_samples=1, :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the normal distribution :rtypes: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/distributions/pointmass.py b/mxfusion/components/distributions/pointmass.py index 3f38c2c..68124cf 100644 --- a/mxfusion/components/distributions/pointmass.py +++ b/mxfusion/components/distributions/pointmass.py @@ -21,7 +21,7 @@ class PointMass(UnivariateDistribution): """ The Point Mass distribution. - :param value: the location of the point mass. + :param location: the location of the point mass. 
""" def __init__(self, location, rand_gen=None, dtype=None, ctx=None): inputs = [('location', location)] @@ -51,13 +51,18 @@ def draw_samples_impl(self, location, rv_shape, num_samples=1, F=None): location, shape=(num_samples,)+location.shape[1:]) @staticmethod - def define_variable(location, shape=None, rand_gen=None, dtype=None, - ctx=None): + def define_variable(location, shape=None, rand_gen=None, dtype=None, ctx=None): """ Creates and returns a random variable drawn from a Normal distribution. :param location: the location of the point mass. :param shape: Shape of random variables drawn from the distribution. If non-scalar, each variable is drawn iid. + :param rand_gen: the random generator (default: MXNetRandomGenerator). + :type rand_gen: RandomGenerator + :param dtype: the data type for float point numbers. + :type dtype: numpy.float32 or numpy.float64 + :param ctx: the mxnet context (default: None/current context). + :type ctx: None or mxnet.cpu or mxnet.gpu :returns: RandomVariable drawn from the distribution specified. """ diff --git a/mxfusion/components/distributions/random_gen.py b/mxfusion/components/distributions/random_gen.py index a155da4..58c7a09 100644 --- a/mxfusion/components/distributions/random_gen.py +++ b/mxfusion/components/distributions/random_gen.py @@ -39,7 +39,6 @@ def sample_multinomial(data, get_prob=True, dtype='int32', F=None): def sample_bernoulli(prob_true=0.5, dtype='bool', F=None): pass - @staticmethod def sample_uniform(low=0., high=1., shape=None, dtype=None, out=None, ctx=None, F=None): pass @@ -48,6 +47,7 @@ def sample_uniform(low=0., high=1., shape=None, dtype=None, out=None, ctx=None, def sample_laplace(location=0., scale=1., shape=None, dtype=None, out=None, ctx=None, F=None): pass + class MXNetRandomGenerator(RandomGenerator): """ The MXNet pseudo-random number generator. 
@@ -98,8 +98,7 @@ def sample_normal(loc=0, scale=1, shape=None, dtype=None, out=None, ctx=None, F= shape=shape, dtype=dtype, out=out, ctx=ctx, F=F) @staticmethod - def sample_multinomial(data, shape=None, get_prob=False, dtype='int32', - F=None): + def sample_multinomial(data, shape=None, get_prob=False, dtype='int32', F=None): """ Sample Multinomial distributed variables @@ -107,6 +106,7 @@ def sample_multinomial(data, shape=None, get_prob=False, dtype='int32', `k` is the number of possible outcomes of each multinomial distribution. For example, data with shape `(m, n, k)` specifies `m*n` multinomial distributions each with `k` possible outcomes. + :param shape: Shape of the random variable :param get_prob: If true, a second array containing log likelihood of the drawn samples will also be returned. This is usually used for reinforcement learning, where you can provide diff --git a/mxfusion/components/distributions/uniform.py b/mxfusion/components/distributions/uniform.py index 8c0921c..a17f2eb 100644 --- a/mxfusion/components/distributions/uniform.py +++ b/mxfusion/components/distributions/uniform.py @@ -73,7 +73,7 @@ def draw_samples_impl(self, low, high, rv_shape, num_samples=1, F=None): :param rv_shape: the shape of each sample. :type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the Uniform distribution. :rtypes: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/distributions/wishart.py b/mxfusion/components/distributions/wishart.py index 4b06901..1f1ef57 100644 --- a/mxfusion/components/distributions/wishart.py +++ b/mxfusion/components/distributions/wishart.py @@ -113,7 +113,7 @@ def draw_samples_impl(self, degrees_of_freedom, scale, rv_shape, num_samples=1, :param rv_shape: the shape of each sample. 
:type rv_shape: tuple :param num_samples: the number of drawn samples (default: one). - :int num_samples: int + :type num_samples: int :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :returns: a set samples of the Wishart distribution :rtypes: MXNet NDArray or MXNet Symbol diff --git a/mxfusion/components/factor.py b/mxfusion/components/factor.py index 514dfb6..3c96621 100644 --- a/mxfusion/components/factor.py +++ b/mxfusion/components/factor.py @@ -45,17 +45,19 @@ def _define_variable_from_constant(v): elif isinstance(v, NDArray): return Variable(value=v) else: - raise ModelSpecificationError('The inputs/outputs of a factor can only be a int, float, MXNet NDArray or Variable, but get '+str(v)+'.') + raise ModelSpecificationError('The inputs/outputs of a factor can only be a int, float, ' + 'MXNet NDArray or Variable, but get '+str(v)+'.') class Factor(ModelComponent): """ - A factor represents a relation among multiple variables in a model such as a distribution, a function or a module. It consists of a list of output - variables and optionally a list of input variables. + A factor represents a relation among multiple variables in a model such as a distribution, a function or a module. + It consists of a list of output variables and optionally a list of input variables. - The ``inputs`` and ``outputs`` argument of ``__init__`` holds the input and output of the factor, which are represented in Python dict. The key of a variable in - the dict is the name of the variable referred in the context of the factor, e.g., the mean and variance of a normal distribution. The value of a - variable is the reference to the variable in memory. Both input and output variables are accessible as class attributes. + The ``inputs`` and ``outputs`` argument of ``__init__`` holds the input and output of the factor, which are + represented in Python dict. 
The key of a variable in the dict is the name of the variable referred in the context + of the factor, e.g., the mean and variance of a normal distribution. The value of a variable is the reference to + the variable in memory. Both input and output variables are accessible as class attributes. The ``inputs`` and ``outputs`` argument of ``__init__`` can be: @@ -74,9 +76,11 @@ class Factor(ModelComponent): def __getattr__(self, value): if value.startswith("__"): """ - When python copies objects, it begins by checking for ``__setstate__()`` which doesn't exist, so it calls ``__getattr__()``. Our implementation then - calls the ``self.inputs`` getter before the object is fully prepared because ``__init__()`` never gets called during the copy. This causes an infinite - recursion to ``__getattr__()``. By skipping magic methods with "__" prefix, we allow the object to initialize correctly during copying. + When python copies objects, it begins by checking for ``__setstate__()`` which doesn't exist, so it calls + ``__getattr__()``. Our implementation then calls the ``self.inputs`` getter before the object is fully + prepared because ``__init__()`` never gets called during the copy. This causes an infinite recursion to + ``__getattr__()``. By skipping magic methods with "__" prefix, we allow the object to initialize correctly + during copying. # TODO this is very inefficient, can be improved. """ @@ -118,7 +122,8 @@ def replicate_self(self, attribute_map=None): """ This functions is a copy constructor for the object. In order to perform copy construction we first call ``__new__()`` on the class which creates a blank object. - We then initialize that object using the method's standard init procedures, and do any extra copying of attributes. + We then initialize that object using the method's standard init procedures, and do any extra copying of + attributes. Replicates this Factor, using new inputs, outputs, and a new uuid. 
Used during model replication to functionally replicate a factor into a new graph. diff --git a/mxfusion/components/functions/function_evaluation.py b/mxfusion/components/functions/function_evaluation.py index ec982fa..164fa10 100644 --- a/mxfusion/components/functions/function_evaluation.py +++ b/mxfusion/components/functions/function_evaluation.py @@ -46,34 +46,38 @@ def replicate_self(self, attribute_map=None): def eval(self, F, variables, always_return_tuple=False): """ - Evaluate the function with the pre-specified input arguments in the model defintion. All the input arguments are automatically collected from a dictionary of variables according to the UUIDs of the input arguments. + Evaluate the function with the pre-specified input arguments in the model defintion. All the input arguments + are automatically collected from a dictionary of variables according to the UUIDs of the input arguments. :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray). :param variables: the set of variables where the dependent variables are collected from. :type variables: {str(UUID): MXNet NDArray or Symbol} - :param always_return_tuple: whether to always return the function outcome in a tuple, even if there is only one output variable. This makes programming easy, as the downstream code can consistently expect a tuple. + :param always_return_tuple: whether to always return the function outcome in a tuple, even if there is only + one output variable. This makes programming easy, as the downstream code can consistently expect a tuple. 
:type always_return_tuple: boolean :returns: the outcome of the function evaluation :rtypes: MXNet NDArray or MXNet Symbol or [MXNet NDArray or MXNet Symbol] """ - kwargs = {name: variables[var.uuid] for name, var in self.inputs - if not var.isInherited or var.type == VariableType.RANDVAR} + if self.broadcastable: # If some of the inputs are samples and the function is # broadcastable, evaluate the function with the inputs that are # broadcasted to the right shape. + kwargs = {name: variables[var.uuid] for name, var in self.inputs if not var.isInherited} kwargs = broadcast_samples_dict(F, kwargs) + kwargs.update({name: variables[var.uuid][0] for name, var in self.inputs if var.isInherited}) results = self.eval_impl(F=F, **kwargs) results = results if isinstance(results, (list, tuple)) \ else [results] else: + kwargs = {name: variables[var.uuid] for name, var in self.inputs} # If some of the inputs are samples and the function is *not* # broadcastable, evaluate the function with each set of samples # and concatenate the output variables. - nSamples = max([get_num_samples(F, v) for v in kwargs.values()]) + num_samples = max([get_num_samples(F, v) for v in kwargs.values()]) results = None - for sample_idx in range(nSamples): + for sample_idx in range(num_samples): r = self.eval_impl(F=F, **{ n: v[sample_idx] if array_has_samples(F, v) else v[0] for n, v in kwargs.items()}) @@ -86,7 +90,7 @@ def eval(self, F, variables, always_return_tuple=False): else: for r_list, r_i in zip(results, r): r_list.append(r_i) - if nSamples == 1: + if num_samples == 1: results = [r[0] for r in results] else: results = [F.concat(*r, dim=0) for r in results] @@ -115,10 +119,10 @@ class FunctionEvaluationWithParameters(FunctionEvaluation): The evaluation of a function with internal function parameters. 
:param func: the function that this evaluation is generated from - :param inputs: MXFusion.components.functions.MXFusionFunction - :type inputs: {str : Variable} - :param outputs: the output variables of the function. - :type outputs: {str : Variable} + :param input_variables: MXFusion.components.functions.MXFusionFunction + :type input_variables: {str : Variable} + :param output_variables: the output variables of the function. + :type output_variables: {str : Variable} :param broadcastable: Whether the function supports broadcasting with the additional dimension for samples. :type: boolean """ @@ -156,9 +160,9 @@ def eval_impl(self, F, **input_kws): Invokes the MXNet Gluon block with the arguments passed in. :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray) - :param **input_kws: the dict of inputs to the functions. The key in the dict should match with the name of inputs specified in the inputs - of FunctionEvaluation. - :type **input_kws: {variable name: MXNet NDArray or MXNet Symbol} + :param input_kws: the dict of inputs to the functions. The key in the dict should match with the name of + inputs specified in the inputs of FunctionEvaluation. 
+ :type input_kws: {variable name: MXNet NDArray or MXNet Symbol} :returns: the return value of the function :rtypes: MXNet NDArray or MXNet Symbol """ diff --git a/mxfusion/components/functions/gluon_func_eval.py b/mxfusion/components/functions/gluon_func_eval.py index 45a9d99..78886e2 100644 --- a/mxfusion/components/functions/gluon_func_eval.py +++ b/mxfusion/components/functions/gluon_func_eval.py @@ -36,22 +36,3 @@ def __init__(self, func, input_variables, output_variables, func=func, input_variables=input_variables, output_variables=output_variables, broadcastable=broadcastable ) - - @property - def _input_to_gluon_names(self): - return [k for k, v in self.inputs if (not v.isInherited) or - v.type != VariableType.PARAMETER] - - def eval_impl(self, F, **input_kws): - """ - Invokes the MXNet Gluon block with the arguments passed in. - - :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray) - :param **input_kws: the dict of inputs to the functions. The key in the dict should match with the name of inputs specified in the inputs - of FunctionEvaluation. - :type **input_kws: {variable name: MXNet NDArray or MXNet Symbol} - :returns: the return value of the function - :rtypes: MXNet NDArray or MXNet Symbol - """ - inputs_func = {k: input_kws[k] for k in self._input_to_gluon_names} - return self._func.eval(F, **inputs_func) diff --git a/mxfusion/components/functions/mxfusion_function.py b/mxfusion/components/functions/mxfusion_function.py index 2a86fba..7616cf8 100644 --- a/mxfusion/components/functions/mxfusion_function.py +++ b/mxfusion/components/functions/mxfusion_function.py @@ -43,10 +43,10 @@ def eval(self, F, **input_kws): The method handling the execution of the function. :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray) - :param **input_kws: the dict of inputs to the functions. The key in the + :param input_kws: the dict of inputs to the functions. 
The key in the dict should match with the name of inputs specified in the inputs of FunctionEvaluation. - :type **input_kws: {variable name: MXNet NDArray or MXNet Symbol} + :type input_kws: {variable name: MXNet NDArray or MXNet Symbol} :returns: the return value of the function :rtypes: MXNet NDArray or MXNet Symbol """ diff --git a/mxfusion/components/functions/mxfusion_gluon_function.py b/mxfusion/components/functions/mxfusion_gluon_function.py index 47f8a6d..d242711 100644 --- a/mxfusion/components/functions/mxfusion_gluon_function.py +++ b/mxfusion/components/functions/mxfusion_gluon_function.py @@ -24,8 +24,9 @@ class MXFusionGluonFunction(MXFusionFunction): """ - The wrapper of a MXNet Gluon block in MXFusion. It automatically fetches all the Gluon parameters in its ParameterDict. When this function - wrapper is called in Model definition, it returns a factor corresponding to the function evaluation. + The wrapper of a MXNet Gluon block in MXFusion. It automatically fetches all the Gluon parameters in its + ParameterDict. When this function wrapper is called in Model definition, it returns a factor corresponding to + the function evaluation. :param block: The MXNet Gluon block to be wrapped. :type block: mxnet.gluon.Block or mxnet.gluon.HybridBlock @@ -98,9 +99,10 @@ def eval(self, F, **input_kws): Invokes the MXNet Gluon block with the arguments passed in. :param F: the MXNet computation mode (mxnet.symbol or mxnet.ndarray) - :param **input_kws: the dict of inputs to the functions. The key in the dict should match with the name of inputs specified in the inputs + :param input_kws: the dict of inputs to the functions. The key in the dict should match with the name of inputs + specified in the inputs of FunctionEvaluation. 
- :type **input_kws: {variable name: MXNet NDArray or MXNet Symbol} + :type input_kws: {variable name: MXNet NDArray or MXNet Symbol} :returns: the return value of the function :rtypes: MXNet NDArray or MXNet Symbol """ @@ -127,7 +129,8 @@ def __call__(self, *args, **kwargs): broadcastable = self.broadcastable for bv in kwargs.values(): if bv.type != VariableType.PARAMETER and self.broadcastable: - # Broadcasting function evaluation can not be applied to the Gluon block with gluon block parameters as random variables. + # Broadcasting function evaluation can not be applied to the Gluon block with gluon block + # parameters as random variables. broadcastable = False break @@ -155,39 +158,23 @@ def _create_variables_from_gluon_block(self, block): params = block.collect_params() vs = {} for param in params.values(): - v = Variable(isInherited=True, shape=param.shape) + v = Variable(isInherited=True, shape=param.shape, initial_value=param.data()) v.inherited_name = param.name vs[v.inherited_name] = v return vs - def collect_gluon_parameters(self): - """ - Return the parameters of the MXNet Gluon block that have *not* been set a prior distribution. - - :returns: the parameters of the MXNet Gluon block without a prior distribution. - :rtype: MXNet.gluon.ParameterDict - """ - params = ParameterDict() - gluon_params = self._gluon_block.collect_params() - params.update({var_name: gluon_params[var_name] for var_name, var in self._gluon_parameters.items() if var.type == VariableType.PARAMETER}) - return params - - def collect_params(self): - """ - Return a variable set / dict. Used for the function.collect_params.set_prior() functionality. 
- """ - # TODO: implement VariableSet - raise NotImplementedError - def _override_block_parameters(self, input_kws): """ - When a probabilistic distribution is defined for the parameters of a Gluon block (in ParameterDict), a special treatment is necessary - because otherwise these parameters will be directly exposed to a gradient optimizer as free parameters. + When a probabilistic distribution is defined for the parameters of a Gluon block (in ParameterDict), a special + treatment is necessary because otherwise these parameters will be directly exposed to a gradient optimizer as + free parameters. - For each parameters of the Gluon bock with probabilistic distribution, this method dynamically sets its values as the outcome of - upstream computation and ensure the correct gradient can be estimated via automatic differentiation. + For each parameters of the Gluon bock with probabilistic distribution, this method dynamically sets its values + as the outcome of upstream computation and ensure the correct gradient can be estimated via automatic + differentiation. - :param **input_kws: the dict of inputs to the functions. The key in the dict should match with the name of inputs specified in the + :param **input_kws: the dict of inputs to the functions. The key in the dict should match with the name of + inputs specified in the inputs of FunctionEvaluation. 
:type **input_kws: {variable name: MXNet NDArray or MXNet Symbol} """ diff --git a/mxfusion/components/functions/operators/operator_impl.py b/mxfusion/components/functions/operators/operator_impl.py index 9fba608..2d7781b 100644 --- a/mxfusion/components/functions/operators/operator_impl.py +++ b/mxfusion/components/functions/operators/operator_impl.py @@ -22,57 +22,77 @@ """ Basic Arithmetic """ + + @MXNetOperatorDecorator(name='add', args=['x', 'y'], inputs=['x', 'y']) def add(F, x, y): return F.add(x, y) + @MXNetOperatorDecorator(name='subtract', args=['x', 'y'], inputs=['x', 'y']) def subtract(F, x, y): return F.subtract(x, y) + @MXNetOperatorDecorator(name='multiply', args=['x', 'y'], inputs=['x', 'y']) def multiply(F, x, y): return F.multiply(x, y) + @MXNetOperatorDecorator(name='divide', args=['x', 'y'], inputs=['x', 'y']) def divide(F, x, y): return F.divide(x, y) + @MXNetOperatorDecorator(name='power', args=['x', 'y'], inputs=['x', 'y']) def power(F, x, y): return F.power(x, y) + """ Elementwise Operations """ + + @MXNetOperatorDecorator(name='square', args=['data'], inputs=['data']) def square(F, data): return F.square(data) + @MXNetOperatorDecorator(name='exp', args=['data'], inputs=['data']) def exp(F, data): return F.exp(data) + @MXNetOperatorDecorator(name='log', args=['data'], inputs=['data']) def log(F, data): return F.log(data) + """ Aggregation """ + + @MXNetOperatorDecorator(name='sum', args=['data', 'axis'], inputs=['data']) def sum(F, data, axis=None): return F.sum(data, axis) + @MXNetOperatorDecorator(name='mean', args=['data', 'axis'], inputs=['data']) def mean(F, data, axis=None): return F.mean(data, axis) + @MXNetOperatorDecorator(name='prod', args=['data', 'axis'], inputs=['data']) def prod(F, data, axis=None): return F.prod(data, axis) + """ Matrix Operations """ + + @MXNetOperatorDecorator(name='dot', args=['x', 'y'], inputs=['x', 'y']) def dot(F, x, y): return F.linalg.gemm2(x, y) + # TODO Bring in the axis arguments once it's in the 
release version of MXNet @MXNetOperatorDecorator(name='diag', args=['data', 'k', 'axis1', 'axis2'], inputs=['data']) def diag(F, data, k=0, axis1=None, axis2=None): @@ -80,11 +100,15 @@ def diag(F, data, k=0, axis1=None, axis2=None): raise Exception("axis1 and axis2 are not implemented yet.") return F.diag(data, k) + """ Matrix Manipulations """ + + @MXNetOperatorDecorator(name='reshape', args=['data', 'shape', 'reverse'], inputs=['data']) def reshape(F, data, shape, reverse=False): return F.reshape(data=data, shape=shape, reverse=reverse) + @MXNetOperatorDecorator(name='transpose', args=['data', 'axes'], inputs=['data']) def transpose(F, data, axes=None): axes = axes if axes is not None else [] @@ -96,7 +120,8 @@ def transpose(F, data, axes=None): def broadcast_to(data, shape): """ - This operator broadcast a variable to a target shape. The broadcasting rule is the same as [the numpy broadcasting rule](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html). See the following example: + This operator broadcast a variable to a target shape. The broadcasting rule is the same as [the numpy broadcasting + rule](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html). See the following example: ```python m.x = Gaussian.define_variable(mean=broadcast_to(array([0]), (2,)), diff --git a/mxfusion/components/functions/operators/operators.py b/mxfusion/components/functions/operators/operators.py index cf7e605..1c6a44a 100644 --- a/mxfusion/components/functions/operators/operators.py +++ b/mxfusion/components/functions/operators/operators.py @@ -21,7 +21,8 @@ class Operator(FunctionEvaluation): """ Abstract Operator object for using MXNet operators in MXFusion space. - Child classes implement the eval method with their operator and access necessary state through the properties dictionary. + Child classes implement the eval method with their operator and access necessary state through the + properties dictionary. 
""" def __init__(self, inputs, outputs, operator_name, properties=None, broadcastable=False): @@ -57,7 +58,8 @@ def __init__(self, name, args, inputs, num_outputs=1, broadcastable=False): :type name: string :param args: The names of the arguments for the mxnet operator in order. :type args: list of strings - :param inputs: The inputs to the MXNet operator that could have gradient's chained through them. I.E. the mx.nd.array or mx.sym.array parameters. This will be a subset of args (possibly the same set). + :param inputs: The inputs to the MXNet operator that could have gradient's chained through them. + I.E. the mx.nd.array or mx.sym.array parameters. This will be a subset of args (possibly the same set). :type inputs: list of strings :param num_outputs: How many output variables the operator produces. Defaults to 1. :type num_outputs: int @@ -87,7 +89,8 @@ def eval_impl(self, F, **input_kws): return func(F, **input_kws) if not len(all_args) >= len(self.input_names): - raise ModelSpecificationError("Must pass in arguments matching the input names {} but received {}.".format(self.input_names, all_args)) + raise ModelSpecificationError("Must pass in arguments matching the input names {} but received {}." + .format(self.input_names, all_args)) op = CustomOperator( inputs=[(n, all_args[n]) for n in self.input_names], diff --git a/mxfusion/components/model_component.py b/mxfusion/components/model_component.py index 0d6c569..82db1fa 100644 --- a/mxfusion/components/model_component.py +++ b/mxfusion/components/model_component.py @@ -31,7 +31,8 @@ class ModelComponent(object): **Mode 2 - Graph mode** If a node is attached to a FactorGraph, it does not store direct references to its successors and predecessors. - When accessed, the predecessors/successors properties directly query the graph they are attached to to find out what the respective neighbor nodes are. 
+ When accessed, the predecessors/successors properties directly query the graph they are attached to to find out + what the respective neighbor nodes are. """ def __init__(self): @@ -73,9 +74,11 @@ def graph(self): @graph.setter def graph(self, graph): """ - Attaches the node to a graph, switching from Bidirectional mode to Graph mode if it is not already in Graph mode. + Attaches the node to a graph, switching from Bidirectional mode to Graph mode if it is not already + in Graph mode. - A node cannot be re-attached to a different graph once it is attached. Use the ``replicate()`` functionality if you need to do this. + A node cannot be re-attached to a different graph once it is attached. Use the ``replicate()`` functionality + if you need to do this. :param graph: The ``components_graph`` of the ``FactorGraph`` this node is attaching to. :type graph: networkx.DiGraph @@ -105,7 +108,8 @@ def _update_attributes(self): def _align_graph_modes(self, edge_nodes): """ - This function will update the current node and all nodes passed in to be in Graph mode if any of edge_nodes are in Graph mode. + This function will update the current node and all nodes passed in to be in Graph mode if any of edge_nodes are + in Graph mode. :param edge_nodes: All the nodes to align to the same graph mode. I.E. predecessors or successors. :type edge_nodes: List of tuples of name to node e.g. [('random_variable': Variable y)] @@ -146,7 +150,8 @@ def add_predecessor(successor, predecessor, successor_name): if successor.graph is None: successor._predecessors.append((successor_name, predecessor)) if successor.graph is not None: - raise ModelSpecificationError("Internal Error. Cannot add predecessor when a component is attached to a graph.") + raise ModelSpecificationError( + "Internal Error. 
Cannot add predecessor when a component is attached to a graph.") self._align_graph_modes(successors) if self.graph is not None: @@ -186,7 +191,8 @@ def add_successor(predecessor, successor, predecessor_name): if predecessor.graph is None: predecessor._successors.append((predecessor_name, successor)) if predecessor.graph is not None: - raise ModelSpecificationError("Internal Error. Cannot add a successor when a component is attached to a graph.") + raise ModelSpecificationError( + "Internal Error. Cannot add a successor when a component is attached to a graph.") self._align_graph_modes(predecessors) if self.graph is not None: @@ -229,20 +235,23 @@ def _replicate_neighbors(self, var_map, neighbors, recurse_type, replication_fun :param var_map: A mapping from the original model's components to the replicated components. :type var_map: {original_node: new_node} - :param neighbors: Dictionary containing the list of a node's neighbors in one direction (predecessors or successors). + :param neighbors: Dictionary containing the list of a node's neighbors in one direction + (predecessors or successors). :type neighbors: List of tuples of name to node e.g. [('random_variable': Variable y)] - :param recurse_type: Parameter that decides how to replicate the neighbor nodes. Must be one of: 'recursive', 'one_level', or None. + :param recurse_type: Parameter that decides how to replicate the neighbor nodes. Must be one of: 'recursive', + 'one_level', or None. :type recurse_type: String or None - :param replication_function: A function that takes in a ModelComponent and returns an answer for how to replicate that node's predecessors and successors. + :param replication_function: A function that takes in a ModelComponent and returns an answer for how to + replicate that node's predecessors and successors. 
:type replication_function: function """ if recurse_type == 'recursive': replicated_neighbors = [(name, i.replicate(var_map=var_map, replication_function=replication_function)) - for name, i in neighbors] + for name, i in neighbors] elif recurse_type == 'one_level': replicated_neighbors = [(name, i._replicate_self_with_attributes(var_map=var_map)) - for name, i in neighbors] + for name, i in neighbors] elif recurse_type is None: replicated_neighbors = [] else: @@ -253,10 +262,11 @@ def replicate(self, var_map=None, replication_function=None): """ Replicates this component and its neighbors based on the replication_function logic passed in. - :param var_map: A mapping from the original model's components to the replicated components. This is used to track which components - have already been replicated in a dynamic programming style. + :param var_map: A mapping from the original model's components to the replicated components. This is used to + track which components have already been replicated in a dynamic programming style. :type var_map: {original_node: new_node} - :param replication_function: A function that takes in a ModelComponent and returns an answer for how to replicate that node's predecessors and successors. If None, only replicates this node. + :param replication_function: A function that takes in a ModelComponent and returns an answer for how to + replicate that node's predecessors and successors. If None, only replicates this node. 
:type replication_function: function """ var_map = var_map if var_map is not None else {} diff --git a/mxfusion/inference/__init__.py b/mxfusion/inference/__init__.py index e89b8f3..32a391f 100644 --- a/mxfusion/inference/__init__.py +++ b/mxfusion/inference/__init__.py @@ -41,9 +41,10 @@ from .minibatch_loop import MinibatchInferenceLoop from .meanfield import create_Gaussian_meanfield from .forward_sampling import ForwardSampling, VariationalPosteriorForwardSampling, ForwardSamplingAlgorithm -from .grad_based_inference import GradBasedInference +from .grad_based_inference import GradBasedInference, GradTransferInference from .variational import StochasticVariationalInference from .inference_parameters import InferenceParameters from .score_function import ScoreFunctionInference, ScoreFunctionRBInference from .expectation import ExpectationAlgorithm, ExpectationScoreFunctionAlgorithm from .prediction import ModulePredictionAlgorithm +from .pilco_alg import PILCOAlgorithm diff --git a/mxfusion/inference/batch_loop.py b/mxfusion/inference/batch_loop.py index 1061365..35f0078 100644 --- a/mxfusion/inference/batch_loop.py +++ b/mxfusion/inference/batch_loop.py @@ -12,22 +12,6 @@ # permissions and limitations under the License. # ============================================================================== - -# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). -# You may not use this file except in compliance with the License. -# A copy of the License is located at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# or in the "license" file accompanying this file. This file is distributed -# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either -# express or implied. See the License for the specific language governing -# permissions and limitations under the License. 
-# ============================================================================== - - import mxnet as mx from .grad_loop import GradLoop @@ -52,6 +36,8 @@ def run(self, infr_executor, data, param_dict, ctx, optimizer='adam', :type optimizer: str :param learning_rate: the learning rate of the gradient optimizer (default: 0.001) :type learning_rate: float + :param n_prints: number of messages to print + :type n_prints: int :param max_iter: the maximum number of iterations of gradient optimization :type max_iter: int :param verbose: whether to print per-iteration messages. @@ -66,10 +52,10 @@ def run(self, infr_executor, data, param_dict, ctx, optimizer='adam', with mx.autograd.record(): loss, loss_for_gradient = infr_executor(mx.nd.zeros(1, ctx=ctx), *data) loss_for_gradient.backward() + if verbose: - print('\rIteration {} loss: {}'.format(i + 1, loss.asscalar()), - end='') - if i % iter_step == 0 and i > 0: + print('\rIteration {} loss: {}\t\t\t\t'.format(i + 1, loss.asscalar()), end='') + if ((i+1) % iter_step == 0 and i > 0) or i == max_iter-1: print() trainer.step(batch_size=1, ignore_stale_grad=True) loss = infr_executor(mx.nd.zeros(1, ctx=ctx), *data) diff --git a/mxfusion/inference/expectation.py b/mxfusion/inference/expectation.py index ae71a31..6236c0e 100644 --- a/mxfusion/inference/expectation.py +++ b/mxfusion/inference/expectation.py @@ -101,7 +101,10 @@ def compute(self, F, variables): gradient_lambda = F.mean(q_z_lambda * F.stop_gradient(p_x_z), axis=0) - gradient_theta = F.mean(p_x_z, axis=0) # TODO known issue. This will double count the gradient of any distribution using the reparameterization trick (i.e. Normal). Issue #91 + # TODO known issue. + # This will double count the gradient of any distribution using the + # reparameterization trick (i.e. Normal). 
Issue #91 + gradient_theta = F.mean(p_x_z, axis=0) gradient_log_L = gradient_lambda + gradient_theta diff --git a/mxfusion/inference/grad_based_inference.py b/mxfusion/inference/grad_based_inference.py index f52495e..5935538 100644 --- a/mxfusion/inference/grad_based_inference.py +++ b/mxfusion/inference/grad_based_inference.py @@ -15,20 +15,18 @@ from .inference import Inference from .batch_loop import BatchInferenceLoop +from ..util.inference import discover_shape_constants, init_outcomes +from .minibatch_loop import MinibatchInferenceLoop class GradBasedInference(Inference): """ The abstract class for gradient-based inference methods. - An inference method consists of a few components: the applied inference algorithm, the model definition (optionally a definition of posterior - approximation), the inference parameters. + An inference method consists of a few components: the applied inference algorithm, the model definition + (optionally a definition of posterior approximation), the inference parameters. :param inference_algorithm: The applied inference algorithm :type inference_algorithm: InferenceAlgorithm - :param graphs: a list of graph definitions required by the inference method. It includes the model definition and necessary posterior approximation. - :type graphs: [FactorGraph] - :param observed: A list of observed variables - :type observed: [Variable] :param grad_loop: The reference to the main loop of gradient optimization :type grad_loop: GradLoop :param constants: Specify a list of model variables as constants @@ -79,14 +77,64 @@ def run(self, optimizer='adam', learning_rate=1e-3, max_iter=2000, :type max_iter: int :param verbose: whether to print per-iteration messages. :type verbose: boolean - :param **kwargs: The keyword arguments specify the data for inferences. The key of each argument is the name of the corresponding - variable in model definition and the value of the argument is the data in numpy array format. 
+ :param kwargs: The keyword arguments specify the data for inferences. The key of each argument is the name of + the corresponding variable in model definition and the value of the argument is the data in numpy array format. """ data = [kwargs[v] for v in self.observed_variable_names] self.initialize(**kwargs) infr = self.create_executor() - return self._grad_loop.run( - infr_executor=infr, data=data, param_dict=self.params.param_dict, - ctx=self.mxnet_context, optimizer=optimizer, - learning_rate=learning_rate, max_iter=max_iter, verbose=verbose) + + if isinstance(self._grad_loop, MinibatchInferenceLoop): + def update_shape_constants(data_batch): + data_shapes = {i: d.shape for i, d in zip(self.observed_variable_UUIDs, + data_batch)} + shape_constants = discover_shape_constants(data_shapes, self._graphs) + self.params.update_constants(shape_constants) + + return self._grad_loop.run( + infr_executor=infr, data=data, param_dict=self.params.param_dict, + ctx=self.mxnet_context, optimizer=optimizer, + learning_rate=learning_rate, max_iter=max_iter, verbose=verbose, + update_shape_constants=update_shape_constants) + else: + return self._grad_loop.run( + infr_executor=infr, data=data, param_dict=self.params.param_dict, + ctx=self.mxnet_context, optimizer=optimizer, + learning_rate=learning_rate, max_iter=max_iter, verbose=verbose) + +class GradTransferInference(GradBasedInference): + """ + The abstract Inference method for transferring the outcome of one inference + method to another. + + :param inference_algorithm: The applied inference algorithm + :type inference_algorithm: InferenceAlgorithm + :param train_params: + :param constants: Specify a list of model variables as constants + :type constants: {Variable: mxnet.ndarray} + :param hybridize: Whether to hybridize the MXNet Gluon block of the inference method. 
+ :type hybridize: boolean + :param dtype: data type for internal numerical representation + :type dtype: {numpy.float64, numpy.float32, 'float64', 'float32'} + :param context: The MXNet context + :type context: {mxnet.cpu or mxnet.gpu} + """ + + def __init__(self, inference_algorithm, infr_params, train_params, + grad_loop=None, var_tie=None, + constants=None, hybridize=False, + dtype=None, context=None): + self._var_tie = var_tie if var_tie is not None else {} + self._inherited_params = infr_params + self.train_params = train_params + super(GradTransferInference, self).__init__( + inference_algorithm=inference_algorithm, + grad_loop=grad_loop, constants=constants, + hybridize=hybridize, dtype=dtype, context=context) + + def _initialize_params(self): + self.params.initialize_with_carryover_params( + self._graphs, self.observed_variable_UUIDs, self._var_tie, + init_outcomes(self._inherited_params)) + self.params.fix_all() diff --git a/mxfusion/inference/inference.py b/mxfusion/inference/inference.py index fe6e686..fe2dc6f 100644 --- a/mxfusion/inference/inference.py +++ b/mxfusion/inference/inference.py @@ -21,7 +21,7 @@ import zipfile from .inference_parameters import InferenceParameters from ..common.config import get_default_device, get_default_dtype -from ..common.exceptions import InferenceError +from ..common.exceptions import InferenceError, SerializationError from ..util.inference import discover_shape_constants, init_outcomes from ..models import FactorGraph, Model, Posterior from ..util.serialization import ModelComponentEncoder, make_numpy, load_json_from_zip, load_parameters, \ @@ -61,7 +61,8 @@ def __init__(self, inference_algorithm, constants=None, def print_params(self): """ - Returns a string with the inference parameters nicely formatted for display, showing which model they came from and their name + uuid. + Returns a string with the inference parameters nicely formatted for display, showing which model they came from + and their name + uuid. 
Format: > infr.print_params() @@ -158,8 +159,9 @@ def run(self, **kwargs): """ Run the inference method. - :param **kwargs: The keyword arguments specify the data for inference self. The key of each argument is the name of the corresponding - variable in model definition and the value of the argument is the data in numpy array format. + :param kwargs: The keyword arguments specify the data for inference self. The key of each argument is the name + of the corresponding variable in model definition and the value of the argument is the data in numpy + array format. :returns: the samples of target variables (if not specified, the samples of all the latent variables) :rtype: {UUID: samples} """ @@ -232,8 +234,8 @@ def load_configuration(self, configuration, uuid_map): using the uuid_map parameter to store the correct current observed variables. - :param config_file: The loaded configuration dictionary - :type config_file: str + :param configuration: The loaded configuration dictionary + :type configuration: dict :param uuid_map: A map of previous/loaded model component uuids to their current variable in the loaded graph. :type uuid_map: { current_model_uuid : loaded_previous_uuid} @@ -243,10 +245,10 @@ def load_configuration(self, configuration, uuid_map): def get_serializable(self): """ - Returns the mimimum set of properties that the object needs to save in order to be + Returns the minimum set of properties that the object needs to save in order to be serialized down and loaded back in properly. :returns: A dictionary of configuration properties needed to serialize and reload this inference method. - :rtypes: Dictionary that is JSON serializable. + :rtype: Dictionary that is JSON serializable. 
""" return {'observed': self.observed_variable_UUIDs} @@ -271,8 +273,7 @@ def save(self, zip_filename=DEFAULT_ZIP): mxnet_parameters, mxnet_constants, variable_constants = self.params.get_serializable() configuration = self.get_serializable() graphs = [g.as_json()for g in self._graphs] - version_dict = {"serialization_version": - SERIALIZATION_VERSION} + version_dict = {"serialization_version": SERIALIZATION_VERSION} files_to_save = [] objects = [graphs, mxnet_parameters, mxnet_constants, @@ -338,6 +339,7 @@ def generate_executor(self, **kw): data_shapes = [kw[v] for v in self.observed_variable_names] if not self._initialized: + # TODO This function isn't defined anywhere? self._initialize_run(self._var_tie, self._inherited_params, data_shapes) self._initialized = True diff --git a/mxfusion/inference/inference_alg.py b/mxfusion/inference/inference_alg.py index 8ed624d..573fbe9 100644 --- a/mxfusion/inference/inference_alg.py +++ b/mxfusion/inference/inference_alg.py @@ -30,10 +30,11 @@ class ObjectiveBlock(HybridBlock): :type infr_method: a pointer to a function :param constants: the variables with constant values :type constants: {Variable UUID: int or float or mxnet.ndarray} - :param data_def: a list of variable UUID, which corresponds to the order of variables expected as the positional arguments in "hybrid_forward". + :param data_def: a list of variable UUID, which corresponds to the order of variables expected as the positional + arguments in "hybrid_forward". :type data_def: [UUID] :param var_trans: the transformations applied variables - :type var_trains: {UUID: VariableTransformation} + :type var_trans: {UUID: VariableTransformation} :param var_ties: A dictionary of variables that are tied and use the MXNet Parameter of the dict value uuid. :type var_ties: { UUID of source variable: UUID of target variable} :param excluded: a set of variables excluded from being set as Block parameters. 
@@ -58,7 +59,7 @@ def __init__(self, infr_method, constants, data_def, var_trans, var_ties, def hybrid_forward(self, F, x, *args, **kw): """ - This function does all the preprocesses and postprocesses for the execution of a InferenceAlgorithm. + This function does all the pre-processes and post-processes for the execution of a InferenceAlgorithm. :param F: the MXNet computation mode :type F: mxnet.symbol or mxnet.ndarray @@ -94,14 +95,6 @@ class InferenceAlgorithm(ABC): The abstract class for an inference algorithm. A concrete inference algorithm will inherit this class and overload the "compute" function with the actual computation logic. - - :param model: the definition of the probabilistic model - :type model: Model - :param observed: A list of observed variables - :type observed: [Variable] - :param extra_graphs: a list of extra FactorGraph used in the inference - algorithm. - :type extra_graphs: [FactorGraph] """ def replicate_self(self, model, extra_graphs=None): @@ -115,8 +108,18 @@ def replicate_self(self, model, extra_graphs=None): replicant._observed_names = [v.name for v in observed] return replicant - def __init__(self, model, observed, extra_graphs=None): + def __init__(self, model, observed, extra_graphs=None): + """ + Initialize the algorithm + + :param model: the definition of the probabilistic model + :type model: Model + :param observed: A list of observed variables + :type observed: [Variable] + :param extra_graphs: a list of extra FactorGraph used in the inference algorithm. + :type extra_graphs: [FactorGraph] + """ self._model_graph = model self._extra_graphs = extra_graphs if extra_graphs is not None else [] self._graphs = [model] if extra_graphs is None else \ @@ -162,11 +164,15 @@ def graphs(self): def prepare_executor(self, rv_scaling=None): """ - Prepare the creation of an executor. This includes collecting the list of variable transformations and the list of the variables that are inherited from external Gluon blocks, and setting log_pdf_scaling for random variables.
+ Prepare the creation of an executor. This includes collecting the list of variable transformations and the list + of the variables that are inherited from external Gluon blocks, and setting log_pdf_scaling for random + variables. - :param rv_scaling: The scaling of log_pdf of the random variables that are set by users for data sub-sampling or mini-batch learning. + :param rv_scaling: The scaling of log_pdf of the random variables that are set by users for data sub-sampling + or mini-batch learning. :type rv_scaling: {UUID: float} - :returns: the list of the variable transformations and the list of the variables that are excluded from being set as Gluon block parameters (see the excluded argument of __init__ of ObjectiveBlock). + :returns: the list of the variable transformations and the list of the variables that are excluded from being + set as Gluon block parameters (see the excluded argument of __init__ of ObjectiveBlock). :rtypes: {str(UUID): Transformation}, set(str(UUID)) """ excluded = set() @@ -176,8 +182,6 @@ def prepare_executor(self, rv_scaling=None): for v in g.variables.values(): if v.type == VariableType.PARAMETER and v.transformation is not None: var_trans[v.uuid] = v.transformation - if v.type == VariableType.PARAMETER and v.isInherited: - excluded.add(v.uuid) if v.type == VariableType.RANDVAR: if v.uuid in rv_scaling: v.factor.log_pdf_scaling = rv_scaling[v.uuid] @@ -197,7 +201,8 @@ def create_executor(self, data_def, params, var_ties, rv_scaling=None): :type params: InferenceParameters :param var_ties: A dictionary of variables that are tied and use the MXNet Parameter of the dict value uuid. :type var_ties: { UUID of source variable: UUID of target variable} - :param rv_scaling: The scaling of log_pdf of the random variables that are set by users for data sub-sampling or mini-batch learning. + :param rv_scaling: The scaling of log_pdf of the random variables that are set by users for data sub-sampling + or mini-batch learning. 
:type rv_scaling: {UUID: float} :returns: the Gluon block computing the outcome of inference :rtype: mxnet.gluon.HybridBlock @@ -230,7 +235,8 @@ def compute(self, F, variables): def set_parameter(self, variables, target_variable, target_value): """ - Set the value of a variable as the artifacts of this inference algorithm. This triggers to set the value to the corresponding variable into InferenceParameters at the end of inference. + Set the value of a variable as the artifacts of this inference algorithm. This triggers to set the value to the + corresponding variable into InferenceParameters at the end of inference. :param variables: the set of MXNet arrays that holds the values of all the variables at runtime. @@ -257,8 +263,7 @@ class SamplingAlgorithm(InferenceAlgorithm): :type num_samples: int :param target_variables: (optional) the target variables to sample :type target_variables: [UUID] - :param extra_graphs: a list of extra FactorGraph used in the inference - algorithm. + :param extra_graphs: a list of extra FactorGraph used in the inference algorithm. :type extra_graphs: [FactorGraph] """ @@ -273,7 +278,8 @@ def compute(self, F, variables): """ The abstract method for the computation of the inference algorithm. - If inference algorithm is used for gradient based optimizations, it should return two values. The first for the loss function, the second the gradient of the loss function. + If inference algorithm is used for gradient based optimizations, it should return two values. + The first for the loss function, the second the gradient of the loss function. :param F: the execution context (mxnet.ndarray or mxnet.symbol) :type F: Python module @@ -281,6 +287,7 @@ def compute(self, F, variables): variables at runtime. :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} :returns: the outcome of the inference algorithm - :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol. If gradient based, will return two values. 
The first the loss function, the second the gradient of the loss function. + :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol. If gradient based, will return two values. + The first the loss function, the second the gradient of the loss function. """ raise NotImplementedError diff --git a/mxfusion/inference/inference_parameters.py b/mxfusion/inference/inference_parameters.py index 398c309..768ae74 100644 --- a/mxfusion/inference/inference_parameters.py +++ b/mxfusion/inference/inference_parameters.py @@ -14,16 +14,13 @@ import warnings -import numpy as np import mxnet as mx from mxnet import initializer -from mxnet import ndarray from mxnet.gluon import ParameterDict from ..components.variables import VariableType, Variable from ..components import ModelComponent from ..util.inference import realize_shape from ..common.config import get_default_device, get_default_dtype -from ..components.functions.gluon_func_eval import GluonFunctionEvaluation class InferenceParameters(object): @@ -75,18 +72,11 @@ def initialize_params(self, graphs, observed_uuid): self._params = ParameterDict() for g in graphs: - # load in parameterdict from external gluon blocks. 
- for f in g.functions.values(): - if isinstance(f, GluonFunctionEvaluation): - self._params.update( - f.function.collect_gluon_parameters()) - for var in g.get_constants(): self._constants[var.uuid] = var.constant excluded = set(self._constants.keys()).union(observed_uuid) - for var in g.get_parameters(excluded=excluded, - include_inherited=False): + for var in g.get_parameters(excluded=excluded): var_shape = realize_shape(var.shape, self._constants) init = initializer.Constant(var.initial_value_before_transformation) \ if var.initial_value is not None else None @@ -146,6 +136,10 @@ def initialize_with_carryover_params(self, graphs, observed_uuid, var_ties, # if to_var_uuid in carryover.param_dict} self._params.update(carryover_pairs) + def fix_all(self): + for p in self.param_dict.values(): + p.grad_req = 'null' + @property def param_dict(self): return self._params @@ -161,8 +155,7 @@ def var_ties(self): def __getitem__(self, key, ctx=None): if not isinstance(key, Variable): raise KeyError("The access key of inference parameter needs to be Variable, but got "+str(type(key))+".") - pkey = key.inherited_name if key.isInherited else key.uuid - val = self._params.get(pkey).data(ctx) + val = self._params.get(key.uuid).data(ctx) if key.transformation is not None: val = key.transformation.transform(val) return val diff --git a/mxfusion/inference/meanfield.py b/mxfusion/inference/meanfield.py index 537bec1..68e2350 100644 --- a/mxfusion/inference/meanfield.py +++ b/mxfusion/inference/meanfield.py @@ -25,8 +25,8 @@ def create_Gaussian_meanfield(model, observed, dtype=None): """ Create the Meanfield posterior for Variational Inference. 
- :param model_graph: the definition of the probabilistic model - :type model_graph: Model + :param model: the definition of the probabilistic model + :type model: Model :param observed: A list of observed variables :type observed: [Variable] :returns: the resulting posterior representation diff --git a/mxfusion/inference/minibatch_loop.py b/mxfusion/inference/minibatch_loop.py index e196824..517cd0c 100644 --- a/mxfusion/inference/minibatch_loop.py +++ b/mxfusion/inference/minibatch_loop.py @@ -14,8 +14,8 @@ import mxnet as mx -from .grad_loop import GradLoop from mxnet.gluon.data import ArrayDataset +from .grad_loop import GradLoop class MinibatchInferenceLoop(GradLoop): @@ -40,7 +40,7 @@ def __init__(self, batch_size=100, rv_scaling=None): if rv_scaling is not None else rv_scaling def run(self, infr_executor, data, param_dict, ctx, optimizer='adam', - learning_rate=1e-3, max_iter=1000, verbose=False): + learning_rate=1e-3, max_iter=1000, verbose=False, update_shape_constants=None): """ :param infr_executor: The MXNet function that computes the training objective. :type infr_executor: MXNet Gluon Block @@ -58,6 +58,8 @@ def run(self, infr_executor, data, param_dict, ctx, optimizer='adam', :type max_iter: int :param verbose: whether to print per-iteration messages. 
:type verbose: boolean + :param update_shape_constants: The callback function to update the shape constants according to the size of minibatch + :type update_shape_constants: Python function """ if isinstance(data, mx.gluon.data.DataLoader): @@ -74,6 +76,10 @@ L_e = 0 n_batches = 0 for i, data_batch in enumerate(data_loader): + if not isinstance(data_batch, (list, tuple)): + data_batch = [data_batch] + if update_shape_constants is not None: + update_shape_constants(data_batch) with mx.autograd.record(): loss, loss_for_gradient = infr_executor(mx.nd.zeros(1, ctx=ctx), *data_batch) loss_for_gradient.backward() diff --git a/mxfusion/inference/pilco_alg.py b/mxfusion/inference/pilco_alg.py new file mode 100644 index 0000000..04d83ed --- /dev/null +++ b/mxfusion/inference/pilco_alg.py @@ -0,0 +1,92 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# ============================================================================== + +import mxnet as mx + +from .inference_alg import SamplingAlgorithm +from ..common.config import get_default_dtype, get_default_device + + +class PILCOAlgorithm(SamplingAlgorithm): + """ + Sampling-based inference algorithm that rolls a policy forward through the dynamics model and returns the total cost of the sampled trajectories.
+ + :param model: the definition of the probabilistic model + :type model: Model + :param observed: A list of observed variables + :type observed: [Variable] + :param num_samples: the number of samples used in estimating the variational lower bound + :type num_samples: int + :param target_variables: (optional) the target variables to sample + :type target_variables: [UUID] + :param extra_graphs: a list of extra FactorGraph used in the inference + algorithm. + :type extra_graphs: [FactorGraph] + """ + def __init__(self, model, observed, cost_function, policy, n_time_steps, initial_state_generator, extra_graphs=None, num_samples=3, ctx=None, dtype=None): + """ + :param model: The model to use to generate the next state from a state/action pair. + :param observed: Observed variables for the model. + :param cost_function: The cost function to evaluate state/action pairs on. + :param policy: The policy function to determine what action to take next from a particular state. + :param n_time_steps: How many time steps to roll forward using the model+policy to generate a trajectory. + :param initial_state_generator: Function that generates initial states for the model to begin at. + :param num_samples: How many sample trajectories to compute at once + """ + super(PILCOAlgorithm, self).__init__(model, observed, extra_graphs=extra_graphs) + self.cost_function = cost_function + self.policy = policy + self.initial_state_generator = initial_state_generator + self.n_time_steps = n_time_steps + self.num_samples = num_samples + self.dtype = dtype if dtype is not None else get_default_dtype() + self.mxnet_context = ctx if ctx is not None else get_default_device() + + + def compute(self, F, variables): + """ + Compute the PILCO algorithm's policy computation loop. + + 1. Generates a number of initial state + action pairs + 2. For each state+action pair: + 1. Predict the new state (s_t_plus_1) given the current state and action pair + 2. Compute the cost of being in that state + 3. 
Use the policy to compute the next action (a_t_plus_1) to take from s_t_plus_1 + 4. Repeat n_time_steps into the future, using the previous round's state/action pairs to roll forward. + 3. Return the total cost of all sample trajectories over time. + + :param F: the execution context (mxnet.ndarray or mxnet.symbol) + :type F: Python module + :param variables: the set of MXNet arrays that holds the values of + variables at runtime. + :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} + :returns: the outcome of the inference algorithm + :rtype: mxnet.NDArray or mxnet.Symbol + """ + s_0 = self.initial_state_generator(self.num_samples) + a_0 = self.policy(s_0) + a_t_plus_1 = a_0 + x_t = F.expand_dims(F.concat(s_0, a_0, dim=1), axis=1) + cost = 0 + for t in range(self.n_time_steps): + variables[self.model.X] = x_t + res = self.model.Y.factor.predict(F, variables, targets=[self.model.Y], num_samples=self.num_samples)[0] + s_t_plus_1 = res[0] + + cost = cost + self.cost_function(s_t_plus_1, a_t_plus_1) + + a_t_plus_1 = mx.nd.expand_dims(self.policy(s_t_plus_1), axis=2) + x_t = mx.nd.concat(s_t_plus_1, a_t_plus_1, dim=2) + total_cost = F.sum(cost) + return total_cost, total_cost diff --git a/mxfusion/inference/prediction.py b/mxfusion/inference/prediction.py index 40634a3..7c86d79 100644 --- a/mxfusion/inference/prediction.py +++ b/mxfusion/inference/prediction.py @@ -21,7 +21,8 @@ class ModulePredictionAlgorithm(SamplingAlgorithm): """ - A prediction algorithm for modules. The algorithm evaluates all the functions, draws samples from distributions and runs the predict method on all the modules. + A prediction algorithm for modules. The algorithm evaluates all the functions, draws samples from distributions and + runs the predict method on all the modules. 
:param model: the definition of the probabilistic model :type model: Model @@ -61,7 +62,8 @@ def compute(self, F, variables): if all(known): continue elif any(known): - raise InferenceError("Part of the outputs of the distribution " + f.__class__.__name__ + " has been observed!") + raise InferenceError("Part of the outputs of the distribution " + f.__class__.__name__ + + " has been observed!") outcome_uuid = [v.uuid for _, v in f.outputs] outcome = f.draw_samples( F=F, num_samples=self.num_samples, variables=variables, diff --git a/mxfusion/inference/score_function.py b/mxfusion/inference/score_function.py index b82a983..c96c891 100644 --- a/mxfusion/inference/score_function.py +++ b/mxfusion/inference/score_function.py @@ -85,7 +85,8 @@ class ScoreFunctionRBInference(ScoreFunctionInference): """ Implemented following the [Black Box Variational Inference](https://arxiv.org/abs/1401.0118) paper. - The addition of Rao-Blackwellization and Control Variates (RBCV) requires that the posterior passed in be of meanfield form (i.e. all posterior variables independent.) + The addition of Rao-Blackwellization and Control Variates (RBCV) requires that the posterior passed in be of + meanfield form (i.e. all posterior variables independent.) Terminology: Lambda - Posterior parameters @@ -173,7 +174,6 @@ def compute(self, F, variables): gradient_lambda = F.sum(grad) - # Robbins-Monro sequence?? gradient_log_L = gradient_lambda + gradient_theta @@ -181,10 +181,12 @@ def compute(self, F, variables): def _extract_descendant_blanket_params(self, graph, node): """ - Returns a set of the markov blankets of all of the descendants of the node in the graph, mapped to their parameter form. + Returns a set of the markov blankets of all of the descendants of the node in the graph, + mapped to their parameter form. """ if node.graph != graph.components_graph: - raise InferenceError("Graph of node and graph to find it's descendants in differ. 
These should match so something went wrong.") + raise InferenceError("Graph of node and graph to find it's descendants in differ. " + "These should match so something went wrong.") descendants = graph.get_descendants(node) varset = [graph.get_markov_blanket(d) for d in descendants] diff --git a/mxfusion/inference/variational.py b/mxfusion/inference/variational.py index a438763..eaaa0ab 100644 --- a/mxfusion/inference/variational.py +++ b/mxfusion/inference/variational.py @@ -18,10 +18,8 @@ class VariationalInference(InferenceAlgorithm): """ - The class of the Stochastic Variational Inference (SVI) algorithm. + The base class for Variational Inference (VI) algorithms. - :param num_samples: the number of samples used in estimating the variational lower bound - :type num_samples: int :param model: the definition of the probabilistic model :type model: Model :param posterior: the definition of the variational posterior of the probabilistic model diff --git a/mxfusion/models/factor_graph.py b/mxfusion/models/factor_graph.py index c6fc0c3..b1a5311 100644 --- a/mxfusion/models/factor_graph.py +++ b/mxfusion/models/factor_graph.py @@ -162,8 +162,7 @@ def ordered_factors(self): :rtype: A topologically sorted list of Factors in the graph. """ - return [node for node in nx.topological_sort(self.components_graph) - if isinstance(node, Factor)] + return [node for node in nx.topological_sort(self.components_graph) if isinstance(node, Factor)] @property def roots(self): @@ -192,9 +191,9 @@ def var_ties(self): def log_pdf(self, F, variables, targets=None): """ - Compute the logarithm of the probability/probability density of a set of random variables in the factor graph. The set of random - variables are specified in the "target" argument and any necessary conditional variables are specified in the "conditionals" argument. - Any relevant constants are specified in the "constants" argument. 
+ Compute the logarithm of the probability/probability density of a set of random variables in the factor graph. + The set of random variables are specified in the "target" argument and any necessary conditional variables are + specified in the "conditionals" argument. Any relevant constants are specified in the "constants" argument. :param F: the MXNet computation mode (``mxnet.symbol`` or ``mxnet.ndarray``). :param variables: The set of variables @@ -215,7 +214,9 @@ def log_pdf(self, F, variables, targets=None): outcome_uuid = [v.uuid for _, v in f.outputs] for v, uuid in zip(outcome, outcome_uuid): if uuid in variables: - warnings.warn('Function evaluation in FactorGraph.compute_log_prob_RT: the outcome variable '+str(uuid)+' of the function evaluation '+str(f)+' has already existed in the variable set.') + warnings.warn('Function evaluation in FactorGraph.compute_log_prob_RT: the outcome variable ' + + str(uuid) + ' of the function evaluation ' + str(f) + + ' has already existed in the variable set.') variables[uuid] = v elif isinstance(f, Distribution): if targets is None or f.random_variable.uuid in targets: @@ -232,13 +233,16 @@ def log_pdf(self, F, variables, targets=None): logL = logL + F.sum(expectation(F, f.log_pdf( F=F, variables=variables, targets=module_targets))) else: - raise ModelSpecificationError("There is an object in the factor graph that isn't a factor." + "That shouldn't happen.") + raise ModelSpecificationError("There is an object in the factor graph that isn't a factor." + + "That shouldn't happen.") return logL def draw_samples(self, F, variables, num_samples=1, targets=None): """ - Draw samples from the target variables of the Factor Graph. If the ``targets`` argument is None, draw samples from all the variables - that are *not* in the conditional variables. 
If the ``targets`` argument is given, this method returns a list of samples of variables in the order of the target argument, otherwise it returns a dict of samples where the keys are the UUIDs of variables and the values are the samples. + Draw samples from the target variables of the Factor Graph. If the ``targets`` argument is None, draw samples + from all the variables that are *not* in the conditional variables. If the ``targets`` argument is given, + this method returns a list of samples of variables in the order of the target argument, otherwise it returns a + dict of samples where the keys are the UUIDs of variables and the values are the samples. :param F: the MXNet computation mode (``mxnet.symbol`` or ``mxnet.ndarray``). :param variables: The set of variables @@ -258,7 +262,9 @@ def draw_samples(self, F, variables, num_samples=1, targets=None): outcome_uuid = [v.uuid for _, v in f.outputs] for v, uuid in zip(outcome, outcome_uuid): if uuid in variables: - warnings.warn('Function evaluation in FactorGraph.draw_samples_RT: the outcome of the function evaluation '+str(f)+' has already existed in the variable set.') + warnings.warn('Function evaluation in FactorGraph.draw_samples_RT: ' + 'the outcome of the function evaluation ' + str(f) + + ' has already existed in the variable set.') variables[uuid] = v samples[uuid] = v elif isinstance(f, Distribution): @@ -266,7 +272,8 @@ def draw_samples(self, F, variables, num_samples=1, targets=None): if all(known): continue elif any(known): - raise InferenceError("Part of the outputs of the distribution " + f.__class__.__name__ + " has been observed!") + raise InferenceError("Part of the outputs of the distribution " + + f.__class__.__name__ + " has been observed!") outcome_uuid = [v.uuid for _, v in f.outputs] outcome = f.draw_samples( F=F, num_samples=num_samples, variables=variables, always_return_tuple=True) @@ -282,7 +289,8 @@ def draw_samples(self, F, variables, num_samples=1, targets=None): variables[uuid] = v 
samples[uuid] = v else: - raise ModelSpecificationError("There is an object in the factor graph that isn't a factor." + "That shouldn't happen.") + raise ModelSpecificationError("There is an object in the factor graph that isn't a factor." + + "That shouldn't happen.") if targets: return tuple(samples[uuid] for uuid in targets) else: @@ -302,7 +310,7 @@ def remove_component(self, component): try: self.components_graph.remove_node(component) # implicitly removes edges except NetworkXError as e: - raise ModelSpecificationError("Attempted to remove a node "+str(component)+" that isn't in the graph.") + raise ModelSpecificationError("Attempted to remove a node " + str(component) + " that isn't in the graph.") if component.name is not None: @@ -322,14 +330,18 @@ def _replicate_class(self, **kwargs): def get_markov_blanket(self, node): """ - Gets the Markov Blanket for a node, which is the node's predecessors, the nodes successors, and those successors' other predecessors. + Gets the Markov Blanket for a node, which is the node's predecessors, the nodes successors, and those + successors' other predecessors. """ def get_variable_predecessors(node): return [v2 for k1,v1 in node.predecessors for k2,v2 in v1.predecessors if isinstance(v2, Variable)] + def get_variable_successors(node): return [v2 for k1,v1 in node.successors for k2,v2 in v1.successors if isinstance(v2, Variable)] + def flatten(node_list): return set([p for varset in node_list for p in varset]) + successors = set(get_variable_successors(node)) n = set([node]) pred = set(get_variable_predecessors(node)) @@ -381,8 +393,8 @@ def replace_subgraph(self, target_variable, new_subgraph): def extract_distribution_of(self, variable): """ - Extracts the distribution of the variable passed in, returning a replicated copy of the passed in variable with only its parent - subgraph attached (also replicated). 
+ Extracts the distribution of the variable passed in, returning a replicated copy of the passed in variable with + only its parent subgraph attached (also replicated). :param variable: The variable to extract the distribution from. :type variable: Variable @@ -400,14 +412,24 @@ def extract_distribution_function(component): return predecessor_direction, successor_direction return variable.replicate(replication_function=extract_distribution_function) - def clone(self, leaves=None): + def clone(self, leaves=None): + """ + Clones a model, maintaining the same functionality and topology. Replicates all of its ModelComponents, + while maintaining the same UUIDs. + + Starts upward from the leaves and copies everything in the graph recursively. + + :param leaves: If None, use the leaves in this model, otherwise use the provided leaves. + :return: the cloned model + """ new_model = self._replicate_class(name=self.name, verbose=self._verbose) return self._clone(new_model, leaves) def _clone(self, new_model, leaves=None): """ - Clones a model, maintaining the same functionality and topology. Replicates all of its ModelComponents, while maintaining the same UUIDs. + Clones a model, maintaining the same functionality and topology. Replicates all of its ModelComponents, + while maintaining the same UUIDs. Starts upward from the leaves and copies everything in the graph recursively.
@@ -415,13 +436,12 @@ def _clone(self, new_model, leaves=None): :returns: the cloned model """ - var_map = {} # from old model to new model + var_map = {} # from old model to new model leaves = self.leaves if leaves is None else leaves for v in leaves: if v.name is not None: - new_leaf = v.replicate(var_map=var_map, - replication_function=lambda x: ('recursive', 'recursive')) + new_leaf = v.replicate(var_map=var_map, replication_function=lambda x: ('recursive', 'recursive')) setattr(new_model, v.name, new_leaf) else: v.graph = new_model.graph @@ -430,7 +450,7 @@ def _clone(self, new_model, leaves=None): setattr(new_model, v.name, new_model[v.uuid]) return new_model - def get_parameters(self, excluded=None, include_inherited=False): + def get_parameters(self, excluded=None, include_inherited=True): """ Get all the parameters not in the excluded list. @@ -444,7 +464,8 @@ def get_parameters(self, excluded=None, include_inherited=False): if include_inherited: return [v for v in self.variables.values() if (v.type == VariableType.PARAMETER and v.uuid not in excluded)] else: - return [v for v in self.variables.values() if (v.type == VariableType.PARAMETER and v.uuid not in excluded and not v.isInherited)] + return [v for v in self.variables.values() if (v.type == VariableType.PARAMETER and v.uuid not in excluded + and not v.isInherited)] def get_constants(self): """ @@ -455,44 +476,45 @@ def get_constants(self): """ return [v for v in self.variables.values() if v.type == VariableType.CONSTANT] - @staticmethod - def reconcile_graphs(current_graphs, primary_previous_graph, secondary_previous_graphs=None, primary_current_graph=None): + def reconcile_graphs(current_graphs, primary_previous_graph, secondary_previous_graphs=None, + primary_current_graph=None): """ Reconciles two sets of graphs, matching the model components in the previous graph to the current graph. 
- This is primarily used when loading back a graph from a file and matching it to an existing in-memory graph in order to load the previous - graph's parameters correctly. - - :param current_graphs: A list of the graphs we are reconciling a loaded factor graph against. This must be a fully built set of graphs - generated through the model definition process. - :param primary_previous_graph: A graph which may have been loaded in from a file and be partially specified, or could be a full graph - built through model definition. - :param secondary_previous_graphs: A list of secondary graphs (e.g. posteriors) that share components with the primary_previous_graph. - :param primary_current_graph: Optional parameter to specify the primary_current_graph, otherwise it is taken to be the model in the - current_graphs (which should be unique). + This is primarily used when loading back a graph from a file and matching it to an existing in-memory graph in + order to load the previous graph's parameters correctly. + + :param current_graphs: A list of the graphs we are reconciling a loaded factor graph against. This must be a + fully built set of graphs generated through the model definition process. + :param primary_previous_graph: A graph which may have been loaded in from a file and be partially specified, or + could be a full graph built through model definition. + :param secondary_previous_graphs: A list of secondary graphs (e.g. posteriors) that share components with the + primary_previous_graph. + :param primary_current_graph: Optional parameter to specify the primary_current_graph, otherwise it is taken to + be the model in the current_graphs (which should be unique). 
:rtype: {previous ModelComponent : current ModelComponent} """ def update_with_named_components(previous_components, current_components, component_map, nodes_to_traverse_from): - name_pre = {c.name: c for c in previous_components if c.name} - name_cur = {c.name: c for c in current_components if c.name} + name_pre = {c.name: c for c in previous_components if c.name} + name_cur = {c.name: c for c in current_components if c.name} for name, previous_c in name_pre.items(): current_c = name_cur[name] component_map[previous_c.uuid] = current_c.uuid nodes_to_traverse_from[previous_c.uuid] = current_c.uuid - - from .model import Model component_map = {} nodes_to_traverse_from = {} current_graph = primary_current_graph if primary_current_graph is not None else current_graphs[0] secondary_current_graphs = current_graphs[1:] secondary_previous_graphs = secondary_previous_graphs if secondary_previous_graphs is not None else [] if len(secondary_current_graphs) != len(secondary_previous_graphs): - raise ModelSpecificationError("Different number of secondary graphs passed in {} {}".format(secondary_current_graphs, secondary_previous_graphs)) + raise ModelSpecificationError("Different number of secondary graphs passed in {} {}".format( + secondary_current_graphs, secondary_previous_graphs)) - update_with_named_components(primary_previous_graph.components.values(), current_graph.components.values(), component_map, nodes_to_traverse_from) + update_with_named_components(primary_previous_graph.components.values(), current_graph.components.values(), + component_map, nodes_to_traverse_from) # Reconcile the primary graph FactorGraph._reconcile_graph(nodes_to_traverse_from, component_map, @@ -503,7 +525,8 @@ def update_with_named_components(previous_components, current_components, compon secondary_previous_graphs): nodes_to_traverse_from = {pc: cc for pc, cc in component_map.items() if pc in pg.components.keys()} - update_with_named_components(pg.components.values(), 
cg.components.values(), component_map, nodes_to_traverse_from) + update_with_named_components(pg.components.values(), cg.components.values(), component_map, + nodes_to_traverse_from) FactorGraph._reconcile_graph( nodes_to_traverse_from, component_map, cg, pg) @@ -513,14 +536,16 @@ def update_with_named_components(previous_components, current_components, compon @staticmethod def _reconcile_graph(nodes_to_traverse_from, component_map, current_graph, previous_graph): """ - Traverses the components (breadth first) in nodes_to_traverse_from of the current_graph/previous_graph, matching components where possible and generating - new calls to _reconcile_graph where the graph is still incompletely traversed. This method makes no attempt to resolve ambiguities - in naming between the graphs and request the user to more completely specify names in their graph if such an ambiguity exists. Such + Traverses the components (breadth first) in nodes_to_traverse_from of the current_graph/previous_graph, + matching components where possible and generating new calls to _reconcile_graph where the graph is still + incompletely traversed. This method makes no attempt to resolve ambiguities in naming between the graphs and + request the user to more completely specify names in their graph if such an ambiguity exists. Such naming can be more completely specified by attaching names to each leaf node in the original graph. :param nodes_to_traverse_from: A list of items to traverse the graph upwards from. :type nodes_to_traverse_from: [previous ModelComponents] - :param component_map: The current mapping from the previous graph's MCs to the current_graph's MCs. This is used and modified during reconciliation. + :param component_map: The current mapping from the previous graph's MCs to the current_graph's MCs. + This is used and modified during reconciliation. 
:type component_map: {previous_graph ModelComponent : current_graph ModelComponent} :param current_graph: The current graph to match components against. :type current_graph: FactorGraph @@ -540,8 +565,10 @@ def reconcile_direction(direction, previous_c, current_c, new_level, component_m for edge_name, node in previous_neighbors: if node.uuid not in component_map: if edge_name in duplicate_names: - # TODO if all the other parts of the ambiguity are resolved, we have the answer still. Otherwise throw an exception - raise Exception("Multiple edges connecting unnamed nodes have the same name, this isn't supported currently.") # TODO Support the ambiguities :) + # TODO if all the other parts of the ambiguity are resolved, we have the answer still. + # Otherwise throw an exception + raise Exception("Multiple edges connecting unnamed nodes have the same name, " + "this isn't supported currently.") # TODO Support the ambiguities :) current_node = [item for name, item in current_neighbors if edge_name == name][0] component_map[node.uuid] = current_node.uuid new_level[node.uuid] = current_node.uuid @@ -550,10 +577,13 @@ def reconcile_direction(direction, previous_c, current_c, new_level, component_m component_map.update(module_component_map) new_level = {} for previous_c, current_c in nodes_to_traverse_from.items(): - reconcile_direction('predecessor', previous_graph[previous_c], current_graph[current_c], new_level, component_map) + reconcile_direction('predecessor', previous_graph[previous_c], current_graph[current_c], new_level, + component_map) """ - TODO Reconciling in both directions currently breaks the reconciliation process and can cause multiple previous_uuid's to map to the same current_uuid. It's unclear why that happens. - This shouldn't be necessary until we implement multi-output Factors though (and even then, only if not all the outputs are in a named chain). 
+ TODO Reconciling in both directions currently breaks the reconciliation process and can cause multiple + previous_uuid's to map to the same current_uuid. It's unclear why that happens. This shouldn't be necessary + until we implement multi-output Factors though (and even then, only if not all the outputs are in a + named chain). """ # reconcile_direction('successor', previous_graph[c], current_graph[current_c], new_level, component_map) if len(new_level) > 0: @@ -575,14 +605,15 @@ def load_from_json(self, json_graph): @staticmethod def load_graphs(graphs_list, existing_graphs=None): """ - Method to load back in a graph. The graphs should have been saved down using the save method, and be a JSON representation of the graph - generated by the [networkx](https://networkx.github.io) library. + Method to load back in a graph. The graphs should have been saved down using the save method, and be a JSON + representation of the graph generated by the [networkx](https://networkx.github.io) library. :param graphs_list: A list of raw json dicts loaded in from memory representing the FactorGraphs to create. :type graphs_list: list of dicts loaded in using the ModelComponentDecoder class. """ import json - existing_graphs = existing_graphs if existing_graphs is not None else [FactorGraph(graph['name']) for graph in graphs_list] + existing_graphs = existing_graphs if existing_graphs is not None else [FactorGraph(graph['name']) + for graph in graphs_list] return [existing_graph.load_from_json(graph) for existing_graph, graph in zip(existing_graphs, graphs_list)] def as_json(self): @@ -598,8 +629,8 @@ def as_json(self): @staticmethod def save(graph_file, json_graphs): """ - Method to save this graph down into a file. The graph file will be saved down as a JSON representation of the graph generated by the - [networkx](https://networkx.github.io) library. + Method to save this graph down into a file. 
The graph file will be saved down as a JSON representation of the + graph generated by the [networkx](https://networkx.github.io) library. :param graph_file: The file containing the primary model to load back for this inference algorithm. :type graph_file: str of filename diff --git a/mxfusion/modules/gp_modules/__init__.py b/mxfusion/modules/gp_modules/__init__.py index 14ee835..b4a9bb5 100644 --- a/mxfusion/modules/gp_modules/__init__.py +++ b/mxfusion/modules/gp_modules/__init__.py @@ -28,6 +28,6 @@ __all__ = ['gp_regression', 'sparsegp_regression', 'svgp_regression'] -from .gp_regression import GPRegression -from .sparsegp_regression import SparseGPRegression +from .gp_regression import GPRegression, GPRegressionSamplingPrediction +from .sparsegp_regression import SparseGPRegression, SparseGPRegressionSamplingPrediction from .svgp_regression import SVGPRegression diff --git a/mxfusion/modules/gp_modules/gp_regression.py b/mxfusion/modules/gp_modules/gp_regression.py index a3de449..7ba80c6 100644 --- a/mxfusion/modules/gp_modules/gp_regression.py +++ b/mxfusion/modules/gp_modules/gp_regression.py @@ -40,6 +40,7 @@ def __init__(self, model, posterior, observed, jitter=0.): self.jitter = jitter def compute(self, F, variables): + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] Y = variables[self.model.Y] noise_var = variables[self.model.noise_var] @@ -59,8 +60,8 @@ def compute(self, F, variables): self.jitter L = F.linalg.potrf(K) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] Y = Y - mean LinvY = F.linalg.trsm(L, Y) logdet_l = F.linalg.sumlogdiag(F.abs(L)) @@ -89,6 +90,18 @@ def __init__(self, model, observed, num_samples=1, target_variables=None, rand_gen def compute(self, F, variables): + """ + The method for the computation of the sampling algorithm + + :param F: the execution context (mxnet.ndarray or mxnet.symbol) + :type F: Python module + :param variables: the 
set of MXNet arrays that holds the values of + variables at runtime. + :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} + :returns: the outcome of the inference algorithm + :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol + """ + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] noise_var = variables[self.model.noise_var] N = X.shape[-2] @@ -110,8 +123,8 @@ def compute(self, F, variables): dtype=self.model.F.factor.dtype) y_samples = F.linalg.trmm(L, die) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] y_samples = y_samples + mean samples = {self.model.Y.uuid: y_samples} @@ -131,6 +144,18 @@ def __init__(self, model, posterior, observed, noise_free=True, self.diagonal_variance = diagonal_variance def compute(self, F, variables): + """ + The method for the computation of the sampling algorithm + + :param F: the execution context (mxnet.ndarray or mxnet.symbol) + :type F: Python module + :param variables: the set of MXNet arrays that holds the values of + variables at runtime. + :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} + :returns: the outcome of the inference algorithm + :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol + """ + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] N = X.shape[-2] noise_var = variables[self.model.noise_var] @@ -147,8 +172,8 @@ def compute(self, F, variables): LinvKxt = F.linalg.trsm(L, Kxt) mu = F.linalg.gemm2(LinvKxt, LinvY, True, False) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] mu = mu + mean if self.diagonal_variance: @@ -172,6 +197,9 @@ def compute(self, F, variables): class GPRegressionSamplingPrediction(SamplingAlgorithm): + """ + The method for drawing samples from the posterior distribution of a Gaussian process regression model. 
+ """ def __init__(self, model, posterior, observed, rand_gen=None, noise_free=True, diagonal_variance=True, jitter=0.): super(GPRegressionSamplingPrediction, self).__init__( @@ -183,6 +211,18 @@ def __init__(self, model, posterior, observed, rand_gen=None, self.jitter = jitter def compute(self, F, variables): + """ + The method for the computation of the sampling algorithm + + :param F: the execution context (mxnet.ndarray or mxnet.symbol) + :type F: Python module + :param variables: the set of MXNet arrays that holds the values of + variables at runtime. + :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} + :returns: the outcome of the inference algorithm + :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol + """ + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] N = X.shape[-2] noise_var = variables[self.model.noise_var] @@ -199,8 +239,8 @@ def compute(self, F, variables): LinvKxt = F.linalg.trsm(L, Kxt) mu = F.linalg.gemm2(LinvKxt, LinvY, True, False) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] mu = mu + mean if self.diagonal_variance: @@ -247,8 +287,8 @@ class GPRegression(Module): :type kernel: Kernel :param noise_var: the variance of the Gaussian likelihood :type noise_var: Variable - :param mean_func: the mean function of Gaussian process. - :type mean_func: MXFusionFunction + :param mean: the mean of Gaussian process. + :type mean: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator). :type rand_gen: RandomGenerator :param dtype: the data type for float point numbers. 
@@ -257,7 +297,7 @@ class GPRegression(Module): :type ctx: None or mxnet.cpu or mxnet.gpu """ - def __init__(self, X, kernel, noise_var, mean_func=None, rand_gen=None, + def __init__(self, X, kernel, noise_var, mean=None, rand_gen=None, dtype=None, ctx=None): if not isinstance(X, Variable): X = Variable(value=X) @@ -265,21 +305,26 @@ def __init__(self, X, kernel, noise_var, mean_func=None, rand_gen=None, noise_var = Variable(value=noise_var) inputs = [('X', X), ('noise_var', noise_var)] input_names = [k for k, _ in inputs] + if mean is not None: + inputs.append(('mean', mean)) + input_names.append('mean') + self._has_mean = True + else: + self._has_mean = False output_names = ['random_variable'] super(GPRegression, self).__init__( inputs=inputs, outputs=None, input_names=input_names, output_names=output_names, rand_gen=rand_gen, dtype=dtype, ctx=ctx) - self.mean_func = mean_func self.kernel = kernel - def _generate_outputs(self, output_shapes=None): + def _generate_outputs(self, output_shapes): """ Generate the output of the module with given output_shapes. 
- :param output_shape: the shapes of all the output variables - :type output_shape: {str: tuple} + :param output_shapes: the shapes of all the output variables + :type output_shapes: {str: tuple} """ - if output_shapes is None: + if output_shapes['random_variable'] is None: Y_shape = self.X.shape[:-1] + (1,) else: Y_shape = output_shapes['random_variable'] @@ -293,15 +338,19 @@ def _build_module_graphs(self): graph = Model(name='gp_regression') graph.X = self.X.replicate_self() graph.noise_var = self.noise_var.replicate_self() + if self._has_mean: + mean = self.mean.replicate_self() + graph.mean = mean + else: + mean = None graph.F = GaussianProcess.define_variable( X=graph.X, kernel=self.kernel, shape=Y.shape, - mean_func=self.mean_func, rand_gen=self._rand_gen, + mean=mean, rand_gen=self._rand_gen, dtype=self.dtype, ctx=self.ctx) graph.Y = Y.replicate_self() graph.Y.set_prior(Normal( mean=graph.F, variance=broadcast_to(graph.noise_var, graph.Y.shape), rand_gen=self._rand_gen, dtype=self.dtype, ctx=self.ctx)) - graph.mean_func = self.mean_func graph.kernel = graph.F.factor.kernel # The posterior graph is used to store parameters for prediction post = Posterior(graph) @@ -321,8 +370,7 @@ def _attach_default_inference_algorithms(self): [v for k, v in self.outputs] self.attach_log_pdf_algorithms( targets=self.output_names, conditionals=self.input_names, - algorithm=GPRegressionLogPdf(self._module_graph, self._extra_graphs[0], - observed), + algorithm=GPRegressionLogPdf(self._module_graph, self._extra_graphs[0], observed), alg_name='gp_log_pdf') observed = [v for k, v in self.inputs] @@ -339,7 +387,7 @@ def _attach_default_inference_algorithms(self): alg_name='gp_predict') @staticmethod - def define_variable(X, kernel, noise_var, shape=None, mean_func=None, + def define_variable(X, kernel, noise_var, shape=None, mean=None, rand_gen=None, dtype=None, ctx=None): """ Creates and returns a variable drawn from a Gaussian process regression. 
@@ -354,8 +402,8 @@ def define_variable(X, kernel, noise_var, shape=None, mean_func=None, :param shape: the shape of the random variable(s) (the default shape is the same shape as *X* but the last dimension is changed to one.) :type shape: tuple or [tuple] - :param mean_func: the mean function of Gaussian process - :type mean_func: MXFusionFunction + :param mean: the mean of Gaussian process. + :type mean: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator) :type rand_gen: RandomGenerator :param dtype: the data type for float point numbers @@ -364,7 +412,7 @@ def define_variable(X, kernel, noise_var, shape=None, mean_func=None, :type ctx: None or mxnet.cpu or mxnet.gpu """ gp = GPRegression( - X=X, kernel=kernel, noise_var=noise_var, mean_func=mean_func, + X=X, kernel=kernel, noise_var=noise_var, mean=mean, rand_gen=rand_gen, dtype=dtype, ctx=ctx) gp._generate_outputs({'random_variable': shape}) return gp.random_variable @@ -376,5 +424,5 @@ def replicate_self(self, attribute_map=None): rep = super(GPRegression, self).replicate_self(attribute_map) rep.kernel = self.kernel.replicate_self(attribute_map) - rep.mean_func = None if self.mean_func is None else self.mean_func.replicate_self(attribute_map) + rep._has_mean = self._has_mean return rep diff --git a/mxfusion/modules/gp_modules/sparsegp_regression.py b/mxfusion/modules/gp_modules/sparsegp_regression.py index 3379243..57b7e48 100644 --- a/mxfusion/modules/gp_modules/sparsegp_regression.py +++ b/mxfusion/modules/gp_modules/sparsegp_regression.py @@ -40,6 +40,18 @@ def __init__(self, model, posterior, observed, jitter=0.): self.jitter = jitter def compute(self, F, variables): + """ + The method for the computation of the sampling algorithm + + :param F: the execution context (mxnet.ndarray or mxnet.symbol) + :type F: Python module + :param variables: the set of MXNet arrays that holds the values of + variables at runtime. 
+ :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} + :returns: the outcome of the inference algorithm + :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol + """ + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] Y = variables[self.model.Y] Z = variables[self.model.inducing_inputs] @@ -69,8 +81,8 @@ def compute(self, F, variables): F.broadcast_div(F.linalg.syrk(LinvKuf), noise_var_m) LA = F.linalg.potrf(A) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] Y = Y - mean LAInvLinvKufY = F.linalg.trsm(LA, F.linalg.gemm2(LinvKuf, Y)) @@ -105,6 +117,18 @@ def __init__(self, model, posterior, observed, target_variables=None, self.diagonal_variance = diagonal_variance def compute(self, F, variables): + """ + The method for the computation of the sampling algorithm + + :param F: the execution context (mxnet.ndarray or mxnet.symbol) + :type F: Python module + :param variables: the set of MXNet arrays that holds the values of + variables at runtime. 
+ :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} + :returns: the outcome of the inference algorithm + :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol + """ + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] N = X.shape[-2] Z = variables[self.model.inducing_inputs] @@ -121,8 +145,8 @@ def compute(self, F, variables): Kxt = kern.K(F, Z, X, **kern_params) mu = F.linalg.gemm2(Kxt, wv, True, False) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] mu = mu + mean LinvKxt = F.linalg.trsm(L, Kxt) @@ -162,6 +186,18 @@ def __init__(self, model, posterior, observed, rand_gen=None, self.jitter = jitter def compute(self, F, variables): + """ + The method for the computation of the sampling algorithm + + :param F: the execution context (mxnet.ndarray or mxnet.symbol) + :type F: Python module + :param variables: the set of MXNet arrays that holds the values of + variables at runtime. + :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} + :returns: the outcome of the inference algorithm + :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol + """ + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] N = X.shape[-2] Z = variables[self.model.inducing_inputs] @@ -178,8 +214,8 @@ def compute(self, F, variables): Kxt = kern.K(F, Z, X, **kern_params) mu = F.linalg.gemm2(Kxt, wv, True, False) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] mu = mu + mean LinvKxt = F.linalg.trsm(L, Kxt) @@ -231,12 +267,13 @@ class SparseGPRegression(Module): :type kernel: Kernel :param noise_var: the variance of the Gaussian likelihood :type noise_var: Variable - :param inducing_inputs: the inducing inputs of the sparse GP (optional). This variable will be auto-generated if not specified. + :param inducing_inputs: the inducing inputs of the sparse GP (optional). 
This variable will be auto-generated + if not specified. :type inducing_inputs: Variable - :param inducing_num: the number of inducing points of sparse GP (default: 10) - :type inducing_num: int - :param mean_func: the mean function of Gaussian process. - :type mean_func: MXFusionFunction + :param num_inducing: the number of inducing points of sparse GP (default: 10) + :type num_inducing: int + :param mean: the mean of Gaussian process. + :type mean: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator). :type rand_gen: RandomGenerator :param dtype: the data type for float point numbers. @@ -246,7 +283,7 @@ class SparseGPRegression(Module): """ def __init__(self, X, kernel, noise_var, inducing_inputs=None, - num_inducing=10, mean_func=None, + num_inducing=10, mean=None, rand_gen=None, dtype=None, ctx=None): if not isinstance(X, Variable): X = Variable(value=X) @@ -257,21 +294,26 @@ def __init__(self, X, kernel, noise_var, inducing_inputs=None, inputs = [('X', X), ('inducing_inputs', inducing_inputs), ('noise_var', noise_var)] input_names = [k for k, _ in inputs] + if mean is not None: + inputs.append(('mean', mean)) + input_names.append('mean') + self._has_mean = True + else: + self._has_mean = False output_names = ['random_variable'] super(SparseGPRegression, self).__init__( inputs=inputs, outputs=None, input_names=input_names, output_names=output_names, rand_gen=rand_gen, dtype=dtype, ctx=ctx) - self.mean_func = mean_func self.kernel = kernel def _generate_outputs(self, output_shapes=None): """ Generate the output of the module with given output_shapes. 
- :param output_shape: the shapes of all the output variables - :type output_shape: {str: tuple} + :param output_shapes: the shapes of all the output variables + :type output_shapes: {str: tuple} """ - if output_shapes is None: + if output_shapes['random_variable'] is None: Y_shape = self.X.shape[:-1] + (1,) else: Y_shape = output_shapes['random_variable'] @@ -290,25 +332,24 @@ def _build_module_graphs(self): graph.U = GaussianProcess.define_variable( X=graph.inducing_inputs, kernel=self.kernel, shape=(graph.inducing_inputs.shape[0], Y.shape[-1]), - mean_func=self.mean_func, rand_gen=self._rand_gen, dtype=self.dtype, - ctx=self.ctx) + rand_gen=self._rand_gen, dtype=self.dtype, ctx=self.ctx) + if self._has_mean: + mean = self.mean.replicate_self() + graph.mean = mean + else: + mean = None graph.F = ConditionalGaussianProcess.define_variable( X=graph.X, X_cond=graph.inducing_inputs, Y_cond=graph.U, - kernel=self.kernel, shape=Y.shape, mean_func=self.mean_func, + kernel=self.kernel, shape=Y.shape, mean=mean, rand_gen=self._rand_gen, dtype=self.dtype, ctx=self.ctx) graph.Y = Y.replicate_self() graph.Y.set_prior(Normal( mean=graph.F, variance=broadcast_to(graph.noise_var, graph.Y.shape), rand_gen=self._rand_gen, dtype=self.dtype, ctx=self.ctx)) - graph.mean_func = self.mean_func graph.kernel = graph.U.factor.kernel post = Posterior(graph) - # TODO: allow cloning kernel to be in both model and posterior. - # post.F.assign_factor(ConditionalGaussianProcess( - # X=post.X, X_cond=post.inducing_inputs, Y_cond=post.U, - # kernel=self.kernel, mean_func=self.mean_func, - # rand_gen=self.rand_gen, dtype=self.dtype, ctx=self.ctx)) - # post.U.assign_factor(MultivariateNormal()) + # The posterior graph here is used as the place holder + # intermediate inference results, which will be used for prediction. 
post.L = Variable(shape=(M, M)) post.LA = Variable(shape=(M, M)) post.wv = Variable(shape=(M, Y.shape[-1])) @@ -325,7 +366,8 @@ def _attach_default_inference_algorithms(self): [v for k, v in self.outputs] self.attach_log_pdf_algorithms( targets=self.output_names, conditionals=self.input_names, - algorithm=SparseGPRegressionLogPdf(self._module_graph, self._extra_graphs[0], observed), alg_name='sgp_log_pdf') + algorithm=SparseGPRegressionLogPdf(self._module_graph, self._extra_graphs[0], observed), + alg_name='sgp_log_pdf') observed = [v for k, v in self.inputs] self.attach_draw_samples_algorithms( @@ -342,7 +384,7 @@ def _attach_default_inference_algorithms(self): @staticmethod def define_variable(X, kernel, noise_var, shape=None, inducing_inputs=None, - num_inducing=10, mean_func=None, rand_gen=None, + num_inducing=10, mean=None, rand_gen=None, dtype=None, ctx=None): """ Creates and returns a variable drawn from a sparse Gaussian process regression. @@ -356,12 +398,13 @@ def define_variable(X, kernel, noise_var, shape=None, inducing_inputs=None, :param shape: the shape of the random variable(s) (the default shape is the same shape as *X* but the last dimension is changed to one.) :type shape: tuple or [tuple] - :param inducing_inputs: the inducing inputs of the sparse GP (optional). This variable will be auto-generated if not specified. + :param inducing_inputs: the inducing inputs of the sparse GP (optional). This variable will be auto-generated + if not specified. :type inducing_inputs: Variable - :param inducing_num: the number of inducing points of sparse GP (default: 10) - :type inducing_num: int - :param mean_func: the mean function of Gaussian process. - :type mean_func: MXFusionFunction + :param num_inducing: the number of inducing points of sparse GP (default: 10) + :type num_inducing: int + :param mean: the mean of Gaussian process. + :type mean: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator). 
:type rand_gen: RandomGenerator :param dtype: the data type for float point numbers. @@ -372,7 +415,7 @@ def define_variable(X, kernel, noise_var, shape=None, inducing_inputs=None, gp = SparseGPRegression( X=X, kernel=kernel, noise_var=noise_var, inducing_inputs=inducing_inputs, num_inducing=num_inducing, - mean_func=mean_func, rand_gen=rand_gen, dtype=dtype, ctx=ctx) + mean=mean, rand_gen=rand_gen, dtype=dtype, ctx=ctx) gp._generate_outputs({'random_variable': shape}) return gp.random_variable @@ -383,5 +426,5 @@ def replicate_self(self, attribute_map=None): rep = super(SparseGPRegression, self).replicate_self(attribute_map) rep.kernel = self.kernel.replicate_self(attribute_map) - rep.mean_func = None if self.mean_func is None else self.mean_func.replicate_self(attribute_map) + rep._has_mean = self._has_mean return rep diff --git a/mxfusion/modules/gp_modules/svgp_regression.py b/mxfusion/modules/gp_modules/svgp_regression.py index 5161d74..bfbda08 100644 --- a/mxfusion/modules/gp_modules/svgp_regression.py +++ b/mxfusion/modules/gp_modules/svgp_regression.py @@ -31,7 +31,8 @@ class SVGPRegressionLogPdf(VariationalInference): """ - The inference algorithm for computing the variational lower bound of the stochastic variational Gaussian process with Gaussian likelihood. + The inference algorithm for computing the variational lower bound of the stochastic variational Gaussian process + with Gaussian likelihood. 
""" def __init__(self, model, posterior, observed, jitter=0.): super(SVGPRegressionLogPdf, self).__init__( @@ -40,6 +41,7 @@ def __init__(self, model, posterior, observed, jitter=0.): self.jitter = jitter def compute(self, F, variables): + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] Y = variables[self.model.Y] Z = variables[self.model.inducing_inputs] @@ -56,7 +58,13 @@ def compute(self, F, variables): X, Y, Z, noise_var, mu, S_W, S_diag, kern_params = arrays_as_samples( F, [X, Y, Z, noise_var, mu, S_W, S_diag, kern_params]) - noise_var_m = F.expand_dims(noise_var, axis=-2) + if noise_var.ndim == 2: # it is heteroscedastic noise, when ndim == 3 + noise_var = F.expand_dims(noise_var, axis=-2) + + if noise_var.shape[-1] == 1: + beta_sum = D*F.sum(1/noise_var, axis=-1) + else: + beta_sum = F.sum(1/noise_var, axis=-1) Kuu = kern.K(F, Z, **kern_params) if self.jitter > 0.: @@ -67,33 +75,35 @@ def compute(self, F, variables): S = F.linalg.syrk(S_W) + make_diagonal(F, S_diag) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] Y = Y - mean - psi1Y = F.linalg.gemm2(Kuf, Y, False, False) + psi1Y = F.linalg.gemm2(Kuf, Y/noise_var, False, False) L = F.linalg.potrf(Kuu) Ls = F.linalg.potrf(S) LinvLs = F.linalg.trsm(L, Ls) Linvmu = F.linalg.trsm(L, mu) LinvKuf = F.linalg.trsm(L, Kuf) - LinvKufY = F.linalg.trsm(L, psi1Y)/noise_var_m - LmInvPsi2LmInvT = F.linalg.syrk(LinvKuf)/noise_var_m - LinvSLinvT = F.linalg.syrk(LinvLs) - LmInvSmuLmInvT = LinvSLinvT*D + F.linalg.syrk(Linvmu) + KfuKuuInvmu = F.linalg.gemm2(LinvKuf, Linvmu, True, False) + KfuKuuInvLs = F.linalg.gemm2(LinvKuf, LinvLs, True, False) + + LinvKufY = F.linalg.trsm(L, psi1Y) KL_u = (M/2. + F.linalg.sumlogdiag(Ls))*D - F.linalg.sumlogdiag(L)*D\ - F.sum(F.sum(F.square(LinvLs), axis=-1), axis=-1)/2.*D \ - F.sum(F.sum(F.square(Linvmu), axis=-1), axis=-1)/2. logL = -F.sum(F.sum(F.square(Y)/noise_var + np.log(2. 
* np.pi) + - F.log(noise_var_m), axis=-1), axis=-1)/2. - logL = logL - D/2.*F.sum(Kff_diag/noise_var, axis=-1) - logL = logL - F.sum(F.sum(LmInvSmuLmInvT*LmInvPsi2LmInvT, axis=-1), + F.log(noise_var), axis=-1), axis=-1)/2. + logL = logL - F.sum(Kff_diag*beta_sum, axis=-1)/2. + logL = logL - F.sum(F.sum(F.square(KfuKuuInvmu)/noise_var, axis=-1), + axis=-1)/2. + logL = logL - F.sum(F.sum(F.square(KfuKuuInvLs)*F.expand_dims(beta_sum, axis=-1), axis=-1), + axis=-1)/2. + logL = logL + F.sum(F.sum(F.square(LinvKuf)*F.expand_dims(beta_sum, axis=-2), axis=-1), axis=-1)/2. - logL = logL + F.sum(F.sum(F.square(LinvKuf)/noise_var_m, axis=-1), - axis=-1)*D/2. logL = logL + F.sum(F.sum(Linvmu*LinvKufY, axis=-1), axis=-1) logL = self.log_pdf_scaling*logL + KL_u return logL @@ -109,6 +119,18 @@ def __init__(self, model, posterior, observed, noise_free=True, self.diagonal_variance = diagonal_variance def compute(self, F, variables): + """ + The method for the computation of the sampling algorithm + + :param F: the execution context (mxnet.ndarray or mxnet.symbol) + :type F: Python module + :param variables: the set of MXNet arrays that holds the values of + variables at runtime. 
+ :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} + :returns: the outcome of the inference algorithm + :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol + """ + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] N = X.shape[-2] Z = variables[self.model.inducing_inputs] @@ -135,8 +157,8 @@ def compute(self, F, variables): Kxt = kern.K(F, Z, X, **kern_params) mu = F.linalg.gemm2(Kxt, wv, True, False) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] mu = mu + mean LinvKxt = F.linalg.trsm(L, Kxt) @@ -145,15 +167,19 @@ def compute(self, F, variables): tmp = F.linalg.gemm2(LinvSLinvT, LinvKxt) var = Ktt - F.sum(F.square(LinvKxt), axis=-2) + \ F.sum(tmp*LinvKxt, axis=-2) + var = F.expand_dims(var, axis=-1) if not self.noise_free: - var += noise_var + var = var + noise_var else: Ktt = kern.K(F, X, **kern_params) tmp = F.linalg.gemm2(LinvSLinvT, LinvKxt) var = Ktt - F.linalg.syrk(LinvKxt, True) + \ F.linalg.gemm2(LinvKxt, tmp, True, False) + var = F.expand_dims(var, axis=-1) if not self.noise_free: - var += F.eye(N, dtype=X.dtype) * noise_var + var = var + \ + F.reshape(F.eye(N, dtype=X.dtype), shape=(1, N, N, 1)) * \ + F.expand_dims(noise_var, axis=-2) outcomes = {self.model.Y.uuid: (mu, var)} @@ -175,6 +201,18 @@ def __init__(self, model, posterior, observed, rand_gen=None, self.jitter = jitter def compute(self, F, variables): + """ + The method for the computation of the sampling algorithm + + :param F: the execution context (mxnet.ndarray or mxnet.symbol) + :type F: Python module + :param variables: the set of MXNet arrays that holds the values of + variables at runtime. 
+ :type variables: {str(UUID): MXNet NDArray or MXNet Symbol} + :returns: the outcome of the inference algorithm + :rtype: mxnet.ndarray.ndarray.NDArray or mxnet.symbol.symbol.Symbol + """ + has_mean = self.model.F.factor.has_mean X = variables[self.model.X] N = X.shape[-2] Z = variables[self.model.inducing_inputs] @@ -186,6 +224,9 @@ def compute(self, F, variables): kern = self.model.kernel kern_params = kern.fetch_parameters(variables) + X, Z, noise_var, mu, S_W, S_diag, kern_params = arrays_as_samples( + F, [X, Z, noise_var, mu, S_W, S_diag, kern_params]) + S = F.linalg.syrk(S_W) + make_diagonal(F, S_diag) Kuu = kern.K(F, Z, **kern_params) @@ -201,8 +242,8 @@ def compute(self, F, variables): Kxt = kern.K(F, Z, X, **kern_params) mu = F.linalg.gemm2(Kxt, wv, True, False) - if self.model.mean_func is not None: - mean = self.model.mean_func(F, X) + if has_mean: + mean = variables[self.model.mean] mu = mu + mean LinvKxt = F.linalg.trsm(L, Kxt) @@ -213,7 +254,8 @@ def compute(self, F, variables): F.sum(tmp*LinvKxt, axis=-2) if not self.noise_free: var += noise_var - die = self._rand_gen.sample_normal(shape=(self.num_samples,) + mu.shape[1:], dtype=self.model.F.factor.dtype) + die = self._rand_gen.sample_normal(shape=(self.num_samples,) + mu.shape[1:], + dtype=self.model.F.factor.dtype) samples = mu + die * F.sqrt(F.expand_dims(var, axis=-1)) else: Ktt = kern.K(F, X, **kern_params) @@ -250,12 +292,13 @@ class SVGPRegression(Module): :type kernel: Kernel :param noise_var: the variance of the Gaussian likelihood :type noise_var: Variable - :param inducing_inputs: the inducing inputs of the sparse GP (optional). This variable will be auto-generated if not specified. + :param inducing_inputs: the inducing inputs of the sparse GP (optional). This variable will be auto-generated + if not specified. 
:type inducing_inputs: Variable - :param inducing_num: the number of inducing points of sparse GP (default: 10) - :type inducing_num: int - :param mean_func: the mean function of Gaussian process. - :type mean_func: MXFusionFunction + :param num_inducing: the number of inducing points of sparse GP (default: 10) + :type num_inducing: int + :param mean: the mean of Gaussian process. + :type mean: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator). :type rand_gen: RandomGenerator :param dtype: the data type for float point numbers. @@ -265,32 +308,39 @@ class SVGPRegression(Module): """ def __init__(self, X, kernel, noise_var, inducing_inputs=None, - num_inducing=10, mean_func=None, + num_inducing=10, mean=None, rand_gen=None, dtype=None, ctx=None): if not isinstance(X, Variable): X = Variable(value=X) if not isinstance(noise_var, Variable): noise_var = Variable(value=noise_var) if inducing_inputs is None: - inducing_inputs = Variable(shape=(num_inducing, kernel.input_dim)) + inducing_inputs = Variable( + shape=(num_inducing, kernel.input_dim), + initial_value=np.random.randn(num_inducing, kernel.input_dim)) inputs = [('X', X), ('inducing_inputs', inducing_inputs), ('noise_var', noise_var)] input_names = [k for k, _ in inputs] + if mean is not None: + inputs.append(('mean', mean)) + input_names.append('mean') + self._has_mean = True + else: + self._has_mean = False output_names = ['random_variable'] super(SVGPRegression, self).__init__( inputs=inputs, outputs=None, input_names=input_names, output_names=output_names, dtype=dtype, ctx=ctx) - self.mean_func = mean_func self.kernel = kernel def _generate_outputs(self, output_shapes=None): """ Generate the output of the module with given output_shapes. 
- :param output_shape: the shapes of all the output variables - :type output_shape: {str: tuple} + :param output_shapes: the shapes of all the output variables + :type output_shapes: {str: tuple} """ - if output_shapes is None: + if output_shapes['random_variable'] is None: Y_shape = self.X.shape[:-1] + (1,) else: Y_shape = output_shapes['random_variable'] @@ -309,17 +359,20 @@ def _build_module_graphs(self): graph.U = GaussianProcess.define_variable( X=graph.inducing_inputs, kernel=self.kernel, shape=(graph.inducing_inputs.shape[0], Y.shape[-1]), - mean_func=self.mean_func, rand_gen=self._rand_gen, dtype=self.dtype, - ctx=self.ctx) + rand_gen=self._rand_gen, dtype=self.dtype, ctx=self.ctx) + if self._has_mean: + mean = self.mean.replicate_self() + graph.mean = mean + else: + mean = None graph.F = ConditionalGaussianProcess.define_variable( X=graph.X, X_cond=graph.inducing_inputs, Y_cond=graph.U, - kernel=self.kernel, shape=Y.shape, mean_func=self.mean_func, + kernel=self.kernel, shape=Y.shape, mean=mean, rand_gen=self._rand_gen, dtype=self.dtype, ctx=self.ctx) graph.Y = Y.replicate_self() graph.Y.set_prior(Normal( mean=graph.F, variance=broadcast_to(graph.noise_var, graph.Y.shape), rand_gen=self._rand_gen, dtype=self.dtype, ctx=self.ctx)) - graph.mean_func = self.mean_func graph.kernel = graph.U.factor.kernel post = Posterior(graph) post.qU_cov_diag = Variable(shape=(M,), transformation=PositiveTransformation()) @@ -357,10 +410,11 @@ def _attach_default_inference_algorithms(self): @staticmethod def define_variable(X, kernel, noise_var, shape=None, inducing_inputs=None, - num_inducing=10, mean_func=None, rand_gen=None, + num_inducing=10, mean=None, rand_gen=None, dtype=None, ctx=None): """ - Creates and returns a variable drawn from a Stochastic variational sparse Gaussian process regression with Gaussian likelihood. + Creates and returns a variable drawn from a Stochastic variational sparse Gaussian process regression with + Gaussian likelihood. 
:param X: the input variables on which the random variables are conditioned. :type X: Variable @@ -371,12 +425,13 @@ def define_variable(X, kernel, noise_var, shape=None, inducing_inputs=None, :param shape: the shape of the random variable(s) (the default shape is the same shape as *X* but the last dimension is changed to one.) :type shape: tuple or [tuple] - :param inducing_inputs: the inducing inputs of the sparse GP (optional). This variable will be auto-generated if not specified. + :param inducing_inputs: the inducing inputs of the sparse GP (optional). This variable will be auto-generated + if not specified. :type inducing_inputs: Variable - :param inducing_num: the number of inducing points of sparse GP (default: 10) - :type inducing_num: int - :param mean_func: the mean function of Gaussian process. - :type mean_func: MXFusionFunction + :param num_inducing: the number of inducing points of sparse GP (default: 10) + :type num_inducing: int + :param mean: the mean of Gaussian process. + :type mean: Variable :param rand_gen: the random generator (default: MXNetRandomGenerator). :type rand_gen: RandomGenerator :param dtype: the data type for float point numbers. 
@@ -387,7 +442,7 @@ def define_variable(X, kernel, noise_var, shape=None, inducing_inputs=None, gp = SVGPRegression( X=X, kernel=kernel, noise_var=noise_var, inducing_inputs=inducing_inputs, num_inducing=num_inducing, - mean_func=mean_func, rand_gen=rand_gen, dtype=dtype, ctx=ctx) + mean=mean, rand_gen=rand_gen, dtype=dtype, ctx=ctx) gp._generate_outputs({'random_variable': shape}) return gp.random_variable @@ -398,5 +453,5 @@ def replicate_self(self, attribute_map=None): rep = super(SVGPRegression, self).replicate_self(attribute_map) rep.kernel = self.kernel.replicate_self(attribute_map) - rep.mean_func = None if self.mean_func is None else self.mean_func.replicate_self(attribute_map) + rep._has_mean = self._has_mean return rep diff --git a/mxfusion/modules/module.py b/mxfusion/modules/module.py index 588b762..bbedbd4 100644 --- a/mxfusion/modules/module.py +++ b/mxfusion/modules/module.py @@ -55,8 +55,7 @@ def __init__(self, inputs, outputs, input_names, super(Module, self).__init__( inputs=inputs, outputs=outputs, input_names=input_names, output_names=output_names) - self._rand_gen = MXNetRandomGenerator if rand_gen is None else \ - rand_gen + self._rand_gen = MXNetRandomGenerator if rand_gen is None else rand_gen self.dtype = get_default_dtype() if dtype is None else dtype self.ctx = ctx self._module_graph = None @@ -77,18 +76,20 @@ def __getitem__(self, key): if key in g: return g[key] return self._module_graph[key] + def _generate_outputs(self, output_shapes): """ Generate the output of the module with given output_shapes. - :param output_shape: the shapes of all the output variables - :type output_shape: {str: tuple} + :param output_shapes: the shapes of all the output variables + :type output_shapes: {str: tuple} """ raise NotImplementedError def _build_module_graphs(self): """ - The internal method for constructing the internal factor graphs of the module. This method needs to be overridden by specific probabilistic modules. 
+ The internal method for constructing the internal factor graphs of the module. + This method needs to be overridden by specific probabilistic modules. :returns: model, extra factor graphs :rtypes: Model, [FactorGraph] @@ -97,19 +98,20 @@ def _build_module_graphs(self): def _attach_default_inference_algorithms(self): """ - The internal method for attaching default inference algorithms of the module. This method needs to be overridden by specific probabilistic modules. + The internal method for attaching default inference algorithms of the module. + This method needs to be overridden by specific probabilistic modules. """ raise NotImplementedError def set_outputs(self, variables): """ - This method overrides the set_outputs method of Factor. It triggers the initialization produces of a probabilistic module including building the factor graphs and attaching default inference algorithms. + This method overrides the set_outputs method of Factor. It triggers the initialization produces of a + probabilistic module including building the factor graphs and attaching default inference algorithms. :param variables: The list of variables to be set as the outputs of the module - :type variable: Variable or (Variable,) + :type variables: Variable or (Variable,) """ - variables = [variables] if not isinstance(variables, (list, tuple)) \ - else variables + variables = [variables] if not isinstance(variables, (list, tuple)) else variables outputs = {name: variable for name, variable in zip(self.output_names, variables)} self.successors = [(k, v) for k, v in outputs.items()] @@ -188,8 +190,7 @@ def get_names_from_uuid(self, uuids): return tuple(sorted([uuid_to_names[uuid] for uuid in uuids if uuid in uuid_to_names])) - def attach_log_pdf_algorithms(self, targets, conditionals, algorithm, - alg_name=None): + def attach_log_pdf_algorithms(self, targets, conditionals, algorithm, alg_name=None): """ Attach an inference algorithm for computing the log_pdf of the module. 
@@ -200,11 +201,12 @@ def attach_log_pdf_algorithms(self, targets, conditionals, algorithm, :param algorithm: the inference algorithm to compute log probability of the module. :type algorithm: InferenceAlgorithm + :param alg_name: The name of the algorithm + :type alg_name: str """ self._attach_algorithm(self._log_pdf_algorithms, targets, conditionals, algorithm, alg_name) - def attach_draw_samples_algorithms(self, targets, conditionals, algorithm, - alg_name=None): + def attach_draw_samples_algorithms(self, targets, conditionals, algorithm, alg_name=None): """ Attach an inference algorithm for drawing samples from the module. @@ -214,12 +216,12 @@ def attach_draw_samples_algorithms(self, targets, conditionals, algorithm, :type conditionals: tuple of str :param algorithm: the inference algorithm to draw samples of the chosen target variables from the module. :type algorithm: InferenceAlgorithm + :param alg_name: The name of the algorithm + :type alg_name: str """ self._attach_algorithm(self._draw_samples_algorithms, targets, conditionals, algorithm, alg_name) - - def attach_prediction_algorithms(self, targets, conditionals, algorithm, - alg_name=None): + def attach_prediction_algorithms(self, targets, conditionals, algorithm, alg_name=None): """ Attach an inference algorithm for prediction from the module. @@ -229,6 +231,8 @@ def attach_prediction_algorithms(self, targets, conditionals, algorithm, :type conditionals: tuple of str :param algorithm: the inference algorithm to predict the chosen target variables from the module. 
:type algorithm: InferenceAlgorithm + :param alg_name: The name of the algorithm + :type alg_name: str """ self._attach_algorithm(self._prediction_algorithms, targets, conditionals, algorithm, alg_name) @@ -257,7 +261,8 @@ def _preprocess_attach_parameters(self, targets, conditionals): def _set_algorithm_name(self, alg_name, algorithm): """ - Sets the attribute of self with the algorithm name, overriding an old algorithm that had the same name. If something other than an InferenceAlgorithm has that name, prints a warning and returns None for alg_name. + Sets the attribute of self with the algorithm name, overriding an old algorithm that had the same name. + If something other than an InferenceAlgorithm has that name, prints a warning and returns None for alg_name. """ from ..inference.inference_alg import InferenceAlgorithm @@ -267,14 +272,17 @@ def _set_algorithm_name(self, alg_name, algorithm): elif isinstance(getattr(self, alg_name), InferenceAlgorithm): setattr(self, alg_name, algorithm) else: - warnings.warn('Something ({}) in this module ({}) is already using the attribute \"{}\". Skipping setting that name to the algorithm.'.format(str(getattr(self, alg_name)),str(self), str(alg_name))) + warnings.warn('Something ({}) in this module ({}) is already using the attribute \"{}\". ' + 'Skipping setting that name to the algorithm.'.format(str(getattr(self, alg_name)), + str(self), str(alg_name))) alg_name = None return alg_name def _attach_duplicate_conditional_algorithm(self, algorithms, targets, conditionals, algorithm, alg_name): """ Mutates the algorithms object, adding the new algorithm to it. - Also removes the name of an old inference algorithm if it had the same (targets, conditional) pair as the new algorithm. + Also removes the name of an old inference algorithm if it had the same (targets, conditional) + pair as the new algorithm. 
""" methods = algorithms[conditionals] no_match = True @@ -295,9 +303,9 @@ def _attach_duplicate_conditional_algorithm(self, algorithms, targets, condition def log_pdf(self, F, variables, targets=None): """ - Compute the logarithm of the probability/probability density of a set of random variables in the Module. The set of random - variables are specified in the "target" argument and any necessary conditional variables are specified in the "conditionals" argument. - Any relevant constants are specified in the "constants" argument. + Compute the logarithm of the probability/probability density of a set of random variables in the Module. + The set of random variables are specified in the "target" argument and any necessary conditional variables + are specified in the "conditionals" argument. Any relevant constants are specified in the "constants" argument. :param F: the MXNet computation mode (``mxnet.symbol`` or ``mxnet.ndarray``). :param variables: The set of variables @@ -307,15 +315,18 @@ def log_pdf(self, F, variables, targets=None): :returns: the sum of the log probability of all the target variables. :rtype: mxnet NDArray or mxnet Symbol """ - alg = self._get_algorithm_for_target_conditional_pair(self._log_pdf_algorithms, targets, variables, exact_match=True) + alg = self._get_algorithm_for_target_conditional_pair(self._log_pdf_algorithms, targets, variables, + exact_match=True) alg.log_pdf_scaling = self.log_pdf_scaling result = alg.compute(F, variables) return result def draw_samples(self, F, variables, num_samples=1, targets=None): """ - Draw samples from the target variables of the Module. If the ``targets`` argument is None, draw samples from all the variables - that are *not* in the conditional variables. If the ``targets`` argument is given, this method returns a list of samples of variables in the order of the target argument, otherwise it returns a dict of samples where the keys are the UUIDs of variables and the values are the samples. 
+ Draw samples from the target variables of the Module. If the ``targets`` argument is None, draw samples from + all the variables that are *not* in the conditional variables. If the ``targets`` argument is given, this + method returns a list of samples of variables in the order of the target argument, otherwise it returns a dict + of samples where the keys are the UUIDs of variables and the values are the samples. :param F: the MXNet computation mode (``mxnet.symbol`` or ``mxnet.ndarray``). :param variables: The set of variables @@ -339,14 +350,15 @@ def predict(self, F, variables, num_samples=1, targets=None): :param F: the MXNet computation mode (``mxnet.symbol`` or ``mxnet.ndarray``). :param variables: The set of variables :type variables: {UUID : MXNet NDArray or MXNet Symbol} - :param num_samples: The number of samples to draw for the target variables if sampling is used for prediction. (optional) + :param num_samples: The number of samples to draw for the target variables if sampling is used for prediction. + (optional) :type num_samples: int :param targets: a list of Variables to predict. :type targets: [UUID] :returns: the sum of the log probability of all the target variables. :rtype: mxnet NDArray or mxnet Symbol """ - alg = self._get_algorithm_for_target_conditional_pair(self._prediction_algorithms, targets, variables) + alg = self._get_algorithm_for_target_conditional_pair(self._prediction_algorithms, targets, variables, exact_match=True) alg.num_samples = num_samples alg.target_variables = targets return alg.compute(F, variables) @@ -354,14 +366,16 @@ def predict(self, F, variables, num_samples=1, targets=None): def _get_algorithm_for_target_conditional_pair(self, algorithms, targets, variables, exact_match=False): """ Searches through the algorithms to find the right algorithm for the target/conditional pair. 
- :param exact_match: This indicates whether the targets passed in must be precisely those in the algorithm, or whether a subset of targets will suffice. + :param exact_match: This indicates whether the targets passed in must be precisely those in the algorithm, + or whether a subset of targets will suffice. """ if targets is None: target_names = tuple(sorted(self.output_names.copy())) else: target_names = self.get_names_from_uuid(targets) conditionals_names = self.get_names_from_uuid(variables.keys()) - conditionals_names = conditionals_names if not exact_match else tuple(sorted(set(conditionals_names) - set(target_names))) + conditionals_names = conditionals_names if not exact_match else \ + tuple(sorted(set(conditionals_names) - set(target_names))) if conditionals_names in algorithms: algs = algorithms[conditionals_names] @@ -372,15 +386,21 @@ def _get_algorithm_for_target_conditional_pair(self, algorithms, targets, variab if exact_match and target_names == set(t): return alg - raise ModelSpecificationError("The targets-conditionals pattern for draw_samples computation "+str((target_names, conditionals_names))+" cannot find a matched inference algorithm.") + raise ModelSpecificationError("The targets-conditionals pattern for draw_samples computation " + + str((target_names, conditionals_names)) + + " cannot find a matched inference algorithm.") def prepare_executor(self, rv_scaling=None): """ - Prepare the creation of an executor. This includes collecting the list of variable transformations and the list of the variables that are inherited from external Gluon blocks, and setting log_pdf_scaling for random variables. + Prepare the creation of an executor. This includes collecting the list of variable transformations and the list + of the variables that are inherited from external Gluon blocks, and setting log_pdf_scaling for + random variables. 
- :param rv_scaling: The scaling of log_pdf of the random variables that are set by users for data sub-sampling or mini-batch learning. + :param rv_scaling: The scaling of log_pdf of the random variables that are set by users for data sub-sampling + or mini-batch learning. :type rv_scaling: {UUID: float} - :returns: the list of the variable transformations and the list of the variables that are excluded from being set as Gluon block parameters (see the excluded argument of __init__ of ObjectiveBlock). + :returns: the list of the variable transformations and the list of the variables that are excluded from being + set as Gluon block parameters (see the excluded argument of __init__ of ObjectiveBlock). :rtypes: {str(UUID): Transformation}, set(str(UUID)) """ excluded = set() @@ -390,8 +410,6 @@ def prepare_executor(self, rv_scaling=None): for v in g.variables.values(): if v.type == VariableType.PARAMETER and v.transformation is not None: var_trans[v.uuid] = v.transformation - if v.type == VariableType.PARAMETER and v.isInherited: - excluded.add(v.uuid) if v.type == VariableType.RANDVAR: if v.uuid in rv_scaling: v.factor.log_pdf_scaling = rv_scaling[v.uuid] @@ -407,8 +425,11 @@ def _clone_algorithms(self, algorithms, replicant): for conditionals, algorithms in algorithms.items(): for targets, algorithm, alg_name in algorithms: graphs_index = {g: i for i,g in enumerate(self._extra_graphs)} - extra_graphs = [replicant._extra_graphs[graphs_index[graph]] for graph in algorithm.graphs if graph in graphs_index] - algs[conditionals] = (targets, algorithm.replicate_self(replicant._module_graph, extra_graphs), alg_name) + extra_graphs = [replicant._extra_graphs[graphs_index[graph]] for graph in algorithm.graphs + if graph in graphs_index] + algs[conditionals] = (targets, + algorithm.replicate_self(replicant._module_graph, extra_graphs), + alg_name) return algs def reconcile_with_module(self, previous_module): @@ -417,7 +438,9 @@ def reconcile_with_module(self, 
previous_module): primary_previous_graph = previous_module._module_graph secondary_previous_graphs = previous_module._extra_graphs primary_current_graph = self._module_graph - component_map = FactorGraph.reconcile_graphs(current_graphs, primary_previous_graph, secondary_previous_graphs=secondary_previous_graphs, primary_current_graph=primary_current_graph) + component_map = FactorGraph.reconcile_graphs(current_graphs, primary_previous_graph, + secondary_previous_graphs=secondary_previous_graphs, + primary_current_graph=primary_current_graph) return component_map def replicate_self(self, attribute_map=None): @@ -445,10 +468,10 @@ def load_module(self, module_json): from ..models import FactorGraph self._module_graph = FactorGraph(module_json['graphs'][0]['name']).load_from_json(module_json['graphs'][0]) if len(module_json['graphs']) > 1: - self._extra_graphs = [FactorGraph(extra_graph['name']).load_from_json(extra_graph) for extra_graph in module_json['graphs'][1:]] + self._extra_graphs = [FactorGraph(extra_graph['name']).load_from_json(extra_graph) + for extra_graph in module_json['graphs'][1:]] return self - def as_json(self): mod_dict = super(Module, self).as_json() graphs = [g.as_json()for g in [self._module_graph] + self._extra_graphs] diff --git a/mxfusion/util/testutils.py b/mxfusion/util/testutils.py index f699d04..bf9b9a8 100644 --- a/mxfusion/util/testutils.py +++ b/mxfusion/util/testutils.py @@ -215,3 +215,18 @@ def plot_bivariate(samples, dist, buffer=0, **kwargs): ax.contour(x, y, z, levels=10, linewidth=10) ax.scatter(samples[:, 0], samples[:, 1], alpha=0.05) plt.show() + + +def make_spd_matrix(dim): + """ + Generate a random symmetric, positive-definite matrix. + + :param dim: The matrix dimension (matrix is square). + :type dim: int + :return X: The random symmetric, positive-definite matrix. 
+ :rtype: array of shape [n_dim, n_dim] + """ + A = np.random.rand(dim, dim) + U, s, V = np.linalg.svd(np.dot(A.T, A)) + X = np.dot(np.dot(U, 1.0 + np.diag(np.random.rand(dim))), V) + return X diff --git a/requirements/test_requirements.txt b/requirements/test_requirements.txt index 4c7d901..2553dea 100644 --- a/requirements/test_requirements.txt +++ b/requirements/test_requirements.txt @@ -6,5 +6,4 @@ pytest-cov>=2.5.1 scipy>=1.1.0 GPy>=1.9.6 matplotlib -scikit-learn>=0.20.0 mxnet>=1.3 diff --git a/testing/components/distributions/gp/cond_gp_test.py b/testing/components/distributions/gp/cond_gp_test.py index 1cacbb8..b82cbb3 100644 --- a/testing/components/distributions/gp/cond_gp_test.py +++ b/testing/components/distributions/gp/cond_gp_test.py @@ -15,14 +15,17 @@ import pytest import mxnet as mx +import mxnet.gluon.nn as nn import numpy as np from mxfusion.models import Model from mxfusion.components.variables.runtime_variable import array_has_samples, get_num_samples from mxfusion.components.distributions import ConditionalGaussianProcess from mxfusion.components.distributions.gp.kernels import RBF from mxfusion.components.variables import Variable +from mxfusion.components.functions import MXFusionGluonFunction from mxfusion.util.testutils import prepare_mxnet_array from mxfusion.util.testutils import MockMXNetRandomGenerator +from mxfusion.common.exceptions import ModelSpecificationError from scipy.stats import multivariate_normal import matplotlib matplotlib.use('Agg') @@ -88,6 +91,78 @@ def test_log_pdf(self, dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, assert get_num_samples(mx.nd, log_pdf_rt) == num_samples assert np.allclose(log_pdf_np, log_pdf_rt) + @pytest.mark.parametrize("dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv, rv_isSamples, num_samples", [ + (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, 
np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(3,5,1), True, 3), + (np.float64, np.random.rand(3,5,2), True, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(5,1), False, 3), + (np.float64, np.random.rand(3,5,2), True, np.random.rand(8,2), False, np.random.rand(3,8,1), True, np.random.rand(3,2)+0.1, True, np.random.rand(3,1)+0.1, True, np.random.rand(3,5,1), True, 3), + (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(5,1), False, 1), + ]) + def test_log_pdf_w_mean(self, dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, + rv, rv_isSamples, num_samples): + net = nn.HybridSequential(prefix='nn_') + with net.name_scope(): + net.add(nn.Dense(rv.shape[-1], flatten=False, activation="tanh", + in_units=X.shape[-1], dtype=dtype)) + net.initialize(mx.init.Xavier(magnitude=3)) + + from scipy.linalg.lapack import dtrtrs + X_mx = prepare_mxnet_array(X, X_isSamples, dtype) + X_cond_mx = prepare_mxnet_array(X_cond, X_cond_isSamples, dtype) + Y_cond_mx = prepare_mxnet_array(Y_cond, Y_cond_isSamples, dtype) + rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale, rbf_lengthscale_isSamples, dtype) + rbf_variance_mx = prepare_mxnet_array(rbf_variance, rbf_variance_isSamples, dtype) + rv_mx = prepare_mxnet_array(rv, rv_isSamples, dtype) + rv_shape = rv.shape[1:] if rv_isSamples else rv.shape + mean_mx = net(X_mx) + mean_np = mean_mx.asnumpy() + mean_cond_mx = net(X_cond_mx) + mean_cond_np = mean_cond_mx.asnumpy() + + rbf = RBF(2, True, 1., 1., 'rbf', None, dtype) + X_var = Variable(shape=(5,2)) + X_cond_var = Variable(shape=(8,2)) + Y_cond_var = Variable(shape=(8,1)) + mean_func = MXFusionGluonFunction(net, 
num_outputs=1, + broadcastable=True) + mean_var = mean_func(X_var) + mean_cond_var = mean_func(X_cond_var) + gp = ConditionalGaussianProcess.define_variable(X=X_var, X_cond=X_cond_var, Y_cond=Y_cond_var, mean=mean_var, mean_cond=mean_cond_var, kernel=rbf, shape=rv_shape, dtype=dtype).factor + + variables = {gp.X.uuid: X_mx, gp.X_cond.uuid: X_cond_mx, gp.Y_cond.uuid: Y_cond_mx, gp.rbf_lengthscale.uuid: rbf_lengthscale_mx, gp.rbf_variance.uuid: rbf_variance_mx, gp.random_variable.uuid: rv_mx, gp.mean.uuid: mean_mx, gp.mean_cond.uuid: mean_cond_mx} + log_pdf_rt = gp.log_pdf(F=mx.nd, variables=variables).asnumpy() + + log_pdf_np = [] + for i in range(num_samples): + X_i = X[i] if X_isSamples else X + X_cond_i = X_cond[i] if X_cond_isSamples else X_cond + Y_cond_i = Y_cond[i] if Y_cond_isSamples else Y_cond + Y_cond_i = Y_cond_i - mean_cond_np[i] if X_cond_isSamples else Y_cond_i - mean_cond_np[0] + lengthscale_i = rbf_lengthscale[i] if rbf_lengthscale_isSamples else rbf_lengthscale + variance_i = rbf_variance[i] if rbf_variance_isSamples else rbf_variance + rv_i = rv[i] if rv_isSamples else rv + rv_i = rv_i - mean_np[i] if X_isSamples else rv_i - mean_np[0] + rbf_np = GPy.kern.RBF(input_dim=2, ARD=True) + rbf_np.lengthscale = lengthscale_i + rbf_np.variance = variance_i + K_np = rbf_np.K(X_i) + Kc_np = rbf_np.K(X_cond_i, X_i) + Kcc_np = rbf_np.K(X_cond_i) + + L = np.linalg.cholesky(Kcc_np) + LInvY = dtrtrs(L, Y_cond_i, lower=1, trans=0)[0] + LinvKxt = dtrtrs(L, Kc_np, lower=1, trans=0)[0] + + mu = LinvKxt.T.dot(LInvY) + cov = K_np - LinvKxt.T.dot(LinvKxt) + log_pdf_np.append(multivariate_normal.logpdf(rv_i[:,0], mean=mu[:,0], cov=cov)) + log_pdf_np = np.array(log_pdf_np) + isSamples_any = any([X_isSamples, rbf_lengthscale_isSamples, rbf_variance_isSamples, rv_isSamples]) + assert np.issubdtype(log_pdf_rt.dtype, dtype) + assert array_has_samples(mx.nd, log_pdf_rt) == isSamples_any + if isSamples_any: + assert get_num_samples(mx.nd, log_pdf_rt) == num_samples + assert 
np.allclose(log_pdf_np, log_pdf_rt) + @pytest.mark.parametrize("dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv_shape, num_samples", [ (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3), (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,8,2), True, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3), @@ -142,7 +217,77 @@ def test_draw_samples(self, dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_c samples_np = np.array(samples_np) assert np.issubdtype(samples_rt.dtype, dtype) assert get_num_samples(mx.nd, samples_rt) == num_samples - print(samples_np, samples_rt) + assert np.allclose(samples_np, samples_rt) + + @pytest.mark.parametrize("dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv_shape, num_samples", [ + (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3), + (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,8,2), True, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3), + (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,8,2), True, np.random.rand(3,8,1), True, np.random.rand(3,2)+0.1, True, np.random.rand(3,1)+0.1, True, (5,1), 3), + (np.float64, np.random.rand(5,2), False, np.random.rand(8,2), False, np.random.rand(8,1), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 1), + ]) + def test_draw_samples_w_mean(self, dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, 
+ rv_shape, num_samples): + net = nn.HybridSequential(prefix='nn_') + with net.name_scope(): + net.add(nn.Dense(rv_shape[-1], flatten=False, activation="tanh", + in_units=X.shape[-1], dtype=dtype)) + net.initialize(mx.init.Xavier(magnitude=3)) + + from scipy.linalg.lapack import dtrtrs + X_mx = prepare_mxnet_array(X, X_isSamples, dtype) + X_cond_mx = prepare_mxnet_array(X_cond, X_cond_isSamples, dtype) + Y_cond_mx = prepare_mxnet_array(Y_cond, Y_cond_isSamples, dtype) + rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale, rbf_lengthscale_isSamples, dtype) + rbf_variance_mx = prepare_mxnet_array(rbf_variance, rbf_variance_isSamples, dtype) + mean_mx = net(X_mx) + mean_np = mean_mx.asnumpy() + mean_cond_mx = net(X_cond_mx) + mean_cond_np = mean_cond_mx.asnumpy() + + rand = np.random.randn(num_samples, *rv_shape) + rand_gen = MockMXNetRandomGenerator(mx.nd.array(rand.flatten(), dtype=dtype)) + + rbf = RBF(2, True, 1., 1., 'rbf', None, dtype) + X_var = Variable(shape=(5,2)) + X_cond_var = Variable(shape=(8,2)) + Y_cond_var = Variable(shape=(8,1)) + mean_func = MXFusionGluonFunction(net, num_outputs=1, + broadcastable=True) + mean_var = mean_func(X_var) + mean_cond_var = mean_func(X_cond_var) + gp = ConditionalGaussianProcess.define_variable(X=X_var, X_cond=X_cond_var, Y_cond=Y_cond_var, mean=mean_var, mean_cond=mean_cond_var, kernel=rbf, shape=rv_shape, dtype=dtype, rand_gen=rand_gen).factor + + variables = {gp.X.uuid: X_mx, gp.X_cond.uuid: X_cond_mx, gp.Y_cond.uuid: Y_cond_mx, gp.rbf_lengthscale.uuid: rbf_lengthscale_mx, gp.rbf_variance.uuid: rbf_variance_mx, gp.mean.uuid: mean_mx, gp.mean_cond.uuid: mean_cond_mx} + samples_rt = gp.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples).asnumpy() + + samples_np = [] + for i in range(num_samples): + X_i = X[i] if X_isSamples else X + X_cond_i = X_cond[i] if X_cond_isSamples else X_cond + Y_cond_i = Y_cond[i] if Y_cond_isSamples else Y_cond + Y_cond_i = Y_cond_i - mean_cond_np[i] if X_cond_isSamples 
else Y_cond_i - mean_cond_np[0] + lengthscale_i = rbf_lengthscale[i] if rbf_lengthscale_isSamples else rbf_lengthscale + variance_i = rbf_variance[i] if rbf_variance_isSamples else rbf_variance + rand_i = rand[i] + rbf_np = GPy.kern.RBF(input_dim=2, ARD=True) + rbf_np.lengthscale = lengthscale_i + rbf_np.variance = variance_i + K_np = rbf_np.K(X_i) + Kc_np = rbf_np.K(X_cond_i, X_i) + Kcc_np = rbf_np.K(X_cond_i) + + L = np.linalg.cholesky(Kcc_np) + LInvY = dtrtrs(L, Y_cond_i, lower=1, trans=0)[0] + LinvKxt = dtrtrs(L, Kc_np, lower=1, trans=0)[0] + + mu = LinvKxt.T.dot(LInvY) + cov = K_np - LinvKxt.T.dot(LinvKxt) + L_cov_np = np.linalg.cholesky(cov) + sample_np = mu + L_cov_np.dot(rand_i) + samples_np.append(sample_np) + samples_np = np.array(samples_np)+mean_np + assert np.issubdtype(samples_rt.dtype, dtype) + assert get_num_samples(mx.nd, samples_rt) == num_samples assert np.allclose(samples_np, samples_rt) @pytest.mark.parametrize("dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_cond, Y_cond_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv, rv_isSamples, num_samples", [ @@ -200,3 +345,24 @@ def test_clone_cond_gp(self, dtype, X, X_isSamples, X_cond, X_cond_isSamples, Y_ if isSamples_any: assert get_num_samples(mx.nd, log_pdf_rt) == num_samples assert np.allclose(log_pdf_np, log_pdf_rt) + + def test_mean_argument(self): + + with pytest.raises(ModelSpecificationError): + dtype='float64' + + net = nn.HybridSequential(prefix='nn_') + with net.name_scope(): + net.add(nn.Dense(1, flatten=False, activation="tanh", + in_units=2, dtype=dtype)) + net.initialize(mx.init.Xavier(magnitude=3)) + + rbf = RBF(2, True, 1., 1., 'rbf', None, dtype) + X_var = Variable(shape=(5, 2)) + X_cond_var = Variable(shape=(8, 2)) + Y_cond_var = Variable(shape=(8, 1)) + mean_func = MXFusionGluonFunction(net, num_outputs=1, + broadcastable=True) + mean_var = mean_func(X_var) + mean_cond_var = mean_func(X_cond_var) + gp = 
ConditionalGaussianProcess.define_variable(X=X_var, X_cond=X_cond_var, Y_cond=Y_cond_var, mean_cond=mean_cond_var, kernel=rbf, shape=(5, 1), dtype=dtype) diff --git a/testing/components/distributions/gp/gp_test.py b/testing/components/distributions/gp/gp_test.py index 28b99c5..caa64b1 100644 --- a/testing/components/distributions/gp/gp_test.py +++ b/testing/components/distributions/gp/gp_test.py @@ -15,12 +15,14 @@ import pytest import mxnet as mx +import mxnet.gluon.nn as nn import numpy as np from mxfusion.models import Model from mxfusion.components.variables.runtime_variable import array_has_samples, get_num_samples from mxfusion.components.distributions import GaussianProcess from mxfusion.components.distributions.gp.kernels import RBF from mxfusion.components import Variable +from mxfusion.components.functions import MXFusionGluonFunction from mxfusion.util.testutils import prepare_mxnet_array from mxfusion.util.testutils import MockMXNetRandomGenerator from scipy.stats import multivariate_normal @@ -72,6 +74,61 @@ def test_log_pdf(self, dtype, X, X_isSamples, rbf_lengthscale, rbf_lengthscale_i assert get_num_samples(mx.nd, log_pdf_rt) == num_samples assert np.allclose(log_pdf_np, log_pdf_rt) + + @pytest.mark.parametrize("dtype, X, X_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv, rv_isSamples, num_samples", [ + (np.float64, np.random.rand(5,2), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(3,5,1), True, 3), + (np.float64, np.random.rand(3,5,2), True, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(5,1), False, 3), + (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,2)+0.1, True, np.random.rand(3,1)+0.1, True, np.random.rand(3,5,1), True, 3), + (np.float64, np.random.rand(5,2), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(5,1), False, 1), + ]) + def test_log_pdf_w_mean(self, dtype, X, X_isSamples, 
rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, + rv, rv_isSamples, num_samples): + + net = nn.HybridSequential(prefix='nn_') + with net.name_scope(): + net.add(nn.Dense(rv.shape[-1], flatten=False, activation="tanh", + in_units=X.shape[-1], dtype=dtype)) + net.initialize(mx.init.Xavier(magnitude=3)) + + X_mx = prepare_mxnet_array(X, X_isSamples, dtype) + rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale, rbf_lengthscale_isSamples, dtype) + rbf_variance_mx = prepare_mxnet_array(rbf_variance, rbf_variance_isSamples, dtype) + rv_mx = prepare_mxnet_array(rv, rv_isSamples, dtype) + rv_shape = rv.shape[1:] if rv_isSamples else rv.shape + mean_mx = net(X_mx) + mean_np = mean_mx.asnumpy() + + rbf = RBF(2, True, 1., 1., 'rbf', None, dtype) + X_var = Variable(shape=(5,2)) + mean_func = MXFusionGluonFunction(net, num_outputs=1, + broadcastable=True) + mean_var = mean_func(X_var) + gp = GaussianProcess.define_variable(X=X_var, kernel=rbf, shape=rv_shape, mean=mean_var, dtype=dtype).factor + + variables = {gp.X.uuid: X_mx, gp.rbf_lengthscale.uuid: rbf_lengthscale_mx, gp.rbf_variance.uuid: rbf_variance_mx, gp.random_variable.uuid: rv_mx, gp.mean.uuid: mean_mx} + log_pdf_rt = gp.log_pdf(F=mx.nd, variables=variables).asnumpy() + + log_pdf_np = [] + for i in range(num_samples): + X_i = X[i] if X_isSamples else X + lengthscale_i = rbf_lengthscale[i] if rbf_lengthscale_isSamples else rbf_lengthscale + variance_i = rbf_variance[i] if rbf_variance_isSamples else rbf_variance + rv_i = rv[i] if rv_isSamples else rv + rv_i = rv_i - mean_np[i] if X_isSamples else rv_i - mean_np[0] + rbf_np = GPy.kern.RBF(input_dim=2, ARD=True) + rbf_np.lengthscale = lengthscale_i + rbf_np.variance = variance_i + K_np = rbf_np.K(X_i) + log_pdf_np.append(multivariate_normal.logpdf(rv_i[:,0], mean=None, cov=K_np)) + log_pdf_np = np.array(log_pdf_np) + isSamples_any = any([X_isSamples, rbf_lengthscale_isSamples, rbf_variance_isSamples, rv_isSamples]) + assert 
np.issubdtype(log_pdf_rt.dtype, dtype) + assert array_has_samples(mx.nd, log_pdf_rt) == isSamples_any + if isSamples_any: + assert get_num_samples(mx.nd, log_pdf_rt) == num_samples + assert np.allclose(log_pdf_np, log_pdf_rt) + + @pytest.mark.parametrize("dtype, X, X_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv_shape, num_samples", [ (np.float64, np.random.rand(5,2), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3), (np.float64, np.random.rand(3,5,2), True, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3), @@ -114,6 +171,60 @@ def test_draw_samples(self, dtype, X, X_isSamples, rbf_lengthscale, rbf_lengthsc assert np.allclose(samples_np, samples_rt) + @pytest.mark.parametrize("dtype, X, X_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv_shape, num_samples", [ + (np.float64, np.random.rand(5,2), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3), + (np.float64, np.random.rand(3,5,2), True, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 3), + (np.float64, np.random.rand(3,5,2), True, np.random.rand(3,2)+0.1, True, np.random.rand(3,1)+0.1, True, (5,1), 3), + (np.float64, np.random.rand(5,2), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, (5,1), 1), + ]) + def test_draw_samples_w_mean(self, dtype, X, X_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, + rv_shape, num_samples): + + net = nn.HybridSequential(prefix='nn_') + with net.name_scope(): + net.add(nn.Dense(rv_shape[-1], flatten=False, activation="tanh", + in_units=X.shape[-1], dtype=dtype)) + net.initialize(mx.init.Xavier(magnitude=3)) + + X_mx = prepare_mxnet_array(X, X_isSamples, dtype) + rbf_lengthscale_mx = prepare_mxnet_array(rbf_lengthscale, rbf_lengthscale_isSamples, dtype) + rbf_variance_mx = prepare_mxnet_array(rbf_variance, rbf_variance_isSamples, 
dtype) + mean_mx = net(X_mx) + mean_np = mean_mx.asnumpy() + + rand = np.random.randn(num_samples, *rv_shape) + rand_gen = MockMXNetRandomGenerator(mx.nd.array(rand.flatten(), dtype=dtype)) + + rbf = RBF(2, True, 1., 1., 'rbf', None, dtype) + X_var = Variable(shape=(5,2)) + mean_func = MXFusionGluonFunction(net, num_outputs=1, + broadcastable=True) + mean_var = mean_func(X_var) + gp = GaussianProcess.define_variable(X=X_var, kernel=rbf, shape=rv_shape, mean=mean_var, dtype=dtype, rand_gen=rand_gen).factor + + variables = {gp.X.uuid: X_mx, gp.rbf_lengthscale.uuid: rbf_lengthscale_mx, gp.rbf_variance.uuid: rbf_variance_mx, gp.mean.uuid: mean_mx} + samples_rt = gp.draw_samples(F=mx.nd, variables=variables, num_samples=num_samples).asnumpy() + + samples_np = [] + for i in range(num_samples): + X_i = X[i] if X_isSamples else X + lengthscale_i = rbf_lengthscale[i] if rbf_lengthscale_isSamples else rbf_lengthscale + variance_i = rbf_variance[i] if rbf_variance_isSamples else rbf_variance + rand_i = rand[i] + rbf_np = GPy.kern.RBF(input_dim=2, ARD=True) + rbf_np.lengthscale = lengthscale_i + rbf_np.variance = variance_i + K_np = rbf_np.K(X_i) + L_np = np.linalg.cholesky(K_np) + sample_np = L_np.dot(rand_i) + samples_np.append(sample_np) + samples_np = np.array(samples_np)+mean_np + + assert np.issubdtype(samples_rt.dtype, dtype) + assert get_num_samples(mx.nd, samples_rt) == num_samples + assert np.allclose(samples_np, samples_rt) + + @pytest.mark.parametrize("dtype, X, X_isSamples, rbf_lengthscale, rbf_lengthscale_isSamples, rbf_variance, rbf_variance_isSamples, rv, rv_isSamples, num_samples", [ (np.float64, np.random.rand(5,2), False, np.random.rand(2)+0.1, False, np.random.rand(1)+0.1, False, np.random.rand(3,5,1), True, 3), ]) diff --git a/testing/components/distributions/wishart_test.py b/testing/components/distributions/wishart_test.py index 873d6f3..d982c90 100644 --- a/testing/components/distributions/wishart_test.py +++ 
b/testing/components/distributions/wishart_test.py @@ -15,46 +15,48 @@ import pytest import mxnet as mx -from sklearn.datasets import make_spd_matrix import numpy as np from scipy.stats import wishart, chi2 from mxfusion.components.distributions import Wishart from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples, get_num_samples -from mxfusion.util.testutils import MockMXNetRandomGenerator, numpy_array_reshape, plot_univariate +from mxfusion.util.testutils import MockMXNetRandomGenerator, numpy_array_reshape, plot_univariate, make_spd_matrix -def make_spd_matrices_3d(num_samples, num_dimensions, random_state): +def make_spd_matrices_3d(num_samples, num_dimensions, random_seed): matrices = np.zeros((num_samples, num_dimensions, num_dimensions)) + np.random.seed(random_seed) for i in range(num_samples): - matrices[i, :, :] = make_spd_matrix(num_dimensions, random_state=random_state) + matrices[i, :, :] = make_spd_matrix(num_dimensions) return matrices -def make_spd_matrices_4d(num_samples, num_data_points, num_dimensions, random_state): +def make_spd_matrices_4d(num_samples, num_data_points, num_dimensions, random_seed): matrices = np.zeros((num_samples, num_data_points, num_dimensions, num_dimensions)) + np.random.seed(random_seed) for i in range(num_samples): for j in range(num_data_points): - matrices[i, j, :, :] = make_spd_matrix(num_dimensions, random_state=random_state) + matrices[i, j, :, :] = make_spd_matrix(num_dimensions) return matrices @pytest.mark.usefixtures("set_seed") class TestWishartDistribution(object): - @pytest.mark.parametrize("dtype_dof, dtype, degrees_of_freedom, random_state, scale_is_samples, " + @pytest.mark.parametrize("dtype_dof, dtype, degrees_of_freedom, random_seed, scale_is_samples, " "rv_is_samples, num_data_points, num_samples, broadcast", [ (np.int32, np.float32, 2, 0, True, True, 3, 6, False), ]) - def test_log_pdf(self, dtype_dof, dtype, degrees_of_freedom, random_state, + def 
test_log_pdf(self, dtype_dof, dtype, degrees_of_freedom, random_seed, scale_is_samples, rv_is_samples, num_data_points, num_samples, broadcast): # Create positive semi-definite matrices - rv = make_spd_matrices_4d(num_samples, num_data_points, degrees_of_freedom, random_state=random_state) + np.random.seed(random_seed) + rv = make_spd_matrices_4d(num_samples, num_data_points, degrees_of_freedom, random_seed=random_seed) if broadcast: - scale = make_spd_matrix(n_dim=degrees_of_freedom, random_state=random_state) + scale = make_spd_matrix(dim=degrees_of_freedom) else: - scale = make_spd_matrices_4d(num_samples, num_data_points, degrees_of_freedom, random_state=random_state) + scale = make_spd_matrices_4d(num_samples, num_data_points, degrees_of_freedom, random_seed=random_seed) degrees_of_freedom_mx = mx.nd.array([degrees_of_freedom], dtype=dtype_dof) degrees_of_freedom = degrees_of_freedom_mx.asnumpy()[0] # ensures the correct dtype @@ -100,12 +102,11 @@ def test_log_pdf(self, dtype_dof, dtype, degrees_of_freedom, random_state, @pytest.mark.parametrize( "dtype_dof, dtype, degrees_of_freedom, scale, scale_is_samples, rv_shape, num_samples", [ - (np.int64, np.float64, 3, make_spd_matrix(3, 0), False, (3, 3), 5), + (np.int64, np.float64, 3, make_spd_matrix(3), False, (3, 3), 5), (np.int64, np.float64, 3, make_spd_matrices_4d(5, 5, 3, 0), True, (5, 3, 3), 5), ]) def test_draw_samples_no_broadcast(self, dtype_dof, dtype, degrees_of_freedom, scale, scale_is_samples, rv_shape, num_samples): - degrees_of_freedom_mx = mx.nd.array([degrees_of_freedom], dtype=dtype_dof) scale_mx = mx.nd.array(scale, dtype=dtype) if not scale_is_samples: diff --git a/testing/components/factor_test.py b/testing/components/factor_test.py index f93486c..4e9b95a 100644 --- a/testing/components/factor_test.py +++ b/testing/components/factor_test.py @@ -41,7 +41,8 @@ def test_replicate_function_only_self(self): self.D = 10 self.net = nn.HybridSequential() with self.net.name_scope(): - 
self.net.add(nn.Dense(self.D, activation="relu")) + self.net.add(nn.Dense(self.D, in_units=1, activation="relu")) + self.net.initialize() m = mf.models.Model(verbose=False) f = MXFusionGluonFunction(self.net, num_outputs=1) diff --git a/testing/components/functions/mxfusion_gluon_function_test.py b/testing/components/functions/mxfusion_gluon_function_test.py index 576b579..8b5d8aa 100644 --- a/testing/components/functions/mxfusion_gluon_function_test.py +++ b/testing/components/functions/mxfusion_gluon_function_test.py @@ -22,6 +22,8 @@ from mxfusion.components.functions.mxfusion_gluon_function import MXFusionGluonFunction from mxfusion.components import Variable from mxfusion.components.variables.runtime_variable import add_sample_dimension, array_has_samples +from mxfusion import Model +from mxfusion.inference import Inference, ForwardSamplingAlgorithm @pytest.mark.usefixtures("set_seed") @@ -33,7 +35,8 @@ def setUp(self): self.D = 10 self.net = nn.HybridSequential() with self.net.name_scope(): - self.net.add(nn.Dense(self.D, activation="relu")) + self.net.add(nn.Dense(self.D, in_units=1, activation="relu")) + self.net.initialize() def _make_gluon_function_evaluation(self, dtype, broadcastable): class Dot(HybridBlock): @@ -162,3 +165,15 @@ def test_success(self): x = Variable() y = f(x) #z = y.value.eval({'x' : mx.nd.ones(self.D)}) + + def test_gluon_parameters(self): + self.setUp() + + m = Model() + m.x = Variable(shape=(1,1)) + m.f = MXFusionGluonFunction(self.net, num_outputs=1) + m.y = m.f(m.x) + + infr = Inference(ForwardSamplingAlgorithm(m, observed=[m.x])) + infr.run(x=mx.nd.ones((1, 1))) + assert all([v.uuid in infr.params.param_dict for v in m.f.parameters.values()]) diff --git a/testing/inference/inference_serialization_test.py b/testing/inference/inference_serialization_test.py index 64c5a3a..68dd276 100644 --- a/testing/inference/inference_serialization_test.py +++ b/testing/inference/inference_serialization_test.py @@ -24,7 +24,8 @@ from 
mxfusion.components.functions import MXFusionGluonFunction from mxfusion.common.config import get_default_dtype from mxfusion.components.functions.operators import broadcast_to -from mxfusion import Variable +from mxfusion import Variable, Model +from mxfusion.inference import Inference, ForwardSamplingAlgorithm class InferenceSerializationTests(unittest.TestCase): @@ -64,6 +65,14 @@ def make_net(self): net.initialize(mx.init.Xavier(magnitude=3)) return net + def make_simple_gluon_model(self): + net = self.make_net() + m = Model() + m.x = Variable(shape=(1, 1)) + m.f = MXFusionGluonFunction(net, num_outputs=1) + m.y = m.f(m.x) + return m + def make_gpregr_model(self, lengthscale, variance, noise_var): from mxfusion.models import Model from mxfusion.components.variables import Variable, PositiveTransformation @@ -210,3 +219,20 @@ def test_gp_module_save_and_load(self): loss2, _ = infr2.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) os.remove(self.ZIPNAME) + + def test_gluon_func_save_and_load(self): + m = self.make_simple_gluon_model() + infr = Inference(ForwardSamplingAlgorithm(m, observed=[m.x])) + infr.run(x=mx.nd.ones((1, 1))) + infr.save(self.ZIPNAME) + + m2 = self.make_simple_gluon_model() + infr2 = Inference(ForwardSamplingAlgorithm(m2, observed=[m2.x])) + infr2.run(x=mx.nd.ones((1, 1))) + infr2.load(self.ZIPNAME) + infr2.run(x=mx.nd.ones((1, 1))) + + for n in m.f.parameter_names: + assert np.allclose(infr.params[getattr(m.y.factor, n)].asnumpy(), infr2.params[getattr(m2.y.factor, n)].asnumpy()) + + os.remove(self.ZIPNAME) diff --git a/testing/inference/pilco_test.py b/testing/inference/pilco_test.py new file mode 100644 index 0000000..ac35f9e --- /dev/null +++ b/testing/inference/pilco_test.py @@ -0,0 +1,183 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# or in the "license" file accompanying this file. This file is distributed +# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either +# express or implied. See the License for the specific language governing +# permissions and limitations under the License. +# ============================================================================== + +import numpy as np +import pytest +import mxnet as mx +from mxfusion import Model, Variable +from mxfusion.components.variables import PositiveTransformation +from mxfusion.components.distributions.gp.kernels import RBF +from mxfusion.modules.gp_modules import GPRegression +from mxfusion.inference import GradBasedInference, MAP +from mxnet.gluon import HybridBlock +from mxnet.gluon.nn import Dense +from mxfusion.inference import GradTransferInference +from mxfusion.inference.pilco_alg import PILCOAlgorithm + +class NNController(HybridBlock): + def __init__(self, prefix=None, params=None, in_units=100, obs_space_high=3): + super(NNController, self).__init__(prefix=prefix, params=params) + self.dense1 = Dense(100, in_units=obs_space_high, activation='relu') + self.dense2 = Dense(1, in_units=100, activation='tanh') + def hybrid_forward(self, F, x): + out = self.dense2(self.dense1(x))*2 + return out + +class CostFunction(mx.gluon.HybridBlock): + """ + The goal is to get the pendulum upright and stable as quickly as possible. + Taken from the code for Pendulum. + """ + def hybrid_forward(self, F, state, action): + """ + :param state: [np.cos(theta), np.sin(theta), ~ momentum(theta)] + a -> 0 when pendulum is upright, largest when pendulum is hanging down completely. + b -> penalty for taking action + c -> penalty for pendulum momentum + """ + a_scale = 2. 
+ b_scale = .001 + c_scale = .1 + a = F.sum(a_scale * (state[:,:,0:1] -1) ** 2, axis=-1) + b = F.sum(b_scale * action ** 2, axis=-1) + c = F.sum(c_scale * state[:,:,2:3] ** 2, axis=-1) + return (a + c + b) + +@pytest.mark.usefixtures("set_seed") +class TestPILCOInference(object): + """ + Test class that tests the MXFusion.inference.PILCOAlgorithm and MXFusion.inferenceGradTransferInference classes. + """ + + def run_one_episode(self): + reward_shape = (1,) + observations_shape = (200,3) # steps, obs shape + actions_shape = (199,1) # steps - 1, action shape + return np.random.rand(*reward_shape), np.random.rand(*observations_shape), np.random.rand(*actions_shape) + + def prepare_data(self, state_list, action_list, win_in): + """ + Prepares a list of states and a list of actions as inputs to the Gaussian Process for training. + """ + + X_list = [] + Y_list = [] + + for state_array, action_array in zip(state_list, action_list): + # the state and action array shape should be aligned. + assert state_array.shape[0]-1 == action_array.shape[0] + + for i in range(state_array.shape[0]-win_in): + Y_list.append(state_array[i+win_in:i+win_in+1]) + X_list.append(np.hstack([state_array[i:i+win_in].flatten(), action_array[i:i+win_in].flatten()])) + X = np.vstack(X_list) + Y = np.vstack(Y_list) + return X, Y + + + def fit_model(self, state_list, action_list, win_in, verbose=True, max_iter=1000): + """ + Fits a Gaussian Process model to the state / action pairs passed in. + This creates a model of the environment which is used during + policy optimization instead of querying the environment directly. + + See mxfusion.gp_modules for additional types of GP models to fit, + including Sparse GP and Stochastic Varitional Inference Sparse GP. 
+ """ + X, Y = self.prepare_data(state_list, action_list, win_in) + + m = Model() + m.N = Variable() + m.X = Variable(shape=(m.N, X.shape[-1])) + m.noise_var = Variable(shape=(1,), transformation=PositiveTransformation(), + initial_value=0.01) + m.kernel = RBF(input_dim=X.shape[-1], variance=1, lengthscale=1, ARD=True) + m.Y = GPRegression.define_variable( + X=m.X, kernel=m.kernel, noise_var=m.noise_var, + shape=(m.N, Y.shape[-1])) + m.Y.factor.gp_log_pdf.jitter = 1e-6 + + infr = GradBasedInference( + inference_algorithm=MAP(model=m, observed=[m.X, m.Y])) + infr.run(X=mx.nd.array(X), + Y=mx.nd.array(Y), + max_iter=max_iter, learning_rate=0.1, verbose=verbose) + return m, infr, X, Y + + def optimize_policy(self, alg, policy, cost_func, model, infr, + model_data_X, model_data_Y, + initial_state_generator, num_grad_steps, + learning_rate=1e-2, num_time_steps=100, + num_samples=10, verbose=True): + """ + Takes as primary inputs a policy, cost function, and trained model. + Optimizes the policy for num_grad_steps number of iterations. 
+ """ + mb_alg = alg( + model=model, observed=[model.X, model.Y], + cost_function=cost_func, + policy=policy, n_time_steps=num_time_steps, + initial_state_generator=initial_state_generator, + num_samples=num_samples) + + infr_pred = GradTransferInference( + mb_alg, infr_params=infr.params, train_params=policy.collect_params()) + infr_pred.run( + max_iter=num_grad_steps, + X=mx.nd.array(model_data_X), + Y=mx.nd.array(model_data_Y), + verbose=verbose, learning_rate=learning_rate) + return policy + + def initial_state_generator(self, num_initial_states, obs_space_shape=3): + """ + Starts from valid states by drawing theta and momentum + then computing np.cos(theta) and np.sin(theta) for state[0:2].s + """ + return mx.nd.array( + [np.random.rand(obs_space_shape) for i in range(num_initial_states)]) + + + @pytest.mark.parametrize("pilco_alg", [ + (PILCOAlgorithm)]) + def test_pilco_basic_passthrough(self, pilco_alg): + policy = NNController() + policy.collect_params().initialize(mx.initializer.Xavier(magnitude=1)) + cost = CostFunction() + num_episode = 2 # how many model fit + policy optimization episodes to run + num_samples = 2 # how many sample trajectories the policy optimization loop uses + num_grad_steps = 2 # how many gradient steps the optimizer takes per episode + num_time_steps = 2 # how far to roll out each sample trajectory + learning_rate = 1e-3 # learning rate for the policy optimization + + all_states = [] + all_actions = [] + + for i_ep in range(num_episode): + # Run an episode and collect data. + policy_func = lambda x: policy(mx.nd.expand_dims(mx.nd.array(x), axis=0)).asnumpy()[0] + total_reward, states, actions = self.run_one_episode() + all_states.append(states) + all_actions.append(actions) + + # Fit a model. + model, infr, model_data_X, model_data_Y = self.fit_model( + all_states, all_actions, win_in=1, verbose=True, max_iter=5) + + # Optimize the policy. 
+ policy = self.optimize_policy(pilco_alg, + policy, cost, model, infr, model_data_X, model_data_Y, + self.initial_state_generator, num_grad_steps=num_grad_steps, + num_samples=num_samples, learning_rate=learning_rate, + num_time_steps=num_time_steps) diff --git a/testing/models/factor_graph_test.py b/testing/models/factor_graph_test.py index 641a9a9..1055d24 100644 --- a/testing/models/factor_graph_test.py +++ b/testing/models/factor_graph_test.py @@ -68,9 +68,9 @@ def make_net(self): D = 100 net = nn.HybridSequential(prefix='hybrid0_') with net.name_scope(): - net.add(nn.Dense(D, activation="tanh")) - net.add(nn.Dense(D, activation="tanh")) - net.add(nn.Dense(2, flatten=True)) + net.add(nn.Dense(D, in_units=10, activation="tanh", flatten=False)) + net.add(nn.Dense(D, in_units=D, activation="tanh", flatten=False)) + net.add(nn.Dense(2, in_units=D, flatten=False)) net.initialize(mx.init.Xavier(magnitude=3)) return net @@ -96,7 +96,8 @@ def setUp(self): self.D = 10 self.basic_net = nn.HybridSequential() with self.basic_net.name_scope(): - self.basic_net.add(nn.Dense(self.D, activation="relu")) + self.basic_net.add(nn.Dense(self.D, in_units=10, activation="relu")) + self.basic_net.initialize() self.bnn_net = self.make_net() def test_bnn_model(self): @@ -291,8 +292,8 @@ def test_reconcile_gp_model(self): self.assertTrue(len(component_map) == len(set(m1.components).union(set(m1.Y.factor._module_graph.components)).union(set(m1.Y.factor._extra_graphs[0].components)))) def test_reconcile_model_and_posterior(self): - x = np.random.rand(1000, 1) - y = np.random.rand(1000, 1) + x = np.random.rand(1000, 10) + y = np.random.rand(1000, 10) x_nd, y_nd = mx.nd.array(y), mx.nd.array(x) net1 = self.make_net() diff --git a/testing/modules/gpregression_test.py b/testing/modules/gpregression_test.py index 8bf7ed2..b982a1e 100644 --- a/testing/modules/gpregression_test.py +++ b/testing/modules/gpregression_test.py @@ -13,18 +13,18 @@ # 
============================================================================== -import pytest import warnings import mxnet as mx +import mxnet.gluon.nn as nn import numpy as np from mxfusion.models import Model from mxfusion.modules.gp_modules import GPRegression from mxfusion.components.distributions.gp.kernels import RBF, White from mxfusion.components.distributions import GaussianProcess, Normal from mxfusion.components import Variable -from mxfusion.inference import Inference, MAP, ModulePredictionAlgorithm, TransferInference, create_Gaussian_meanfield, StochasticVariationalInference, GradBasedInference, ForwardSamplingAlgorithm, ModulePredictionAlgorithm +from mxfusion.components.functions import MXFusionGluonFunction +from mxfusion.inference import Inference, MAP, TransferInference, create_Gaussian_meanfield, StochasticVariationalInference, GradBasedInference, ForwardSamplingAlgorithm, ModulePredictionAlgorithm from mxfusion.components.variables.var_trans import PositiveTransformation -from mxfusion.inference.forward_sampling import ForwardSamplingAlgorithm from mxfusion.util.testutils import MockMXNetRandomGenerator from mxfusion.modules.gp_modules.gp_regression import GPRegressionSamplingPrediction @@ -57,6 +57,25 @@ def gen_mxfusion_model(self, dtype, D, noise_var, lengthscale, variance, m.Y = GPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, shape=(m.N, D), dtype=dtype, rand_gen=rand_gen) return m + def gen_mxfusion_model_w_mean(self, dtype, D, noise_var, lengthscale, + variance, rand_gen=None): + net = nn.HybridSequential(prefix='nn_') + with net.name_scope(): + net.add(nn.Dense(D, flatten=False, activation="tanh", + in_units=3, dtype=dtype)) + net.initialize(mx.init.Xavier(magnitude=3)) + + m = Model() + m.N = Variable() + m.X = Variable(shape=(m.N, 3)) + m.noise_var = Variable(transformation=PositiveTransformation(), initial_value=mx.nd.array(noise_var, dtype=dtype)) + kernel = RBF(input_dim=3, ARD=True, 
variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype) + m.mean_func = MXFusionGluonFunction(net, num_outputs=1, + broadcastable=True) + m.Y = GPRegression.define_variable(X=m.X, kernel=kernel, mean=m.mean_func(m.X), noise_var=m.noise_var, shape=(m.N, D), dtype=dtype, rand_gen=rand_gen) + m.Y.factor.gp_log_pdf.jitter = 1e-6 + return m, net + def test_log_pdf(self): D, X, Y, noise_var, lengthscale, variance = self.gen_data() @@ -76,6 +95,28 @@ def test_log_pdf(self): assert np.allclose(l_mf.asnumpy(), l_gpy) + def test_log_pdf_w_mean(self): + D, X, Y, noise_var, lengthscale, variance = self.gen_data() + + # MXFusion log-likelihood + dtype = 'float64' + m, net = self.gen_mxfusion_model_w_mean( + dtype, D, noise_var, lengthscale, variance) + + mean = net(mx.nd.array(X, dtype=dtype)).asnumpy() + + # GPy log-likelihood + m_gpy = GPy.models.GPRegression(X=X, Y=Y-mean, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance), noise_var=noise_var) + l_gpy = m_gpy.log_likelihood() + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) + l_mf = -loss + + assert np.allclose(l_mf.asnumpy(), l_gpy) + def test_draw_samples(self): D, X, Y, noise_var, lengthscale, variance = self.gen_data() dtype = 'float64' @@ -99,6 +140,32 @@ def test_draw_samples(self): assert np.allclose(samples, samples_2), (samples, samples_2) + def test_draw_samples_w_mean(self): + D, X, Y, noise_var, lengthscale, variance = self.gen_data() + dtype = 'float64' + + rand_gen = MockMXNetRandomGenerator(mx.nd.array(np.random.rand(20*D), dtype=dtype)) + + m, net = self.gen_mxfusion_model_w_mean(dtype, D, noise_var, lengthscale, variance, rand_gen) + + observed = [m.X] + infr = Inference(ForwardSamplingAlgorithm( + m, observed, num_samples=2, target_variables=[m.Y]), dtype=dtype) + + samples = infr.run(X=mx.nd.array(X, 
dtype=dtype), Y=mx.nd.array(Y, dtype=dtype))[0].asnumpy() + + kern = RBF(3, True, name='rbf', dtype=dtype) + White(3, dtype=dtype) + X_var = Variable(shape=(10, 3)) + mean_func = MXFusionGluonFunction(net, num_outputs=1, + broadcastable=True) + mean_var = mean_func(X_var) + gp = GaussianProcess.define_variable(X=X_var, kernel=kern, mean=mean_var, shape=(10, D), dtype=dtype, rand_gen=rand_gen).factor + + variables = {gp.X.uuid: mx.nd.expand_dims(mx.nd.array(X, dtype=dtype), axis=0), gp.add_rbf_lengthscale.uuid: mx.nd.expand_dims(mx.nd.array(lengthscale, dtype=dtype), axis=0), gp.add_rbf_variance.uuid: mx.nd.expand_dims(mx.nd.array(variance, dtype=dtype), axis=0), gp.add_white_variance.uuid: mx.nd.expand_dims(mx.nd.array(noise_var, dtype=dtype), axis=0), mean_var.uuid: mx.nd.expand_dims(net(mx.nd.array(X, dtype=dtype)), axis=0)} + samples_2 = gp.draw_samples(F=mx.nd, variables=variables, num_samples=2).asnumpy() + + assert np.allclose(samples, samples_2), (samples, samples_2) + def test_prediction(self): D, X, Y, noise_var, lengthscale, variance = self.gen_data() Xt = np.random.rand(20, 3) @@ -154,16 +221,41 @@ def test_prediction(self): infr2.inference_algorithm.model.Y.factor.gp_predict.noise_free = False res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0] mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0] - print((var_gpy, var_mf)) assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf) assert np.allclose(var_gpy, var_mf), (var_gpy, var_mf) - def test_sampling_prediction(self): + def test_prediction_w_mean(self): D, X, Y, noise_var, lengthscale, variance = self.gen_data() Xt = np.random.rand(20, 3) + dtype = 'float64' - m_gpy = GPy.models.GPRegression(X=X, Y=Y, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance), noise_var=noise_var) + m, net = self.gen_mxfusion_model_w_mean( + dtype, D, noise_var, lengthscale, variance) + + mean = net(mx.nd.array(X, dtype=dtype)).asnumpy() + mean_t = net(mx.nd.array(Xt, dtype=dtype)).asnumpy() + + m_gpy = 
GPy.models.GPRegression(X=X, Y=Y-mean, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance), noise_var=noise_var) + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) + + # noise_free, diagonal + mu_gpy, var_gpy = m_gpy.predict_noiseless(Xt) + mu_gpy += mean_t + + infr2 = TransferInference(ModulePredictionAlgorithm(m, observed=[m.X], target_variables=[m.Y]), infr_params=infr.params, dtype=np.float64) + res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0] + mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0] + + assert np.allclose(mu_gpy, mu_mf, rtol=1e-04, atol=1e-05), (mu_gpy, mu_mf) + assert np.allclose(var_gpy[:,0], var_mf, rtol=1e-04, atol=1e-05), (var_gpy[:,0], var_mf) + + def test_sampling_prediction(self): + D, X, Y, noise_var, lengthscale, variance = self.gen_data() + Xt = np.random.rand(20, 3) dtype = 'float64' m = self.gen_mxfusion_model(dtype, D, noise_var, lengthscale, variance) @@ -171,7 +263,7 @@ def test_sampling_prediction(self): observed = [m.X, m.Y] infr = Inference(MAP(model=m, observed=observed), dtype=dtype) - loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype), max_iter=1) infr_pred = TransferInference(ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y], num_samples=5), infr_params=infr.params) @@ -182,11 +274,38 @@ def test_sampling_prediction(self): gp._module_graph, gp._extra_graphs[0], [gp._module_graph.X]), alg_name='gp_predict') gp.gp_predict.diagonal_variance = False + gp.gp_predict.noise_free = False gp.gp_predict.jitter = 1e-6 y_samples = infr_pred.run(X=mx.nd.array(Xt, dtype=dtype))[0].asnumpy() # TODO: Check the correctness of the sampling + def test_sampling_prediction_w_mean(self): + D, X, Y, noise_var, lengthscale, variance = self.gen_data() + Xt = 
np.random.rand(20, 3) + dtype = 'float64' + m, net = self.gen_mxfusion_model_w_mean( + dtype, D, noise_var, lengthscale, variance) + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype), max_iter=1) + + infr_pred = TransferInference(ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y], num_samples=5), + infr_params=infr.params) + gp = m.Y.factor + gp.attach_prediction_algorithms( + targets=gp.output_names, conditionals=gp.input_names, + algorithm=GPRegressionSamplingPrediction( + gp._module_graph, gp._extra_graphs[0], [gp._module_graph.X]), + alg_name='gp_predict') + gp.gp_predict.diagonal_variance = True + gp.gp_predict.noise_free = False + gp.gp_predict.jitter = 1e-6 + + y_samples = infr_pred.run(X=mx.nd.array(Xt, dtype=dtype))[0].asnumpy() + def test_with_samples(self): from mxfusion.common import config config.DEFAULT_DTYPE = 'float64' @@ -230,7 +349,6 @@ def test_with_samples(self): xt = np.random.rand(13, 3) res = infr_pred2.run(X=mx.nd.array(xt, dtype=dtype))[0] - def test_prediction_print(self): D, X, Y, noise_var, lengthscale, variance = self.gen_data() Xt = np.random.rand(20, 3) @@ -247,3 +365,13 @@ def test_prediction_print(self): loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) print = infr.print_params() assert (len(print) > 1) + + def test_module_clone(self): + D, X, Y, noise_var, lengthscale, variance = self.gen_data() + dtype = 'float64' + + m = Model() + m.N = Variable() + kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype) + m.Y = GPRegression.define_variable(X=mx.nd.zeros((2, 3)), kernel=kernel, noise_var=mx.nd.ones((1,)), dtype=dtype) + m.clone() diff --git a/testing/modules/sparsegpregression_test.py b/testing/modules/sparsegpregression_test.py index 891ffbe..6639c8e 100644 --- 
a/testing/modules/sparsegpregression_test.py +++ b/testing/modules/sparsegpregression_test.py @@ -15,12 +15,14 @@ import pytest import mxnet as mx +import mxnet.gluon.nn as nn import numpy as np from mxfusion.models import Model from mxfusion.modules.gp_modules import SparseGPRegression from mxfusion.components.distributions.gp.kernels import RBF from mxfusion.components.distributions import Normal from mxfusion.components import Variable +from mxfusion.components.functions import MXFusionGluonFunction from mxfusion.inference import Inference, MAP, ModulePredictionAlgorithm, TransferInference, create_Gaussian_meanfield, StochasticVariationalInference, GradBasedInference, ForwardSamplingAlgorithm from mxfusion.components.variables.var_trans import PositiveTransformation from mxfusion.modules.gp_modules.sparsegp_regression import SparseGPRegressionSamplingPrediction @@ -56,6 +58,26 @@ def gen_mxfusion_model(self, dtype, D, Z, noise_var, lengthscale, variance, m.Y.factor.sgp_log_pdf.jitter = 1e-8 return m + def gen_mxfusion_model_w_mean(self, dtype, D, Z, noise_var, lengthscale, + variance, rand_gen=None): + net = nn.HybridSequential(prefix='nn_') + with net.name_scope(): + net.add(nn.Dense(D, flatten=False, activation="tanh", + in_units=3, dtype=dtype)) + net.initialize(mx.init.Xavier(magnitude=3)) + + m = Model() + m.N = Variable() + m.X = Variable(shape=(m.N, 3)) + m.Z = Variable(shape=(3, 3), initial_value=mx.nd.array(Z, dtype=dtype)) + m.noise_var = Variable(transformation=PositiveTransformation(), initial_value=mx.nd.array(noise_var, dtype=dtype)) + kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype) + m.mean_func = MXFusionGluonFunction(net, num_outputs=1, + broadcastable=True) + m.Y = SparseGPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, mean=m.mean_func(m.X), inducing_inputs=m.Z, shape=(m.N, D), dtype=dtype) + m.Y.factor.sgp_log_pdf.jitter = 
1e-8 + return m, net + def test_log_pdf(self): D, X, Y, Z, noise_var, lengthscale, variance = self.gen_data() @@ -76,6 +98,42 @@ def test_log_pdf(self): assert np.allclose(l_mf.asnumpy(), l_gpy) + def test_log_pdf_w_mean(self): + D, X, Y, Z, noise_var, lengthscale, variance = self.gen_data() + + # MXFusion log-likelihood + dtype = 'float64' + m, net = self.gen_mxfusion_model_w_mean( + dtype, D, Z, noise_var, lengthscale, variance) + + mean = net(mx.nd.array(X, dtype=dtype)).asnumpy() + + # GPy log-likelihood + m_gpy = GPy.models.SparseGPRegression(X=X, Y=Y-mean, Z=Z, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance), num_inducing=3) + m_gpy.likelihood.variance = noise_var + l_gpy = m_gpy.log_likelihood() + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) + l_mf = -loss + + assert np.allclose(l_mf.asnumpy(), l_gpy) + + def test_draw_samples(self): + D, X, Y, Z, noise_var, lengthscale, variance = self.gen_data() + dtype = 'float64' + + m = self.gen_mxfusion_model(dtype, D, Z, noise_var, lengthscale, + variance) + + observed = [m.X] + infr = Inference(ForwardSamplingAlgorithm( + m, observed, num_samples=2, target_variables=[m.Y]), dtype=dtype) + samples = infr.run(X=mx.nd.array(X, dtype=dtype))[0] + assert samples.shape == (2,) + Y.shape + def test_prediction(self): D, X, Y, Z, noise_var, lengthscale, variance = self.gen_data() Xt = np.random.rand(20, 3) @@ -137,6 +195,35 @@ def test_prediction(self): assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf) assert np.allclose(var_gpy, var_mf), (var_gpy, var_mf) + def test_prediction_w_mean(self): + D, X, Y, Z, noise_var, lengthscale, variance = self.gen_data() + Xt = np.random.rand(20, 3) + dtype = 'float64' + + m, net = self.gen_mxfusion_model_w_mean( + dtype, D, Z, noise_var, lengthscale, variance) + + mean = net(mx.nd.array(X, dtype=dtype)).asnumpy() + mean_t = 
net(mx.nd.array(Xt, dtype=dtype)).asnumpy() + + m_gpy = GPy.models.SparseGPRegression(X=X, Y=Y-mean, Z=Z, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance)) + m_gpy.likelihood.variance = noise_var + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) + + # noise_free, diagonal + mu_gpy, var_gpy = m_gpy.predict_noiseless(Xt) + mu_gpy += mean_t + + infr2 = TransferInference(ModulePredictionAlgorithm(m, observed=[m.X], target_variables=[m.Y]), infr_params=infr.params, dtype=np.float64) + res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0] + mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0] + + assert np.allclose(mu_gpy, mu_mf, rtol=1e-04, atol=1e-05), (mu_gpy, mu_mf) + assert np.allclose(var_gpy[:,0], var_mf, rtol=1e-04, atol=1e-05), (var_gpy[:,0], var_mf) + def test_sampling_prediction(self): D, X, Y, Z, noise_var, lengthscale, variance = self.gen_data() Xt = np.random.rand(20, 3) @@ -164,11 +251,38 @@ def test_sampling_prediction(self): gp._module_graph, gp._extra_graphs[0], [gp._module_graph.X]), alg_name='sgp_predict') gp.sgp_predict.diagonal_variance = False + gp.sgp_predict.noise_free = False gp.sgp_predict.jitter = 1e-6 y_samples = infr_pred.run(X=mx.nd.array(Xt, dtype=dtype))[0].asnumpy() - # TODO: Check the correctness of the sampling + def test_sampling_prediction_w_mean(self): + D, X, Y, Z, noise_var, lengthscale, variance = self.gen_data() + Xt = np.random.rand(20, 3) + + dtype = 'float64' + m, net = self.gen_mxfusion_model_w_mean( + dtype, D, Z, noise_var, lengthscale, variance) + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) + + # noise_free, diagonal + infr_pred = TransferInference(ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y], num_samples=5), + 
infr_params=infr.params) + gp = m.Y.factor + gp.attach_prediction_algorithms( + targets=gp.output_names, conditionals=gp.input_names, + algorithm=SparseGPRegressionSamplingPrediction( + gp._module_graph, gp._extra_graphs[0], [gp._module_graph.X]), + alg_name='sgp_predict') + gp.sgp_predict.diagonal_variance = True + gp.sgp_predict.noise_free = False + gp.sgp_predict.jitter = 1e-6 + + y_samples = infr_pred.run(X=mx.nd.array(Xt, dtype=dtype))[0].asnumpy() def test_with_samples(self): from mxfusion.common import config @@ -214,3 +328,13 @@ def test_with_samples(self): infr_pred2 = TransferInference(ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y]), infr_params=infr.params) xt = np.random.rand(13, 3) res = infr_pred2.run(X=mx.nd.array(xt, dtype=dtype))[0] + + def test_module_clone(self): + D, X, Y, Z, noise_var, lengthscale, variance = self.gen_data() + dtype = 'float64' + + m = Model() + m.N = Variable() + kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype) + m.Y = SparseGPRegression.define_variable(X=mx.nd.zeros((2, 3)), kernel=kernel, noise_var=mx.nd.ones((1,)), dtype=dtype) + m.clone() diff --git a/testing/modules/svgpregression_test.py b/testing/modules/svgpregression_test.py index 416f799..06ce5b8 100644 --- a/testing/modules/svgpregression_test.py +++ b/testing/modules/svgpregression_test.py @@ -16,12 +16,14 @@ import pytest import warnings import mxnet as mx +import mxnet.gluon.nn as nn import numpy as np from mxfusion.models import Model from mxfusion.modules.gp_modules import SVGPRegression from mxfusion.components.distributions.gp.kernels import RBF from mxfusion.components.distributions import Normal from mxfusion.components import Variable +from mxfusion.components.functions import MXFusionGluonFunction from mxfusion.inference import Inference, MAP, ModulePredictionAlgorithm, TransferInference, create_Gaussian_meanfield, 
StochasticVariationalInference, GradBasedInference, ForwardSamplingAlgorithm from mxfusion.components.variables.var_trans import PositiveTransformation from mxfusion.modules.gp_modules.svgp_regression import SVGPRegressionSamplingPrediction @@ -63,8 +65,29 @@ def gen_mxfusion_model(self, dtype, D, Z, noise_var, lengthscale, variance, kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype) m.Y = SVGPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, inducing_inputs=m.Z, shape=(m.N, D), dtype=dtype) gp = m.Y.factor + m.Y.factor.svgp_log_pdf.jitter = 1e-8 return m, gp + def gen_mxfusion_model_w_mean(self, dtype, D, Z, noise_var, lengthscale, + variance, rand_gen=None): + net = nn.HybridSequential(prefix='nn_') + with net.name_scope(): + net.add(nn.Dense(D, flatten=False, activation="tanh", + in_units=3, dtype=dtype)) + net.initialize(mx.init.Xavier(magnitude=3)) + + m = Model() + m.N = Variable() + m.X = Variable(shape=(m.N, 3)) + m.Z = Variable(shape=(3, 3), initial_value=mx.nd.array(Z, dtype=dtype)) + m.noise_var = Variable(transformation=PositiveTransformation(), initial_value=mx.nd.array(noise_var, dtype=dtype)) + kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype) + m.mean_func = MXFusionGluonFunction(net, num_outputs=1, + broadcastable=True) + m.Y = SVGPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, mean=m.mean_func(m.X), inducing_inputs=m.Z, shape=(m.N, D), dtype=dtype) + gp = m.Y.factor + return m, gp, net + def test_log_pdf(self): D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \ qU_cov_W, qU_cov_diag, qU_chol = self.gen_data() @@ -91,6 +114,60 @@ def test_log_pdf(self): assert np.allclose(l_mf.asnumpy(), l_gpy) + def test_log_pdf_w_mean(self): + D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \ + qU_cov_W, qU_cov_diag, 
qU_chol = self.gen_data() + dtype = 'float64' + m, gp, net = self.gen_mxfusion_model_w_mean(dtype, D, Z, noise_var, + lengthscale, variance) + mean = net(mx.nd.array(X, dtype=dtype)).asnumpy() + + m_gpy = GPy.core.SVGP(X=X, Y=Y-mean, Z=Z, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance), likelihood=GPy.likelihoods.Gaussian(variance=noise_var)) + m_gpy.q_u_mean = qU_mean + m_gpy.q_u_chol = GPy.util.choleskies.triang_to_flat(qU_chol) + + l_gpy = m_gpy.log_likelihood() + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + infr.initialize(X=X.shape, Y=Y.shape) + infr.params[gp._extra_graphs[0].qU_mean] = mx.nd.array(qU_mean, dtype=dtype) + infr.params[gp._extra_graphs[0].qU_cov_W] = mx.nd.array(qU_cov_W, dtype=dtype) + infr.params[gp._extra_graphs[0].qU_cov_diag] = mx.nd.array(qU_cov_diag, dtype=dtype) + + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) + l_mf = -loss + + assert np.allclose(l_mf.asnumpy(), l_gpy) + + def test_log_pdf_w_samples_of_noise_var(self): + D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \ + qU_cov_W, qU_cov_diag, qU_chol = self.gen_data() + dtype = 'float64' + D = 2 + Y = np.random.rand(10, D) + qU_mean = np.random.rand(3, D) + + m = Model() + m.N = Variable() + m.X = Variable(shape=(m.N, 3)) + m.Z = Variable(shape=(3, 3), initial_value=mx.nd.array(Z, dtype=dtype)) + m.noise_var = Variable(transformation=PositiveTransformation(), shape=(m.N, D)) + kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype) + m.Y = SVGPRegression.define_variable(X=m.X, kernel=kernel, noise_var=m.noise_var, inducing_inputs=m.Z, shape=(m.N, D), dtype=dtype) + gp = m.Y.factor + m.Y.factor.svgp_log_pdf.jitter = 1e-8 + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + infr.initialize(X=X.shape, Y=Y.shape) + 
infr.params[gp._extra_graphs[0].qU_mean] = mx.nd.array(qU_mean, dtype=dtype) + infr.params[gp._extra_graphs[0].qU_cov_W] = mx.nd.array(qU_cov_W, dtype=dtype) + infr.params[gp._extra_graphs[0].qU_cov_diag] = mx.nd.array(qU_cov_diag, dtype=dtype) + + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype), max_iter=1) + + def test_prediction(self): D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \ qU_cov_W, qU_cov_diag, qU_chol = self.gen_data() @@ -121,7 +198,8 @@ def test_prediction(self): mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0] assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf) - assert np.allclose(var_gpy[:,0], var_mf), (var_gpy[:,0], var_mf) + assert np.allclose(var_gpy, var_mf), (var_gpy, var_mf) + assert var_gpy.shape == var_mf.shape # noisy, diagonal mu_gpy, var_gpy = m_gpy.predict(Xt) @@ -132,9 +210,10 @@ def test_prediction(self): mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0] assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf) - assert np.allclose(var_gpy[:,0], var_mf), (var_gpy[:,0], var_mf) + assert np.allclose(var_gpy, var_mf), (var_gpy, var_mf) + assert var_gpy.shape == var_mf.shape - # TODO: The full covariance matrix prediction with SVGP in GPy may not be correct. Need further investigation. 
+ m.Y.factor.svgp_predict.jitter = 1e-8 # noise_free, full_cov mu_gpy, var_gpy = m_gpy.predict_noiseless(Xt, full_cov=True) @@ -145,10 +224,9 @@ def test_prediction(self): res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0] mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0] - print(var_gpy.shape, var_mf.shape) - assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf) - assert np.allclose(var_gpy[:, :, 0], var_mf), (var_gpy[:, :, 0], var_mf) + assert np.allclose(var_gpy, var_mf), (var_gpy, var_mf) + assert var_gpy.shape == var_mf.shape # noisy, full_cov mu_gpy, var_gpy = m_gpy.predict(Xt, full_cov=True) @@ -160,7 +238,55 @@ def test_prediction(self): mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0] assert np.allclose(mu_gpy, mu_mf), (mu_gpy, mu_mf) - assert np.allclose(var_gpy[:, :, 0], var_mf), (var_gpy[:, :, 0], var_mf) + assert np.allclose(var_gpy, var_mf), (var_gpy, var_mf) + assert var_gpy.shape == var_mf.shape + + def test_draw_samples(self): + D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \ + qU_cov_W, qU_cov_diag, qU_chol = self.gen_data() + dtype = 'float64' + m, gp = self.gen_mxfusion_model(dtype, D, Z, noise_var, lengthscale, + variance) + + observed = [m.X] + infr = Inference(ForwardSamplingAlgorithm( + m, observed, num_samples=2, target_variables=[m.Y]), dtype=dtype) + samples = infr.run(X=mx.nd.array(X, dtype=dtype))[0] + assert samples.shape == (2,) + Y.shape + + def test_prediction_w_mean(self): + D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \ + qU_cov_W, qU_cov_diag, qU_chol = self.gen_data() + Xt = np.random.rand(5, 3) + dtype = 'float64' + m, gp, net = self.gen_mxfusion_model_w_mean(dtype, D, Z, noise_var, + lengthscale, variance) + mean = net(mx.nd.array(X, dtype=dtype)).asnumpy() + mean_t = net(mx.nd.array(Xt, dtype=dtype)).asnumpy() + + m_gpy = GPy.core.SVGP(X=X, Y=Y-mean, Z=Z, kernel=GPy.kern.RBF(3, ARD=True, lengthscale=lengthscale, variance=variance), likelihood=GPy.likelihoods.Gaussian(variance=noise_var)) + 
m_gpy.q_u_mean = qU_mean + m_gpy.q_u_chol = GPy.util.choleskies.triang_to_flat(qU_chol) + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + infr.initialize(X=X.shape, Y=Y.shape) + infr.params[gp._extra_graphs[0].qU_mean] = mx.nd.array(qU_mean, dtype=dtype) + infr.params[gp._extra_graphs[0].qU_cov_W] = mx.nd.array(qU_cov_W, dtype=dtype) + infr.params[gp._extra_graphs[0].qU_cov_diag] = mx.nd.array(qU_cov_diag, dtype=dtype) + + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, dtype=dtype)) + + # noise_free, diagonal + mu_gpy, var_gpy = m_gpy.predict_noiseless(Xt) + mu_gpy += mean_t + + infr2 = TransferInference(ModulePredictionAlgorithm(m, observed=[m.X], target_variables=[m.Y]), infr_params=infr.params, dtype=np.float64) + res = infr2.run(X=mx.nd.array(Xt, dtype=dtype))[0] + mu_mf, var_mf = res[0].asnumpy()[0], res[1].asnumpy()[0] + + assert np.allclose(mu_gpy, mu_mf, rtol=1e-04, atol=1e-05), (mu_gpy, mu_mf) + assert np.allclose(var_gpy, var_mf, rtol=1e-04, atol=1e-05), (var_gpy, var_mf) def test_sampling_prediction(self): D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \ @@ -194,11 +320,39 @@ def test_sampling_prediction(self): gp._module_graph, gp._extra_graphs[0], [gp._module_graph.X]), alg_name='svgp_predict') gp.svgp_predict.diagonal_variance = False + gp.svgp_predict.noise_free = False gp.svgp_predict.jitter = 1e-6 y_samples = infr_pred.run(X=mx.nd.array(Xt, dtype=dtype))[0].asnumpy() - # TODO: Check the correctness of the sampling + def test_sampling_prediction_w_mean(self): + D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \ + qU_cov_W, qU_cov_diag, qU_chol = self.gen_data() + Xt = np.random.rand(20, 3) + + dtype = 'float64' + m, gp, net = self.gen_mxfusion_model_w_mean(dtype, D, Z, noise_var, + lengthscale, variance) + + observed = [m.X, m.Y] + infr = Inference(MAP(model=m, observed=observed), dtype=dtype) + + loss, _ = infr.run(X=mx.nd.array(X, dtype=dtype), Y=mx.nd.array(Y, 
dtype=dtype)) + + # noise_free, diagonal + infr_pred = TransferInference(ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y], num_samples=5), + infr_params=infr.params) + gp = m.Y.factor + gp.attach_prediction_algorithms( + targets=gp.output_names, conditionals=gp.input_names, + algorithm=SVGPRegressionSamplingPrediction( + gp._module_graph, gp._extra_graphs[0], [gp._module_graph.X]), + alg_name='svgp_predict') + gp.svgp_predict.diagonal_variance = True + gp.svgp_predict.noise_free = False + gp.svgp_predict.jitter = 1e-6 + + y_samples = infr_pred.run(X=mx.nd.array(Xt, dtype=dtype))[0].asnumpy() def test_with_samples(self): from mxfusion.common import config @@ -250,3 +404,15 @@ def test_with_samples(self): infr_pred2 = TransferInference(ModulePredictionAlgorithm(model=m, observed=[m.X], target_variables=[m.Y]), infr_params=infr.params) xt = np.random.rand(13, 3) res = infr_pred2.run(X=mx.nd.array(xt, dtype=dtype))[0] + + def test_module_clone(self): + D, X, Y, Z, noise_var, lengthscale, variance, qU_mean, \ + qU_cov_W, qU_cov_diag, qU_chol = self.gen_data() + dtype = 'float64' + + m = Model() + m.N = Variable() + m.X = Variable(shape=(m.N, 3)) + kernel = RBF(input_dim=3, ARD=True, variance=mx.nd.array(variance, dtype=dtype), lengthscale=mx.nd.array(lengthscale, dtype=dtype), dtype=dtype) + m.Y = SVGPRegression.define_variable(X=mx.nd.zeros((2, 3)), kernel=kernel, noise_var=mx.nd.ones((1,)), dtype=dtype) + m.clone() diff --git a/testing/util/special_test.py b/testing/util/special_test.py index 7b4822c..7a8cf7f 100644 --- a/testing/util/special_test.py +++ b/testing/util/special_test.py @@ -17,12 +17,11 @@ import mxnet as mx import numpy as np from mxfusion.util.special import log_determinant, log_multivariate_gamma -from sklearn.datasets import make_spd_matrix +from mxfusion.util.testutils import make_spd_matrix from itertools import product from scipy.special import multigammaln -# @pytest.mark.usefixtures("set_seed") class 
TestSpecialFunctions: """ Tests special functions. @@ -31,7 +30,8 @@ class TestSpecialFunctions: @pytest.mark.parametrize("n_dim, random_state", list(product((10, 100, 1000), range(1, 4)))) def test_log_determinant(self, n_dim, random_state): - A = make_spd_matrix(n_dim=n_dim, random_state=random_state) + np.random.seed(random_state) + A = make_spd_matrix(dim=n_dim) assert all(np.linalg.eigvals(A) > 0) a = mx.nd.array(A)