diff --git a/docs/source/notebooks/posterior_predictive.ipynb b/docs/source/notebooks/posterior_predictive.ipynb
index 2933a40edb..c94b1f96bd 100644
--- a/docs/source/notebooks/posterior_predictive.ipynb
+++ b/docs/source/notebooks/posterior_predictive.ipynb
@@ -193,7 +193,8 @@
    "source": [
     "## Comparison between PPC and other model evaluation methods. \n",
     "\n",
-    "An excellent introduction to this is given on [Edward](http://edwardlib.org/tut_PPC) and since I can't write this any better I'll just quote this. \n",
+    "An excellent introduction to this is given on [Edward](http://edwardlib.org/tutorials/ppc) and since I can't write this any better I'll just quote this:\n",
+    "\n",
     "\"PPCs are an excellent tool for revising models, simplifying or expanding the current model as one examines how well it fits the data. They are inspired by prior checks and classical hypothesis testing, under the philosophy that models should be criticized under the frequentist perspective of large sample assessment.\n",
     "\n",
     "PPCs can also be applied to tasks such as hypothesis testing, model comparison, model selection, and model averaging. It’s important to note that while they can be applied as a form of Bayesian hypothesis testing, hypothesis testing is generally not recommended: binary decision making from a single test is not as common a use case as one might believe. We recommend performing many PPCs to get a holistic understanding of the model fit.\" \n",
@@ -376,21 +377,12 @@
     "plt.xlabel('predictor')\n",
     "plt.ylabel('outcome')"
    ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "collapsed": true
-   },
-   "outputs": [],
-   "source": []
   }
  ],
  "metadata": {
   "anaconda-cloud": {},
   "kernelspec": {
-   "display_name": "Python [default]",
+   "display_name": "Python 3",
    "language": "python",
    "name": "python3"
   },