diff --git a/docs/paper.bib b/docs/paper.bib
index d7b05ca37..32cda6514 100644
--- a/docs/paper.bib
+++ b/docs/paper.bib
@@ -32,7 +32,7 @@ @misc{xskillscore
   publisher = {GitHub},
   url = {https://github.com/xarray-contrib/xskillscore}
 }
-@article{Ehm2016,
+@article{Ehm:2016,
   title = {Of quantiles and expectiles: consistent scoring functions, Choquet representations and forecast rankings},
   author = {Werner Ehm and Tilmann Gneiting and Alexander Jordan and Fabian Krüger},
   year = 2016,
@@ -60,7 +60,7 @@ @article{Taggart:2022
   eprint = {https://rmets.onlinelibrary.wiley.com/doi/pdf/10.1002/qj.4266},
   abstract = {Abstract The use of tiered warnings and multicategorical forecasts are ubiquitous in meteorological operations. Here, a flexible family of scoring functions is presented for evaluating the performance of ordered multicategorical forecasts. Each score has a risk parameter α\$\$ \alpha \$\$, selected for the specific use case, so that it is consistent with a forecast directive based on the fixed threshold probability 1−α\$\$ 1-\alpha \$\$ (equivalently, a fixed α\$\$ \alpha \$\$-quantile mapping). Each score also has use-case specific weights so that forecasters who accurately discriminate between categorical thresholds are rewarded in proportion to the weight for that threshold. A variation is presented where the penalty assigned to near misses or close false alarms is discounted, which again is consistent with directives based on fixed risk measures. The scores presented provide an alternative to many performance measures currently in use, whose optimal threshold probabilities for forecasting an event typically vary with each forecast case, and in the case of equitable scores are based around sample base rates rather than risk measures suitable for users.}
 }
-@article{f095b858-f542-354a-bb1c-fab9c0b77166,
+@article{Hering:2011,
   title = {Comparing Spatial Predictions},
   author = {Amanda S. Hering and Marc G. Genton},
   year = 2011,
@@ -74,7 +74,7 @@ @article{f095b858-f542-354a-bb1c-fab9c0b77166
   urldate = {2023-09-06},
   abstract = {Under a general loss function, we develop a hypothesis test to determine whether a significant difference in the spatial predictions produced by two competing models exists on average across the entire spatial domain of interest. The null hypothesis is that of no difference, and a spatial loss differential is created based on the observed data, the two sets of predictions, and the loss function chosen by the researcher. The test assumes only isotropy and short-range spatial dependence of the loss differential but does allow it to be non-Gaussian, non-zero-mean, and spatially correlated. Constant and nonconstant spatial trends in the loss differential are treated in two separate cases. Monte Carlo simulations illustrate the size and power properties of this test, and an example based on daily average wind speeds in Oklahoma is used for illustration. Supplemental results are available online.}
 }
-@article{HARVEY1997281,
+@article{Harvey:1997,
   title = {Testing the equality of prediction mean squared errors},
   author = {David Harvey and Stephen Leybourne and Paul Newbold},
   year = 1997,