Get default metrics for sample-based forecasts
Source: R/class-forecast-sample.R
For sample-based forecasts, the default scoring rules are:
"crps" =
crps_sample()
"overprediction" =
overprediction_sample()
"underprediction" =
underprediction_sample()
"dispersion" =
dispersion_sample()
"log_score" =
logs_sample()
"dss" =
dss_sample()
"mad" =
mad_sample()
"bias" =
bias_sample()
"ae_median" =
ae_median_sample()
"se_mean" =
se_mean_sample()
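As a minimal sketch of how this list is typically used (assuming the example_sample_continuous data set bundled with scoringutils, as in the example below), the returned metrics can be passed directly to score():

# minimal sketch, not part of the package documentation:
# fetch the default metric list and use it to score sample-based forecasts
library(scoringutils)
metrics <- get_metrics(example_sample_continuous)
scores <- score(example_sample_continuous, metrics = metrics)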
Usage
# S3 method for class 'forecast_sample'
get_metrics(x, select = NULL, exclude = NULL, ...)
Arguments
- x: A forecast object (a validated data.table with predicted and observed values, see as_forecast_sample()).
- select: A character vector of scoring rules to select from the list. If select is NULL (the default), all possible scoring rules are returned.
- exclude: A character vector of scoring rules to exclude from the list. If select is not NULL, this argument is ignored.
- ...: Unused.
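For instance (a sketch using the bundled example_sample_continuous data), select keeps only the named rules, while exclude, shown in the Examples below, removes rules from the full list:

# sketch: keep only CRPS and the log score from the default list
get_metrics(example_sample_continuous, select = c("crps", "log_score"))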
See also
Other get_metrics functions: get_metrics(), get_metrics.forecast_binary(), get_metrics.forecast_nominal(), get_metrics.forecast_ordinal(), get_metrics.forecast_point(), get_metrics.forecast_quantile(), get_metrics.scores()
Examples
get_metrics(example_sample_continuous, exclude = "mad")
#> $bias
#> function (observed, predicted)
#> {
#> assert_input_sample(observed, predicted)
#> prediction_type <- get_type(predicted)
#> n_pred <- ncol(predicted)
#> p_x <- rowSums(predicted <= observed)/n_pred
#> if (prediction_type == "continuous") {
#> res <- 1 - 2 * p_x
#> return(res)
#> }
#> else {
#> p_xm1 <- rowSums(predicted <= (observed - 1))/n_pred
#> res <- 1 - (p_x + p_xm1)
#> return(res)
#> }
#> }
#> <bytecode: 0x55993453bc10>
#> <environment: namespace:scoringutils>
#>
#> $dss
#> function (observed, predicted, ...)
#> {
#> assert_input_sample(observed, predicted)
#> scoringRules::dss_sample(y = observed, dat = predicted, ...)
#> }
#> <bytecode: 0x559939cc21d0>
#> <environment: namespace:scoringutils>
#>
#> $crps
#> function (observed, predicted, separate_results = FALSE, ...)
#> {
#> assert_input_sample(observed, predicted)
#> crps <- scoringRules::crps_sample(y = observed, dat = predicted,
#> ...)
#> if (separate_results) {
#> if (is.null(dim(predicted))) {
#> dim(predicted) <- c(1, length(predicted))
#> }
#> medians <- apply(predicted, 1, median)
#> dispersion <- scoringRules::crps_sample(y = medians,
#> dat = predicted, ...)
#> overprediction <- rep(0, length(observed))
#> underprediction <- rep(0, length(observed))
#> if (any(observed < medians)) {
#> overprediction[observed < medians] <- scoringRules::crps_sample(y = observed[observed <
#> medians], dat = predicted[observed < medians,
#> , drop = FALSE], ...)
#> }
#> if (any(observed > medians)) {
#> underprediction[observed > medians] <- scoringRules::crps_sample(y = observed[observed >
#> medians], dat = predicted[observed > medians,
#> , drop = FALSE], ...)
#> }
#> if (any(overprediction > 0)) {
#> overprediction[overprediction > 0] <- overprediction[overprediction >
#> 0] - dispersion[overprediction > 0]
#> }
#> if (any(underprediction > 0)) {
#> underprediction[underprediction > 0] <- underprediction[underprediction >
#> 0] - dispersion[underprediction > 0]
#> }
#> return(list(crps = crps, dispersion = dispersion, underprediction = underprediction,
#> overprediction = overprediction))
#> }
#> else {
#> return(crps)
#> }
#> }
#> <bytecode: 0x5599382d71d8>
#> <environment: namespace:scoringutils>
#>
#> $overprediction
#> function (observed, predicted, ...)
#> {
#> crps <- crps_sample(observed, predicted, separate_results = TRUE,
#> ...)
#> return(crps$overprediction)
#> }
#> <bytecode: 0x55993273edb0>
#> <environment: namespace:scoringutils>
#>
#> $underprediction
#> function (observed, predicted, ...)
#> {
#> crps <- crps_sample(observed, predicted, separate_results = TRUE,
#> ...)
#> return(crps$underprediction)
#> }
#> <bytecode: 0x55993273d408>
#> <environment: namespace:scoringutils>
#>
#> $dispersion
#> function (observed, predicted, ...)
#> {
#> crps <- crps_sample(observed, predicted, separate_results = TRUE,
#> ...)
#> return(crps$dispersion)
#> }
#> <bytecode: 0x55993273a758>
#> <environment: namespace:scoringutils>
#>
#> $log_score
#> function (observed, predicted, ...)
#> {
#> assert_input_sample(observed, predicted)
#> scoringRules::logs_sample(y = observed, dat = predicted,
#> ...)
#> }
#> <bytecode: 0x559932737858>
#> <environment: namespace:scoringutils>
#>
#> $ae_median
#> function (observed, predicted)
#> {
#> assert_input_sample(observed, predicted)
#> median_predictions <- apply(as.matrix(predicted), MARGIN = 1,
#> FUN = median)
#> ae_median <- abs(observed - median_predictions)
#> return(ae_median)
#> }
#> <bytecode: 0x559938740dc0>
#> <environment: namespace:scoringutils>
#>
#> $se_mean
#> function (observed, predicted)
#> {
#> assert_input_sample(observed, predicted)
#> mean_predictions <- rowMeans(as.matrix(predicted))
#> se_mean <- (observed - mean_predictions)^2
#> return(se_mean)
#> }
#> <bytecode: 0x559932736f50>
#> <environment: namespace:scoringutils>
#>