UPDATE: 2024-01-19 22:57:24.975755

Introduction

This note is a transcription of material from 「StanとRでベイズ統計モデリング」 (Bayesian Statistical Modeling with Stan and R), written to deepen my understanding of Bayesian statistics.

Basically, I transcribe the parts that catch my interest and add supplementary notes for myself here and there, aiming for this to serve as my own companion material for reading through 「StanとRでベイズ統計モデリング」. Note that my interpretation may be off and these notes may contain errors.

This time I transcribe from the second half of Chapter 12, 「時間や空間を扱うモデル」 (Models Dealing with Time and Space).

12.1 Getting Started with State Space Models

library(dplyr)
library(rstan)
library(ggplot2)

options(max.print = 999999)
rstan_options(auto_write = TRUE)             # cache the compiled model on disk
options(mc.cores = parallel::detectCores())  # run the chains in parallel

d <- read.csv('https://raw.githubusercontent.com/MatsuuraKentaro/RStanBook/master/chap12/input/data-ss1.txt')
ggplot() +
  theme_bw(base_size = 15) +
  geom_line(data = d, aes(x = X, y = Y), linewidth = 1) +
  geom_point(data = d, aes(x = X, y = Y), size = 3) +
  labs(x = 'Time (Day)', y = 'Y')

12.1.1 Purpose of the Analysis

12.1.2 Checking the Distribution of the Data

12.1.3 Imagining the Mechanism

\[ \begin{aligned} \mu[t] - \mu[t-1] &= \epsilon_{\mu}[t] &\quad \epsilon_{\mu}[t] &\sim \text{Normal}(0, \sigma_{\mu}) \\ \mu[t] &= \mu[t-1] + \epsilon_{\mu}[t] &\quad \epsilon_{\mu}[t] &\sim \text{Normal}(0, \sigma_{\mu}) \\ \end{aligned} \]

12.1.4 Writing Down the Model Formula

Model 12-1

\[ \begin{eqnarray} \mu[t] &=& \mu[t-1] + \epsilon_{\mu}[t-1] &\quad t = 2,...,T \\ Y[t] &=& \mu[t] + \epsilon_{Y}[t] &\quad t = 1,...,T \\ \epsilon_{\mu}[t] &\sim& Normal(0, \sigma_{\mu}) &\quad t = 1,...,T-1 \\ \epsilon_{Y}[t] &\sim& Normal(0, \sigma_{Y}) &\quad t = 1,...,T \\\\ \end{eqnarray} \]

Model 12-2

\[ \begin{eqnarray} \mu[t] &\sim& Normal(\mu[t-1], \sigma_{\mu}) &\quad t = 2,...,T \\ Y[t] &\sim& Normal(\mu[t], \sigma_{Y}) &\quad t = 1,...,T \\\\ \end{eqnarray} \]
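
To get a feel for the data-generating process behind Models 12-1 and 12-2, here is a minimal R sketch that simulates a random-walk state mu plus observation noise. The values of T_sim, s_mu, and s_Y are arbitrary assumptions for illustration, not values from the book.

set.seed(123)
T_sim <- 21
s_mu  <- 0.4   # sd of the system (state) noise
s_Y   <- 0.2   # sd of the observation noise

mu_sim <- numeric(T_sim)
mu_sim[1] <- 11
for (t in 2:T_sim) {
  mu_sim[t] <- rnorm(1, mean = mu_sim[t-1], sd = s_mu)   # mu[t] ~ Normal(mu[t-1], s_mu)
}
Y_sim <- rnorm(T_sim, mean = mu_sim, sd = s_Y)           # Y[t] ~ Normal(mu[t], s_Y)

plot(Y_sim, type = 'b', xlab = 'Time (Day)', ylab = 'Y')
lines(mu_sim, col = 'gray')

Rerunning the sketch with different seeds shows that the observations wander together with the latent level mu, with extra jitter of size s_Y on top.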

12.1.5 Implementation in Stan

The Stan model is as follows.

data {
  int T;
  int T_pred;
  vector[T] Y;
}

parameters {
  vector[T] mu;
  real<lower=0> s_mu;
  real<lower=0> s_Y;
}

model {
  // Vectorized version:
  // mu[2:T] ~ normal(mu[1:(T-1)], s_mu);
  // Y ~ normal(mu, s_Y);

  for (t in 2:T) {
    mu[t] ~ normal(mu[t-1], s_mu); 
  }

  for (t in 1:T) {
    Y[t] ~ normal(mu[t], s_Y);
  }
}

generated quantities {

  vector[T+T_pred] mu_all;
  vector[T_pred] y_pred;

  mu_all[1:T] = mu;

  for (t in 1:T_pred) {
    int idx = T + t;
    mu_all[idx] = normal_rng(mu_all[idx-1], s_mu);
    y_pred[t] = normal_rng(mu_all[idx], s_Y);
  }

}

12.1.6 Interpreting the Estimation Results

Prepare the data.

T <- nrow(d)
T_pred <- 3
data <- list(T = T, T_pred = T_pred, Y = d$Y)
data
## $T
## [1] 21
## 
## $T_pred
## [1] 3
## 
## $Y
##  [1] 11.2 11.0 11.3 10.8 10.8 11.3 11.1 11.0 11.4 11.7 12.5 12.6 12.8 13.0 13.0
## [16] 13.6 13.3 12.6 13.1 12.6 12.1

Here, the model is first compiled with the stan_model() function,

model122 <- stan_model('note_ahirubayes16-122.stan')

and then sampling is run with the sampling() function.

fit <- sampling(object = model122, data = data, seed = 1989)

The estimation results are as follows.

print(fit, prob = c(0.025, 0.5, 0.975), digits_summary = 1)
## Inference for Stan model: anon_model.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##            mean se_mean   sd 2.5%  50% 97.5% n_eff Rhat
## mu[1]      11.2       0  0.2 10.8 11.2  11.5  4678    1
## mu[2]      11.0       0  0.1 10.8 11.0  11.4  3518    1
## mu[3]      11.2       0  0.1 10.9 11.2  11.5  1550    1
## mu[4]      10.9       0  0.1 10.6 10.8  11.2  1940    1
## mu[5]      10.9       0  0.1 10.6 10.8  11.2  2494    1
## mu[6]      11.2       0  0.1 10.9 11.3  11.5  1884    1
## mu[7]      11.1       0  0.1 10.8 11.1  11.4  4603    1
## mu[8]      11.1       0  0.2 10.8 11.0  11.4  1292    1
## mu[9]      11.4       0  0.1 11.1 11.4  11.7  4242    1
## mu[10]     11.7       0  0.1 11.5 11.7  12.1  2616    1
## mu[11]     12.4       0  0.1 12.1 12.4  12.7  1514    1
## mu[12]     12.6       0  0.1 12.3 12.6  12.9  4382    1
## mu[13]     12.8       0  0.1 12.5 12.8  13.1  4767    1
## mu[14]     13.0       0  0.1 12.7 13.0  13.3  3980    1
## mu[15]     13.0       0  0.1 12.8 13.0  13.4  3525    1
## mu[16]     13.5       0  0.2 13.1 13.5  13.7   862    1
## mu[17]     13.3       0  0.1 12.9 13.3  13.5  2464    1
## mu[18]     12.7       0  0.2 12.5 12.7  13.1   958    1
## mu[19]     13.0       0  0.2 12.6 13.0  13.2   910    1
## mu[20]     12.6       0  0.1 12.3 12.6  12.9  4739    1
## mu[21]     12.2       0  0.2 11.9 12.1  12.6  1373    1
## s_mu        0.4       0  0.1  0.2  0.4   0.6  1497    1
## s_Y         0.2       0  0.1  0.0  0.1   0.3   220    1
## mu_all[1]  11.2       0  0.2 10.8 11.2  11.5  4678    1
## mu_all[2]  11.0       0  0.1 10.8 11.0  11.4  3518    1
## mu_all[3]  11.2       0  0.1 10.9 11.2  11.5  1550    1
## mu_all[4]  10.9       0  0.1 10.6 10.8  11.2  1940    1
## mu_all[5]  10.9       0  0.1 10.6 10.8  11.2  2494    1
## mu_all[6]  11.2       0  0.1 10.9 11.3  11.5  1884    1
## mu_all[7]  11.1       0  0.1 10.8 11.1  11.4  4603    1
## mu_all[8]  11.1       0  0.2 10.8 11.0  11.4  1292    1
## mu_all[9]  11.4       0  0.1 11.1 11.4  11.7  4242    1
## mu_all[10] 11.7       0  0.1 11.5 11.7  12.1  2616    1
## mu_all[11] 12.4       0  0.1 12.1 12.4  12.7  1514    1
## mu_all[12] 12.6       0  0.1 12.3 12.6  12.9  4382    1
## mu_all[13] 12.8       0  0.1 12.5 12.8  13.1  4767    1
## mu_all[14] 13.0       0  0.1 12.7 13.0  13.3  3980    1
## mu_all[15] 13.0       0  0.1 12.8 13.0  13.4  3525    1
## mu_all[16] 13.5       0  0.2 13.1 13.5  13.7   862    1
## mu_all[17] 13.3       0  0.1 12.9 13.3  13.5  2464    1
## mu_all[18] 12.7       0  0.2 12.5 12.7  13.1   958    1
## mu_all[19] 13.0       0  0.2 12.6 13.0  13.2   910    1
## mu_all[20] 12.6       0  0.1 12.3 12.6  12.9  4739    1
## mu_all[21] 12.2       0  0.2 11.9 12.1  12.6  1373    1
## mu_all[22] 12.2       0  0.4 11.3 12.2  13.0  3412    1
## mu_all[23] 12.2       0  0.6 10.9 12.2  13.4  3735    1
## mu_all[24] 12.2       0  0.7 10.7 12.2  13.6  3800    1
## y_pred[1]  12.2       0  0.5 11.2 12.2  13.1  3297    1
## y_pred[2]  12.2       0  0.6 10.9 12.2  13.4  3900    1
## y_pred[3]  12.2       0  0.8 10.7 12.2  13.6  3776    1
## lp__       40.0       1 11.7 22.0 37.7  69.0   138    1
## 
## Samples were drawn using NUTS(diag_e) at Fri Jan 19 22:57:27 2024.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
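
Every Rhat shown above is 1, so the chains appear to have mixed well, although n_eff for s_Y and lp__ is on the small side. A quick programmatic check is sketched below; summary() on a stanfit object returns a list whose $summary matrix includes the Rhat and n_eff columns.

fit_summary <- summary(fit)$summary
all(fit_summary[, 'Rhat'] < 1.1, na.rm = TRUE)   # TRUE if every Rhat is below 1.1
head(sort(fit_summary[, 'n_eff']))               # parameters with the smallest n_eff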

12.1.7 Smoothing the State Changes

ms <- rstan::extract(fit)
# quantile(ms$s_mu, probs = c(0.1, 0.5, 0.9))
# quantile(ms$s_Y, probs = c(0.1, 0.5, 0.9))

qua <- apply(ms$mu_all, 2, quantile, probs = c(0.1, 0.25, 0.50, 0.75, 0.9))
d_est <- data.frame(X = 1:(T+T_pred), t(qua), check.names = FALSE)

ggplot() +  
  theme_bw(base_size = 15) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `10%`, ymax = `90%`), fill = 'black', alpha = 1/6) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `25%`, ymax = `75%`), fill = 'black', alpha = 2/6) +
  geom_line(data = d_est, aes(x = X, y = `50%`), linewidth = 1) +
  geom_point(data = d, aes(x = X, y = Y), shape = 16, size = 2.5) +
  geom_vline(xintercept = T, linetype = 'dashed') +
  coord_cartesian(xlim = c(1, 24), ylim = c(10, 14)) +
  labs(x = 'Time (Day)', y = 'Y')

Model 12-3

\[ \begin{eqnarray} \mu[t] - \mu[t-1] &=& \mu[t-1] - \mu[t-2] + \epsilon_{\mu}[t-2] &\quad t = 3,...,T \\ \epsilon_{\mu}[t] &\sim& Normal(0, \sigma_{\mu}) &\quad t = 1,...,T-2 \\ Y[t] &\sim& Normal(\mu[t], \sigma_{Y}) &\quad t = 1,...,T \\ \end{eqnarray} \]

\[ \begin{eqnarray} \mu[t] &=& 2\mu[t-1] - \mu[t-2] + \epsilon_{\mu}[t-2] &\quad t = 3,...,T \\ \end{eqnarray} \]

Model 12-4

\[ \begin{eqnarray} \mu[t] &\sim& Normal(2\mu[t-1] - \mu[t-2], \sigma_{\mu}) &\quad t = 3,...,T \\ Y[t] &\sim& Normal(\mu[t], \sigma_{Y}) &\quad t = 1,...,T \\ \end{eqnarray} \]
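
In Model 12-4 it is the trend (the first difference of mu), not mu itself, that follows a random walk, so the estimated states become smoother. The following is a minimal R sketch of this mechanism; the parameter values are arbitrary assumptions for illustration.

set.seed(123)
T_sim <- 21
s_mu  <- 0.2
s_Y   <- 0.3

mu_sim <- numeric(T_sim)
mu_sim[1:2] <- c(11.0, 11.1)
for (t in 3:T_sim) {
  mu_sim[t] <- rnorm(1, mean = 2*mu_sim[t-1] - mu_sim[t-2], sd = s_mu)   # Model 12-4
}
Y_sim <- rnorm(T_sim, mean = mu_sim, sd = s_Y)

plot(Y_sim, type = 'b', xlab = 'Time (Day)', ylab = 'Y')
lines(mu_sim, col = 'gray')

Because the forecast extrapolates the most recent slope, the predictive spread also widens faster than in Model 12-2 (compare the sd of mu_all[22] to mu_all[24] in the two fits).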

The Stan model is as follows.

data {
  int T;
  int T_pred; 
  vector[T] Y;
}

parameters {
  vector[T] mu;
  real<lower=0> s_mu;
  real<lower=0> s_Y;  
}

model {
  
  // Vectorized version:
  // mu[3:T] ~ normal(2*mu[2:(T-1)] - mu[1:(T-2)], s_mu);
  // Y ~ normal(mu, s_Y);

  for (t in 3:T) {
    mu[t] ~ normal(2*mu[t-1] - mu[t-2], s_mu); 
  }
  
  for (t in 1:T) {
    Y[t] ~ normal(mu[t], s_Y);
  }
}

generated quantities {

  vector[T+T_pred] mu_all;
  vector[T_pred] y_pred;

  mu_all[1:T] = mu;

  for (t in 1:T_pred) {
    mu_all[T+t] = normal_rng(2*mu_all[T+t-1] - mu_all[T+t-2], s_mu);
    y_pred[t] = normal_rng(mu_all[T+t], s_Y);
  }
  
}

Here, the model is first compiled with the stan_model() function,

model124 <- stan_model('note_ahirubayes16-124.stan')

and then sampling is run with the sampling() function.

fit <- sampling(object = model124, data = data, seed = 1989)

The estimation results are as follows.

print(fit, prob = c(0.025, 0.5, 0.975), digits_summary = 1)
## Inference for Stan model: anon_model.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##            mean se_mean  sd 2.5%  50% 97.5% n_eff Rhat
## mu[1]      11.2     0.0 0.2 10.7 11.2  11.6  4364    1
## mu[2]      11.1     0.0 0.2 10.8 11.1  11.4  3822    1
## mu[3]      11.1     0.0 0.2 10.8 11.1  11.4  2612    1
## mu[4]      11.0     0.0 0.1 10.7 11.0  11.3  3097    1
## mu[5]      11.0     0.0 0.1 10.7 11.0  11.2  2913    1
## mu[6]      11.0     0.0 0.1 10.8 11.0  11.3  2318    1
## mu[7]      11.1     0.0 0.1 10.8 11.1  11.4  2435    1
## mu[8]      11.2     0.0 0.2 10.9 11.2  11.5  2100    1
## mu[9]      11.5     0.0 0.2 11.2 11.5  11.8  2328    1
## mu[10]     11.8     0.0 0.1 11.5 11.8  12.1  2857    1
## mu[11]     12.2     0.0 0.2 12.0 12.2  12.6  2241    1
## mu[12]     12.6     0.0 0.1 12.3 12.6  12.9  2324    1
## mu[13]     12.8     0.0 0.1 12.5 12.8  13.1  3239    1
## mu[14]     13.0     0.0 0.1 12.7 13.0  13.3  3229    1
## mu[15]     13.2     0.0 0.1 12.9 13.2  13.5  3168    1
## mu[16]     13.3     0.0 0.2 13.0 13.3  13.6  2049    1
## mu[17]     13.2     0.0 0.1 12.9 13.2  13.5  2917    1
## mu[18]     13.0     0.0 0.2 12.7 13.0  13.3  2615    1
## mu[19]     12.8     0.0 0.1 12.5 12.8  13.1  2925    1
## mu[20]     12.6     0.0 0.2 12.3 12.6  12.9  3418    1
## mu[21]     12.2     0.0 0.2 11.8 12.2  12.7  3083    1
## s_mu        0.2     0.0 0.1  0.1  0.2   0.4   524    1
## s_Y         0.3     0.0 0.1  0.2  0.2   0.4  1788    1
## mu_all[1]  11.2     0.0 0.2 10.7 11.2  11.6  4364    1
## mu_all[2]  11.1     0.0 0.2 10.8 11.1  11.4  3822    1
## mu_all[3]  11.1     0.0 0.2 10.8 11.1  11.4  2612    1
## mu_all[4]  11.0     0.0 0.1 10.7 11.0  11.3  3097    1
## mu_all[5]  11.0     0.0 0.1 10.7 11.0  11.2  2913    1
## mu_all[6]  11.0     0.0 0.1 10.8 11.0  11.3  2318    1
## mu_all[7]  11.1     0.0 0.1 10.8 11.1  11.4  2435    1
## mu_all[8]  11.2     0.0 0.2 10.9 11.2  11.5  2100    1
## mu_all[9]  11.5     0.0 0.2 11.2 11.5  11.8  2328    1
## mu_all[10] 11.8     0.0 0.1 11.5 11.8  12.1  2857    1
## mu_all[11] 12.2     0.0 0.2 12.0 12.2  12.6  2241    1
## mu_all[12] 12.6     0.0 0.1 12.3 12.6  12.9  2324    1
## mu_all[13] 12.8     0.0 0.1 12.5 12.8  13.1  3239    1
## mu_all[14] 13.0     0.0 0.1 12.7 13.0  13.3  3229    1
## mu_all[15] 13.2     0.0 0.1 12.9 13.2  13.5  3168    1
## mu_all[16] 13.3     0.0 0.2 13.0 13.3  13.6  2049    1
## mu_all[17] 13.2     0.0 0.1 12.9 13.2  13.5  2917    1
## mu_all[18] 13.0     0.0 0.2 12.7 13.0  13.3  2615    1
## mu_all[19] 12.8     0.0 0.1 12.5 12.8  13.1  2925    1
## mu_all[20] 12.6     0.0 0.2 12.3 12.6  12.9  3418    1
## mu_all[21] 12.2     0.0 0.2 11.8 12.2  12.7  3083    1
## mu_all[22] 11.9     0.0 0.4 11.1 11.9  12.7  3292    1
## mu_all[23] 11.6     0.0 0.7 10.1 11.6  12.9  3212    1
## mu_all[24] 11.2     0.0 1.0  9.1 11.2  13.2  3245    1
## y_pred[1]  11.9     0.0 0.5 10.9 11.9  12.8  3614    1
## y_pred[2]  11.6     0.0 0.7 10.1 11.6  13.0  3209    1
## y_pred[3]  11.2     0.0 1.1  9.1 11.2  13.3  3425    1
## lp__       40.2     0.3 6.2 27.3 40.5  51.0   515    1
## 
## Samples were drawn using NUTS(diag_e) at Fri Jan 19 22:57:29 2024.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
ms <- rstan::extract(fit)
# quantile(ms$s_mu, probs = c(0.1, 0.5, 0.9))
# quantile(ms$s_Y, probs = c(0.1, 0.5, 0.9))

qua <- apply(ms$mu_all, 2, quantile, probs = c(0.1, 0.25, 0.50, 0.75, 0.9))
d_est <- data.frame(X = 1:(T+T_pred), t(qua), check.names = FALSE)

ggplot() +  
  theme_bw(base_size = 18) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `10%`, ymax = `90%`), fill = 'black', alpha = 1/6) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `25%`, ymax = `75%`), fill = 'black', alpha = 2/6) +
  geom_line(data = d_est, aes(x = X, y = `50%`), linewidth = 1) +
  geom_point(data = d, aes(x = X, y = Y), shape = 16, size = 2.5) +
  geom_vline(xintercept = T, linetype = 'dashed') +
  coord_cartesian(xlim = c(1, 24), ylim = c(10, 14)) +
  labs(x = 'Time (Day)', y = 'Y')

12.2 Seasonal Adjustment Term

12.2.1 Imagining the Mechanism

\[ \begin{eqnarray} \sum_{l=0}^{L-1} season[t-l] &=& \epsilon_{season}[t] &\quad \epsilon_{season}[t] \sim Normal(0, \sigma_{season}) \end{eqnarray} \]

\[ \begin{eqnarray} season[t] &=& - \sum_{l=1}^{L-1} season[t-l] + \epsilon_{season}[t] &\quad \epsilon_{season}[t] \sim Normal(0, \sigma_{season}) \end{eqnarray} \]
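
For quarterly data L = 4, so each season[t] is pulled toward the negative sum of the previous three values, and any four consecutive terms sum to approximately zero. A minimal R sketch of this mechanism follows; the starting values and s_season are arbitrary assumptions for illustration.

set.seed(123)
T_sim    <- 44
L        <- 4
s_season <- 0.6

season_sim <- numeric(T_sim)
season_sim[1:(L-1)] <- c(0, 5, -3)   # arbitrary starting values
for (t in L:T_sim) {
  season_sim[t] <- rnorm(1, mean = -sum(season_sim[(t-L+1):(t-1)]), sd = s_season)
}

# sums of 4 consecutive terms stay close to zero
sapply(L:T_sim, function(t) sum(season_sim[(t-L+1):t]))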

12.2.2 Writing Down the Model

Model 12-5

\[ \begin{eqnarray} \mu[t] &=& \mu[t-1] + \epsilon_{\mu}[t-1] &\quad t = 2,...,T \\ season[t] &=& - \sum_{l=1}^{L-1} season[t-l] + \epsilon_{season}[t-3] &\quad t = 4,...,T \\ Y[t] &=& \mu[t] + season[t] + \epsilon_{Y}[t] &\quad t = 1,...,T \\ \epsilon_{\mu}[t] &\sim& Normal(0, \sigma_{\mu}) &\quad t = 1,...,T-1 \\ \epsilon_{season}[t] &\sim& Normal(0, \sigma_{season}) &\quad t = 1,...,T-3 \\ \epsilon_{Y}[t] &\sim& Normal(0, \sigma_{Y}) &\quad t = 1,...,T \\\\ \end{eqnarray} \]

Model 12-6

\[ \begin{eqnarray} \mu[t] &\sim& Normal(\mu[t-1], \sigma_{\mu}) &\quad t = 2,...,T \\ season[t] &\sim& Normal(- \sum_{l=1}^{L-1} season[t-l], \sigma_{season}) &\quad t = 4,...,T \\ Y[t] &\sim& Normal(\mu[t] + season[t], \sigma_{Y}) &\quad t = 1,...,T \\ \end{eqnarray} \]

12.2.3 Implementation in Stan

data {
  int T;
  vector[T] Y;
}

parameters {
  vector[T] mu;
  vector[T] season;
  real<lower=0> s_mu;
  real<lower=0> s_season;
  real<lower=0> s_Y;
}

transformed parameters {
  vector[T] y_mean;
  
  // Vectorized version:
  // y_mean = mu + season;
  for (t in 1:T) {
    y_mean[t] = mu[t] + season[t];
  }
}

model {
  // Vectorized version:
  // mu[2:T] ~ normal(mu[1:(T-1)], s_mu);
  // for(t in 4:T)
  //   season[t] ~ normal(-sum(season[(t-3):(t-1)]), s_season);
  // Y ~ normal(y_mean, s_Y);

  for (t in 2:T) {
    mu[t] ~ normal(mu[t-1], s_mu);
  }

  for (t in 4:T) {
    season[t] ~ normal(-sum(season[(t-3):(t-1)]), s_season);
  }
  
  for (t in 1:T) {
    Y[t] ~ normal(y_mean[t], s_Y);
  }
}

12.2.4 Interpreting the Estimation Results

Prepare the data.

d <- read.csv('https://raw.githubusercontent.com/MatsuuraKentaro/RStanBook/master/chap12/input/data-ss2.txt')
T <- nrow(d)
data <- list(T = T, Y = d$Y)
data
## $T
## [1] 44
## 
## $Y
##  [1] 18.073 23.665 16.410 14.931 17.106 22.218 12.220 15.144 15.821 19.906
## [11] 11.270 13.826 14.518 20.091 10.884 14.725 14.978 21.394  9.810 14.539
## [21] 16.709 21.608 12.960 14.073 17.349 22.662 15.906 15.619 18.117 26.206
## [31] 16.147 17.512 19.674 30.237 17.283 18.883 21.110 30.618 19.179 20.332
## [41] 23.278 31.892 20.276 22.866

Here, the model is first compiled with the stan_model() function,

model126 <- stan_model('note_ahirubayes16-126.stan')

and then sampling is run with the sampling() function.

fit <- sampling(object = model126, data = data, seed = 1989)

The estimation results are as follows.

print(fit, prob = c(0.025, 0.5, 0.975), digits_summary = 1)
## Inference for Stan model: anon_model.
## 4 chains, each with iter=2000; warmup=1000; thin=1; 
## post-warmup draws per chain=1000, total post-warmup draws=4000.
## 
##            mean se_mean   sd  2.5%  50% 97.5% n_eff Rhat
## mu[1]      18.4     0.0  0.6  17.1 18.4  19.6  2801  1.0
## mu[2]      18.5     0.0  0.5  17.6 18.5  19.5  3683  1.0
## mu[3]      18.5     0.0  0.5  17.6 18.5  19.5  2791  1.0
## mu[4]      17.5     0.0  0.4  16.7 17.5  18.4  3020  1.0
## mu[5]      17.2     0.0  0.4  16.3 17.2  18.0  4192  1.0
## mu[6]      16.8     0.0  0.4  15.9 16.8  17.6  3949  1.0
## mu[7]      16.2     0.0  0.4  15.3 16.2  17.0  4368  1.0
## mu[8]      16.3     0.0  0.4  15.4 16.3  17.1  3413  1.0
## mu[9]      15.8     0.0  0.4  14.9 15.8  16.6  3935  1.0
## mu[10]     15.1     0.0  0.4  14.2 15.2  16.0  3484  1.0
## mu[11]     15.1     0.0  0.4  14.2 15.1  15.9  3654  1.0
## mu[12]     14.9     0.0  0.4  14.1 14.9  15.7  2979  1.0
## mu[13]     14.8     0.0  0.4  14.0 14.8  15.6  3667  1.0
## mu[14]     14.8     0.0  0.4  14.0 14.8  15.7  3264  1.0
## mu[15]     15.2     0.0  0.4  14.4 15.2  16.0  2758  1.0
## mu[16]     15.5     0.0  0.4  14.7 15.5  16.3  3691  1.0
## mu[17]     15.3     0.0  0.4  14.5 15.3  16.2  4104  1.0
## mu[18]     15.3     0.0  0.4  14.5 15.3  16.2  3836  1.0
## mu[19]     15.0     0.0  0.5  14.0 15.0  15.8  2181  1.0
## mu[20]     15.8     0.0  0.4  14.9 15.8  16.6  3470  1.0
## mu[21]     16.3     0.0  0.4  15.4 16.3  17.1  3901  1.0
## mu[22]     16.3     0.0  0.4  15.4 16.3  17.1  4372  1.0
## mu[23]     16.5     0.0  0.4  15.6 16.5  17.3  4022  1.0
## mu[24]     16.6     0.0  0.4  15.7 16.6  17.4  4328  1.0
## mu[25]     17.1     0.0  0.4  16.2 17.1  17.9  3682  1.0
## mu[26]     17.4     0.0  0.4  16.6 17.4  18.2  3780  1.0
## mu[27]     18.4     0.0  0.4  17.5 18.4  19.3  2717  1.0
## mu[28]     18.5     0.0  0.4  17.7 18.5  19.3  3771  1.0
## mu[29]     18.8     0.0  0.4  17.9 18.8  19.6  3626  1.0
## mu[30]     19.3     0.0  0.4  18.5 19.3  20.1  3948  1.0
## mu[31]     19.7     0.0  0.4  18.9 19.7  20.6  4082  1.0
## mu[32]     20.3     0.0  0.4  19.5 20.3  21.1  3927  1.0
## mu[33]     20.9     0.0  0.4  20.1 20.9  21.8  4113  1.0
## mu[34]     21.8     0.0  0.5  20.9 21.8  22.7  3085  1.0
## mu[35]     21.6     0.0  0.4  20.8 21.6  22.4  4319  1.0
## mu[36]     21.8     0.0  0.4  21.0 21.8  22.6  4240  1.0
## mu[37]     22.2     0.0  0.4  21.3 22.2  23.0  3728  1.0
## mu[38]     22.7     0.0  0.4  21.8 22.7  23.5  4699  1.0
## mu[39]     23.1     0.0  0.4  22.3 23.1  24.0  3942  1.0
## mu[40]     23.4     0.0  0.4  22.5 23.4  24.2  4142  1.0
## mu[41]     23.9     0.0  0.4  23.1 23.9  24.8  3357  1.0
## mu[42]     24.2     0.0  0.4  23.4 24.2  25.1  4443  1.0
## mu[43]     24.6     0.0  0.5  23.7 24.6  25.5  3745  1.0
## mu[44]     25.0     0.0  0.6  23.8 25.0  26.3  2464  1.0
## season[1]  -0.3     0.0  0.6  -1.5 -0.3   0.9  2463  1.0
## season[2]   5.1     0.0  0.5   4.1  5.1   6.0  3968  1.0
## season[3]  -2.4     0.0  0.6  -3.5 -2.4  -1.4   827  1.0
## season[4]  -2.5     0.0  0.5  -3.4 -2.5  -1.4  1216  1.0
## season[5]  -0.1     0.0  0.4  -0.9 -0.1   0.8  3353  1.0
## season[6]   5.4     0.0  0.4   4.5  5.4   6.2  2749  1.0
## season[7]  -3.8     0.0  0.4  -4.6 -3.8  -2.9  2137  1.0
## season[8]  -1.3     0.0  0.4  -2.2 -1.3  -0.4  2048  1.0
## season[9]   0.0     0.0  0.4  -0.8  0.0   0.8  3473  1.0
## season[10]  4.9     0.0  0.4   4.1  4.9   5.8  1886  1.0
## season[11] -3.8     0.0  0.4  -4.6 -3.8  -3.0  3550  1.0
## season[12] -1.1     0.0  0.4  -2.0 -1.1  -0.2  2384  1.0
## season[13] -0.2     0.0  0.4  -1.0 -0.2   0.6  3573  1.0
## season[14]  5.3     0.0  0.4   4.5  5.3   6.2  2233  1.0
## season[15] -4.3     0.0  0.4  -5.1 -4.3  -3.4  2871  1.0
## season[16] -0.9     0.0  0.5  -1.8 -0.9   0.0  1359  1.0
## season[17] -0.3     0.0  0.4  -1.1 -0.3   0.5  3797  1.0
## season[18]  6.0     0.0  0.4   5.1  6.0   6.8  2153  1.0
## season[19] -4.8     0.0  0.5  -5.8 -4.9  -3.7   729  1.0
## season[20] -1.3     0.0  0.4  -2.1 -1.3  -0.5  2253  1.0
## season[21]  0.3     0.0  0.4  -0.6  0.3   1.1  2296  1.0
## season[22]  5.4     0.0  0.4   4.6  5.4   6.3  2496  1.0
## season[23] -3.6     0.0  0.4  -4.4 -3.5  -2.7  3645  1.0
## season[24] -2.4     0.0  0.4  -3.2 -2.4  -1.6  3152  1.0
## season[25]  0.2     0.0  0.4  -0.6  0.2   1.1  2350  1.0
## season[26]  5.5     0.0  0.5   4.6  5.4   6.4  1288  1.0
## season[27] -2.8     0.0  0.5  -3.8 -2.8  -1.9   623  1.0
## season[28] -2.8     0.0  0.4  -3.7 -2.9  -2.0  1594  1.0
## season[29] -0.6     0.0  0.4  -1.4 -0.6   0.2  3518  1.0
## season[30]  6.9     0.0  0.4   6.1  6.9   7.7  2705  1.0
## season[31] -3.6     0.0  0.4  -4.4 -3.6  -2.7  3304  1.0
## season[32] -2.8     0.0  0.4  -3.6 -2.8  -2.0  2581  1.0
## season[33] -1.2     0.0  0.4  -2.0 -1.2  -0.3  2105  1.0
## season[34]  8.2     0.0  0.5   7.1  8.2   9.1   478  1.0
## season[35] -4.2     0.0  0.4  -5.1 -4.2  -3.4  2909  1.0
## season[36] -2.8     0.0  0.4  -3.7 -2.8  -2.0  3119  1.0
## season[37] -1.0     0.0  0.4  -1.8 -1.0  -0.2  2741  1.0
## season[38]  7.9     0.0  0.4   7.0  7.9   8.8  2617  1.0
## season[39] -4.0     0.0  0.4  -4.9 -4.0  -3.2  3222  1.0
## season[40] -2.9     0.0  0.4  -3.8 -2.9  -2.1  2088  1.0
## season[41] -0.7     0.0  0.5  -1.7 -0.7   0.2  2575  1.0
## season[42]  7.7     0.0  0.5   6.8  7.7   8.6  3845  1.0
## season[43] -4.3     0.0  0.5  -5.3 -4.3  -3.3  3664  1.0
## season[44] -2.3     0.0  0.6  -3.5 -2.3  -1.1  2632  1.0
## s_mu        0.7     0.0  0.2   0.5  0.7   1.1  1160  1.0
## s_season    0.6     0.0  0.1   0.3  0.6   0.9   663  1.0
## s_Y         0.4     0.0  0.2   0.1  0.3   0.8   103  1.1
## y_mean[1]  18.1     0.0  0.4  17.3 18.1  18.9  4244  1.0
## y_mean[2]  23.6     0.0  0.4  22.7 23.6  24.4  4527  1.0
## y_mean[3]  16.1     0.0  0.5  14.9 16.2  16.8   455  1.0
## y_mean[4]  15.1     0.0  0.4  14.4 15.0  16.1  1195  1.0
## y_mean[5]  17.1     0.0  0.4  16.3 17.1  17.9  4280  1.0
## y_mean[6]  22.2     0.0  0.4  21.4 22.2  22.9  4823  1.0
## y_mean[7]  12.4     0.0  0.4  11.7 12.3  13.3  1265  1.0
## y_mean[8]  15.0     0.0  0.4  14.0 15.1  15.7  1398  1.0
## y_mean[9]  15.8     0.0  0.4  15.0 15.8  16.5  4505  1.0
## y_mean[10] 20.1     0.0  0.4  19.4 20.0  21.0   988  1.0
## y_mean[11] 11.3     0.0  0.4  10.5 11.3  12.0  5129  1.0
## y_mean[12] 13.8     0.0  0.4  13.0 13.8  14.6  3975  1.0
## y_mean[13] 14.6     0.0  0.4  13.9 14.6  15.4  2836  1.0
## y_mean[14] 20.2     0.0  0.4  19.5 20.1  21.0  3560  1.0
## y_mean[15] 10.9     0.0  0.4  10.1 10.9  11.7  4520  1.0
## y_mean[16] 14.6     0.0  0.4  13.6 14.6  15.3  1364  1.0
## y_mean[17] 15.0     0.0  0.4  14.3 15.0  15.9  4815  1.0
## y_mean[18] 21.3     0.0  0.4  20.4 21.3  22.0  3201  1.0
## y_mean[19] 10.2     0.0  0.5   9.5 10.0  11.3   346  1.0
## y_mean[20] 14.5     0.0  0.4  13.6 14.5  15.2  2750  1.0
## y_mean[21] 16.6     0.0  0.4  15.6 16.6  17.3  1310  1.0
## y_mean[22] 21.7     0.0  0.4  20.9 21.7  22.6  3541  1.0
## y_mean[23] 12.9     0.0  0.4  12.1 12.9  13.7  4435  1.0
## y_mean[24] 14.2     0.0  0.4  13.5 14.1  15.1  2260  1.0
## y_mean[25] 17.3     0.0  0.4  16.4 17.3  18.0  3815  1.0
## y_mean[26] 22.9     0.0  0.4  22.2 22.8  23.9   711  1.0
## y_mean[27] 15.6     0.0  0.5  14.5 15.7  16.3   415  1.0
## y_mean[28] 15.7     0.0  0.4  14.9 15.7  16.5  3584  1.0
## y_mean[29] 18.2     0.0  0.4  17.5 18.2  19.0  2974  1.0
## y_mean[30] 26.2     0.0  0.4  25.4 26.2  27.0  5159  1.0
## y_mean[31] 16.2     0.0  0.4  15.4 16.2  16.9  4582  1.0
## y_mean[32] 17.5     0.0  0.4  16.7 17.5  18.4  4808  1.0
## y_mean[33] 19.7     0.0  0.4  19.1 19.7  20.6  2609  1.0
## y_mean[34] 29.9     0.0  0.5  28.8 30.1  30.6   341  1.0
## y_mean[35] 17.4     0.0  0.4  16.7 17.3  18.2  3218  1.0
## y_mean[36] 18.9     0.0  0.4  18.2 18.9  19.8  3947  1.0
## y_mean[37] 21.2     0.0  0.4  20.4 21.1  22.0  4077  1.0
## y_mean[38] 30.6     0.0  0.4  29.7 30.6  31.4  3261  1.0
## y_mean[39] 19.1     0.0  0.4  18.3 19.1  19.9  4535  1.0
## y_mean[40] 20.4     0.0  0.4  19.7 20.4  21.3  3179  1.0
## y_mean[41] 23.2     0.0  0.4  22.3 23.2  24.0  3649  1.0
## y_mean[42] 31.9     0.0  0.4  31.1 31.9  32.7  4480  1.0
## y_mean[43] 20.3     0.0  0.4  19.5 20.3  21.1  4702  1.0
## y_mean[44] 22.7     0.0  0.4  21.7 22.8  23.5  1676  1.0
## lp__       21.5     2.7 23.9 -18.3 17.8  75.4    78  1.1
## 
## Samples were drawn using NUTS(diag_e) at Fri Jan 19 22:57:32 2024.
## For each parameter, n_eff is a crude measure of effective sample size,
## and Rhat is the potential scale reduction factor on split chains (at 
## convergence, Rhat=1).
ms <- rstan::extract(fit)

qua <- apply(ms$mu, 2, quantile, probs = c(0.1, 0.25, 0.50, 0.75, 0.9))
d_est <- data.frame(X = 1:T, t(qua), check.names = FALSE)

ggplot() +  
  theme_bw(base_size = 15) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `10%`, ymax = `90%`), fill = 'black', alpha = 1/6) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `25%`, ymax = `75%`), fill = 'black', alpha = 2/6) +
  geom_line(data = d_est, aes(x = X, y = `50%`), linewidth = 0.5) +
  geom_point(data = d, aes(x = X, y = Y), shape = 16, size = 1) +
  geom_line(data = d, aes(x = X, y = Y), linewidth = 0.25) +
  labs(x = 'Time (Quarter)', y = 'Y') +
  coord_cartesian(xlim = c(1, 44))

qua <- apply(ms$season, 2, quantile, probs = c(0.1, 0.25, 0.50, 0.75, 0.9))
d_est <- data.frame(X = 1:T, t(qua), check.names = FALSE)

ggplot() +  
  theme_bw(base_size = 18) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `10%`, ymax = `90%`), fill = 'black', alpha = 1/6) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `25%`, ymax = `75%`), fill = 'black', alpha = 2/6) +
  geom_line(data = d_est, aes(x = X, y = `50%`), linewidth = 0.5) +
  labs(x = 'Time (Quarter)', y = 'Y') +
  coord_cartesian(xlim = c(1, 44))
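
Since the model also returns y_mean (= mu + season) as a transformed parameter, the fit to the observations can be visualized with the same plotting pattern as above. This is a sketch that reuses the ms, d, and T objects already created in this section.

qua <- apply(ms$y_mean, 2, quantile, probs = c(0.1, 0.25, 0.50, 0.75, 0.9))
d_est <- data.frame(X = 1:T, t(qua), check.names = FALSE)

ggplot() +
  theme_bw(base_size = 15) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `10%`, ymax = `90%`), fill = 'black', alpha = 1/6) +
  geom_ribbon(data = d_est, aes(x = X, ymin = `25%`, ymax = `75%`), fill = 'black', alpha = 2/6) +
  geom_line(data = d_est, aes(x = X, y = `50%`), linewidth = 0.5) +
  geom_point(data = d, aes(x = X, y = Y), shape = 16, size = 1) +
  labs(x = 'Time (Quarter)', y = 'Y') +
  coord_cartesian(xlim = c(1, 44))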

References and Resources