diff --git a/articles/jss_paper.html b/articles/jss_paper.html
index c02ccf4..1309d75 100644
--- a/articles/jss_paper.html
+++ b/articles/jss_paper.html
@@ -807,9 +807,9 @@

## # A tibble: 3 × 6
 ##   expression                            min  median `itr/sec` mem_alloc `gc/sec`
 ##   <bch:expr>                        <bch:t> <bch:t>     <dbl> <bch:byt>    <dbl>
-## 1 dist$density(-2:2, with_params =… 27.08µs 29.92µs    32435.        0B     16.2
-## 2 denscmp(-2:2, matrix(c(0, 1), nr…   4.1µs  4.79µs   200114.        0B     40.0
-## 3 dnorm(-2:2, mean = rep(0, 5L), s…  1.66µs  1.96µs   478716.    2.58KB      0
+## 1 dist$density(-2:2, with_params =… 26.01µs    28µs    34686.        0B     17.4
+## 2 denscmp(-2:2, matrix(c(0, 1), nr…  4.12µs  4.71µs   204950.        0B     41.0
+## 3 dnorm(-2:2, mean = rep(0, 5L), s…  1.68µs  1.92µs   492249.    2.58KB        0

diff --git a/articles/tensorflow.html b/articles/tensorflow.html
index 1a09558..e8215ef 100644
--- a/articles/tensorflow.html
+++ b/articles/tensorflow.html
@@ -188,207 +188,207 @@

A simple linear model#> generated.

#> Epoch 0/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 483ms/step - loss: 56.18431/1 ━━━━━━━━━━━━━━━━━━━━ 0s 487ms/step - loss: 56.1843
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 341ms/step - loss: 56.18431/1 ━━━━━━━━━━━━━━━━━━━━ 0s 344ms/step - loss: 56.1843
 #> Epoch 1/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 46.63251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 46.6325
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 46.63251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 46.6325
 #> Epoch 2/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 39.75831/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 39.7583
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 39.75831/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 39.7583
 #> Epoch 3/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 35.50141/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 35.5014
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 35.50141/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 35.5014
 #> Epoch 4/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.62621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.6262
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.62621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.6262
 #> Epoch 5/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.64411/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.6441
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.64411/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.6441
 #> Epoch 6/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.80991/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 34.8099
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 34.80991/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.8099
 #> Epoch 7/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 36.28141/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 36.2814
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 36.28141/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 36.2814
 #> Epoch 8/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 37.38771/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 37.3877
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 37.38771/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 37.3877
 #> Epoch 9/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 37.80121/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 37.8012
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 20ms/step - loss: 37.80121/1 ━━━━━━━━━━━━━━━━━━━━ 0s 21ms/step - loss: 37.8012
 #> Epoch 10/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 37.51511/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 37.5151
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 37.51511/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 37.5151
 #> Epoch 11/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 36.71811/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 36.7181
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 36.71811/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 36.7181
 #> Epoch 12/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 35.67251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 35.6725
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 35.67251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 35.6725
 #> Epoch 13/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.63281/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.6328
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 34.63281/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.6328
 #> Epoch 14/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.79741/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.7974
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.79741/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.7974
 #> Epoch 15/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.28291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.2829
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.28291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.2829
 #> Epoch 16/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.11521/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.1152
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.11521/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.1152
 #> Epoch 17/100
 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.23721/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.2372
 #> Epoch 18/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.53231/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.5323
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.53231/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.5323
 #> Epoch 19/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.86181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.8618
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.86181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.8618
 #> Epoch 20/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.10381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 34.1038
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.10381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.1038
 #> Epoch 21/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.18291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 34.1829
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 34.18291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.1829
 #> Epoch 22/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.08131/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 34.0813
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 34.08131/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.0813
 #> Epoch 23/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.83221/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.8322
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.83221/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.8322
 #> Epoch 24/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.50171/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.5017
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.50171/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.5017
 #> Epoch 25/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.16641/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.1664
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.16641/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.1664
 #> Epoch 26/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.89201/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.8920
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.89201/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.8920
 #> Epoch 27/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.71911/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.7191
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.71911/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.7191
 #> Epoch 28/100
 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.65621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.6562
 #> Epoch 29/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.68181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.6818
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.68181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.6818
 #> Epoch 30/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.75441/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.7544
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.75441/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.7544
 #> Epoch 31/100
 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.82651/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.8265
 #> Epoch 32/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.85911/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.8591
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.85911/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.8591
 #> Epoch 33/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.83111/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.8311
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.83111/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.8311
 #> Epoch 34/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.74251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.7425
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.74251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.7425
 #> Epoch 35/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.61121/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.6112
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.61121/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.6112
 #> Epoch 36/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.46491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.4649
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.46491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.4649
 #> Epoch 37/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.33131/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.3313
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.33131/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.3313
 #> Epoch 38/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.23041/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.2304
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.23041/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.2304
 #> Epoch 39/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.16951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1695
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.16951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1695
 #> Epoch 40/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.14301/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1430
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.14301/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1430
 #> Epoch 41/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.13571/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1357
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.13571/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1357
 #> Epoch 42/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.12951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1295
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.12951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1295
 #> Epoch 43/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.10861/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1086
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.10861/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1086
 #> Epoch 44/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.06491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.0649
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.06491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.0649
 #> Epoch 45/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.99891/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.9989
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 31.99891/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.9989
 #> Epoch 46/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.91821/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.9182
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 31.91821/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.9182
 #> Epoch 47/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.83421/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.8342
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.83421/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.8342
 #> Epoch 48/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.75761/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.7576
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.75761/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.7576
 #> Epoch 49/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.69491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.6949
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.69491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.6949
 #> Epoch 50/100
 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.64701/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.6470
 #> Epoch 51/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.60981/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.6098
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.60981/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.6098
 #> Epoch 52/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.57631/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.5763
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.57631/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.5763
 #> Epoch 53/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.53921/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.5392
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.53921/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.5392
 #> Epoch 54/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 18ms/step - loss: 31.49401/1 ━━━━━━━━━━━━━━━━━━━━ 0s 19ms/step - loss: 31.4940
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.49401/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.4940
 #> Epoch 55/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.43951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.4395
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 31.43951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.4395
 #> Epoch 56/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.37821/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.3782
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.37821/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.3782
 #> Epoch 57/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.31441/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.3144
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.31441/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.3144
 #> Epoch 58/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.25271/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.2527
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 20ms/step - loss: 31.25271/1 ━━━━━━━━━━━━━━━━━━━━ 0s 21ms/step - loss: 31.2527
 #> Epoch 59/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.19611/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.1961
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.19611/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.1961
 #> Epoch 60/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.14541/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.1454
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.14541/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.1454
 #> Epoch 61/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.09891/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.0989
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.09891/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.0989
 #> Epoch 62/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.05381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.0538
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.05381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.0538
 #> Epoch 63/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.00711/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.0071
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.00711/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.0071
 #> Epoch 64/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.95711/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.9571
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.95711/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.9571
 #> Epoch 65/100
 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.90341/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.9034
 #> Epoch 66/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.84721/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.8472
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.84721/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.8472
 #> Epoch 67/100
 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.79051/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.7905
 #> Epoch 68/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.73501/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.7350
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.73501/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.7350
 #> Epoch 69/100
 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.68181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.6818
 #> Epoch 70/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.63071/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.6307
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.63071/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.6307
 #> Epoch 71/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.58091/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.5809
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.58091/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.5809
 #> Epoch 72/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.53111/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.5311
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.53111/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.5311
 #> Epoch 73/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.48021/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.4802
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.48021/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.4802
 #> Epoch 74/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.42771/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.4277
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.42771/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.4277
 #> Epoch 75/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.37391/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.3739
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.37391/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.3739
 #> Epoch 76/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.31951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.3195
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.31951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.3195
 #> Epoch 77/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.26531/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.2653
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.26531/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.2653
 #> Epoch 78/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.21201/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.2120
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.21201/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.2120
 #> Epoch 79/100
 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.15961/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.1596
 #> Epoch 80/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.10781/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.1078
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.10781/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.1078
 #> Epoch 81/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.05621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.0562
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.05621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.0562
 #> Epoch 82/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.00421/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.0042
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.00421/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.0042
 #> Epoch 83/100
 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.95161/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.9516
 #> Epoch 84/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.89851/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.8985
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.89851/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.8985
 #> Epoch 85/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.84501/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.8450
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.84501/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.8450
 #> Epoch 86/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.79171/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.7917
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.79171/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.7917
 #> Epoch 87/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.73871/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.7387
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.73871/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.7387
 #> Epoch 88/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.68611/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.6861
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.68611/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.6861
 #> Epoch 89/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.63381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.6338
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.63381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.6338
 #> Epoch 90/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.58161/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.5816
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.58161/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.5816
 #> Epoch 91/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.52921/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.5292
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.52921/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.5292
 #> Epoch 92/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.47661/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.4766
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.47661/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.4766
 #> Epoch 93/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.42371/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.4237
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.42371/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.4237
 #> Epoch 94/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.37081/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.3708
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.37081/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.3708
 #> Epoch 95/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.31801/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.3180
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.31801/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.3180
 #> Epoch 96/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.26531/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.2653
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.26531/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.2653
 #> Epoch 97/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.21291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.2129
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.21291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.2129
 #> Epoch 98/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.16051/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.1605
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.16051/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.1605
 #> Epoch 99/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.10831/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.1083
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.10831/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.1083
 #> Epoch 100/100
-#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.05601/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 29.0560
+#> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.05601/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.0560

#> [1] 4.854740 1.606937
 #> (Intercept)           x 
diff --git a/pkgdown.yml b/pkgdown.yml
index d5a2a8a..aecf3be 100644
--- a/pkgdown.yml
+++ b/pkgdown.yml
@@ -5,7 +5,7 @@ articles:
   distributions: distributions.html
   jss_paper: jss_paper.html
   tensorflow: tensorflow.html
-last_built: 2024-06-17T17:19Z
+last_built: 2024-06-24T07:29Z
 urls:
   reference: https://ashesitr.github.io/reservr/reference
   article: https://ashesitr.github.io/reservr/articles
diff --git a/reference/Distribution.html b/reference/Distribution.html
index 65a4e7c..77c6905 100644
--- a/reference/Distribution.html
+++ b/reference/Distribution.html
@@ -1425,7 +1425,7 @@ 

Examples#> jac_full[, grepl("^probs", nms)] <- 1 #> list(constraints = rowSums(prob_mat) - 1, jacobian = jac_full) #> } -#> <environment: 0x55da7f5f5a30> +#> <environment: 0x558ee951cb38> ## ------------------------------------------------ ## Method `Distribution$export_functions` diff --git a/reference/dist_trunc-1.png b/reference/dist_trunc-1.png index cce84cc..dc58b63 100644 Binary files a/reference/dist_trunc-1.png and b/reference/dist_trunc-1.png differ diff --git a/reference/dist_uniform-1.png b/reference/dist_uniform-1.png index ea66f9c..3829da3 100644 Binary files a/reference/dist_uniform-1.png and b/reference/dist_uniform-1.png differ diff --git a/search.json b/search.json index a026880..6870fda 100644 --- a/search.json +++ b/search.json @@ -1 +1 @@ -[{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"distributions","dir":"Articles","previous_headings":"","what":"Distributions","title":"Working with Distributions","text":"Distributions set classes available reservr specify distribution families random variables. Distribution inherits R6 Class Distribution provides functionality necessary working specific family. Distribution can defined calling one constructor functions, prefixed dist_ package. constructors accept parameters family arguments. arguments specified, corresponding parameter considered fixed sense need specified computing something distribution assumed fixed calling fit() distribution instance.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"sample","dir":"Articles","previous_headings":"Distributions","what":"Sample","title":"Working with Distributions","text":"example, unspecified normal distribution can created calling dist_normal() without arguments. means parameters mean sd considered placeholders. 
want , e.g., sample norm, must specify placeholders with_params argument:","code":"library(reservr) set.seed(1L) # Instantiate an unspecified normal distribution norm <- dist_normal() x <- norm$sample(n = 10L, with_params = list(mean = 3, sd = 1)) set.seed(1L) norm2 <- dist_normal(sd = 1) x2 <- norm2$sample(n = 10L, with_params = list(mean = 3)) # the same RVs are drawn because the distribution parameters and the seed were the same stopifnot(identical(x, x2))"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"density","dir":"Articles","previous_headings":"Distributions","what":"Density","title":"Working with Distributions","text":"density() function computes density distribution respect natural measure. Use is_discrete_at() check point discrete mass lebesgue density. diff_density() computes gradient density respect free parameter. Setting log = TRUE computes gradient log-density, .e., gradient log f(x, params) instead.","code":"norm$density(x, with_params = list(mean = 3, sd = 1)) #> [1] 0.3278626 0.3922715 0.2813724 0.1117603 0.3778620 0.2849269 0.3542572 #> [8] 0.3037652 0.3380030 0.3807663 dnorm(x, mean = 3, sd = 1) #> [1] 0.3278626 0.3922715 0.2813724 0.1117603 0.3778620 0.2849269 0.3542572 #> [8] 0.3037652 0.3380030 0.3807663 norm$density(x, log = TRUE, with_params = list(mean = 3, sd = 1)) # log-density #> [1] -1.1151607 -0.9358010 -1.2680761 -2.1913990 -0.9732262 -1.2555227 #> [7] -1.0377321 -1.1915002 -1.0847006 -0.9655696 norm$is_discrete_at(x, with_params = list(mean = 3, sd = 1)) #> [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE # A discrete distribution with mass only at point = x[1]. 
dd <- dist_dirac(point = x[1]) dd$density(x) #> [1] 1 0 0 0 0 0 0 0 0 0 dd$is_discrete_at(x) #> [1] TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE norm$diff_density(x, with_params = list(mean = 3, sd = 1)) #> $mean #> [1] -0.20539076 0.07203805 -0.23512285 0.17828905 0.12450847 -0.23377349 #> [7] 0.17267525 0.22427736 0.19461580 -0.11628160 #> #> $sd #> [1] -0.19919475 -0.37904224 -0.08489705 0.17266080 -0.33683550 -0.09312311 #> [7] -0.27009027 -0.13817569 -0.22594681 -0.34525522"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"probability","dir":"Articles","previous_headings":"Distributions","what":"Probability","title":"Working with Distributions","text":"probability(), c.d.f., survival function, logarithms can computed. discrete distributions, dist$probability(x, lower.tail = TRUE) returns \\(P(X \\le x)\\) dist$probability(x, lower.tail = FALSE) returns \\(P(X > x)\\). Gradients (log-)c.d.f. survival function respect parameters can computed using diff_probability().","code":"norm$probability(x, with_params = list(mean = 3, sd = 1)) #> [1] 0.2655087 0.5728534 0.2016819 0.9446753 0.6291140 0.2059746 0.6870228 #> [8] 0.7698414 0.7176185 0.3800352 pnorm(x, mean = 3, sd = 1) #> [1] 0.2655087 0.5728534 0.2016819 0.9446753 0.6291140 0.2059746 0.6870228 #> [8] 0.7698414 0.7176185 0.3800352 dd$probability(x) #> [1] 1 1 0 1 1 0 1 1 1 1 dd$probability(x, lower.tail = FALSE, log.p = TRUE) #> [1] -Inf -Inf 0 -Inf -Inf 0 -Inf -Inf -Inf -Inf norm$diff_probability(x, with_params = list(mean = 3, sd = 1)) #> $mean #> [1] -0.3278626 -0.3922715 -0.2813724 -0.1117603 -0.3778620 -0.2849269 #> [7] -0.3542572 -0.3037652 -0.3380030 -0.3807663 #> #> $sd #> [1] 0.20539076 -0.07203805 0.23512285 -0.17828905 -0.12450847 0.23377349 #> [7] -0.17267525 -0.22427736 -0.19461580 
0.11628160"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"hazard","dir":"Articles","previous_headings":"Distributions","what":"Hazard","title":"Working with Distributions","text":"hazard rate defined \\(h(x, \\theta) = f(x, \\theta) / S(x, \\theta)\\), .e., ratio density survival function.","code":"norm$hazard(x, with_params = list(mean = 3, sd = 1)) #> [1] 0.4463805 0.9183533 0.3524565 2.0200785 1.0188091 0.3588385 1.1318948 #> [8] 1.3198083 1.1969728 0.6141740 norm$hazard(x, log = TRUE, with_params = list(mean = 3, sd = 1)) #> [1] -0.80658365 -0.08517306 -1.04282794 0.70313635 0.01863443 -1.02488292 #> [7] 0.12389301 0.27748652 0.17979571 -0.48747702"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"fitting","dir":"Articles","previous_headings":"Distributions","what":"Fitting","title":"Working with Distributions","text":"fit() generic defined Distributions perform maximum likelihood estimation. accepts weighted, censored truncated sample class trunc_obs, can automatically convert uncensored, untruncated observations without weight proper trunc_obs object.","code":"# Fit with mean, sd free fit1 <- fit(norm, x) # Fit with mean free fit2 <- fit(norm2, x) # Fit with sd free fit3 <- fit(dist_normal(mean = 3), x) # Fitted parameters fit1$params #> $mean #> [1] 3.132203 #> #> $sd #> [1] 0.7405289 fit2$params #> $mean #> [1] 3.132203 fit3$params #> $sd #> [1] 0.752237 # log-Likelihoods can be computed on AIC(fit1$logLik) #> [1] 26.37096 AIC(fit2$logLik) #> [1] 25.8626 AIC(fit3$logLik) #> [1] 24.68469 # Convergence checks fit1$opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\" fit2$opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\" fit3$opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\""},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"fitting-censored-data","dir":"Articles","previous_headings":"Distributions","what":"Fitting 
censored data","title":"Working with Distributions","text":"can also fit interval-censored data.","code":"params <- list(mean = 30, sd = 10) x <- norm$sample(100L, with_params = params) xl <- floor(x) xr <- ceiling(x) cens_fit <- fit(norm, trunc_obs(xmin = xl, xmax = xr)) print(cens_fit) #> $params #> $params$mean #> [1] 31.25 #> #> $params$sd #> [1] 9.112857 #> #> #> $opt #> $opt$par #> mean sd #> 31.250000 9.112857 #> #> $opt$value #> [1] 362.9126 #> #> $opt$iter #> [1] 5 #> #> $opt$convergence #> [1] 1 #> #> $opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\" #> #> #> $logLik #> 'log Lik.' -362.9126 (df=2)"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"fitting-truncated-data","dir":"Articles","previous_headings":"Distributions","what":"Fitting truncated data","title":"Working with Distributions","text":"possible fit randomly truncated samples, .e., samples truncation bound also random differs observed observation.","code":"params <- list(mean = 30, sd = 10) x <- norm$sample(100L, with_params = params) tl <- runif(length(x), min = 0, max = 20) tr <- runif(length(x), min = 0, max = 60) + tl # truncate_obs() also truncates observations. # if data is already truncated, use trunc_obs(x = ..., tmin = ..., tmax = ...) instead. trunc_fit <- fit(norm, truncate_obs(x, tl, tr)) print(trunc_fit) #> $params #> $params$mean #> [1] 26.72871 #> #> $params$sd #> [1] 8.242123 #> #> #> $opt #> $opt$par #> mean sd #> 26.728710 8.242123 #> #> $opt$value #> [1] 203.8095 #> #> $opt$iter #> [1] 9 #> #> $opt$convergence #> [1] 1 #> #> $opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\" #> #> #> $logLik #> 'log Lik.' 
-203.8095 (df=2) attr(trunc_fit$logLik, \"nobs\") #> [1] 62"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"plotting","dir":"Articles","previous_headings":"Distributions","what":"Plotting","title":"Working with Distributions","text":"Visualising different distributions, parametrizations, e.g., fits, can done plot_distributions()","code":"# Plot fitted densities plot_distributions( true = norm, fit1 = norm, fit2 = norm2, fit3 = dist_normal(3), .x = seq(-2, 7, 0.01), with_params = list( true = list(mean = 3, sd = 1), fit1 = fit1$params, fit2 = fit2$params, fit3 = fit3$params ), plots = \"density\" ) # Plot fitted densities, c.d.f.s and hazard rates plot_distributions( true = norm, cens_fit = norm, trunc_fit = norm, .x = seq(0, 60, length.out = 101L), with_params = list( true = list(mean = 30, sd = 10), cens_fit = cens_fit$params, trunc_fit = trunc_fit$params ) ) # More complex distributions plot_distributions( bdegp = dist_bdegp(2, 3, 10, 3), .x = c(seq(0, 12, length.out = 121), 1.5 - 1e-6), with_params = list( bdegp = list( dists = list( list(), list(), list( dists = list( list( dist = list( shapes = as.list(1:3), scale = 2.0, probs = list(0.2, 0.5, 0.3) ) ), list( sigmau = 0.4, xi = 0.2 ) ), probs = list(0.7, 0.3) ) ), probs = list(0.15, 0.1, 0.75) ) ) )"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Statistical analyses typically concerned modelling estimating distribution measured variable interest \\(Y\\), called outcome, possibly conditional value one several endogenous variables \\(X\\), called predictors. absence endogenous variables, process usually called distribution fitting, presence endogenous variables called regression. 
Classical regression, via generalized linear models (GLMs), concerned influence endogenous variables mean outcome, .e., \\(\\mathsf{E}(Y|X) = f(X)\\), often links parameters conditional outcome distribution mean. gentle introduction generalized linear models can found Dobson Barnett (2018). implementation GLMs available stats R package, part R (R Core Team 2023). models also allow specification additional parameters conditional outcome distribution, Generalized Additive Models Location, Scale Shape (Stasinopoulos Rigby 2007). recently, deep distributional regression proposed, allows flexible specification individual outcome distribution parameters (Rügamer et al. 2023). Statistical methods (described implemented previously mentioned papers) often require complete data, full information observations \\((X, Y)\\) interest. paper, describe R-package allows distributional regression three common observation schemes provide complete data. First , data interval censoring applied outcome \\(Y\\) refers case lower upper bounds \\(Y\\) observed, instead actual value. Next, truncated data misses observations outcome \\(Y\\) falls certain lower upper truncation bound. consider case random truncation, truncation bounds also random variables may vary observation. Finally, consider combination two, randomly truncated interval censoring. three scenarios can combined single general scheme: instead observing real-valued target variable \\(Y\\) (\\(\\mu\\)-density \\(f_\\theta\\) c.d.f. \\(F_\\theta\\), \\(\\mu\\) sigma-finite measure \\(\\mathbb R\\) \\(\\theta\\) parameter vector parameter space \\(\\Theta\\)), observe vector \\((M, V, L, U)\\), satisfies \\(L \\le M \\le V \\le U\\) \\(L C_1) + \\mathbf{1}(Y > C_2), \\end{align*}\\] define new random variables \\((M, V) = f(Y,C_1,C_2)\\) \\[\\begin{align*} (M, V) & := \\begin{cases} (-\\infty, C_1), & D = 0, \\\\ (C_1, C_2), & D = 1, \\\\ (C_2, \\infty), & D = 2. 
\\end{cases} \\end{align*}\\] Note \\(D\\) can reconstructed \\((M, V)\\): \\(D=0\\) \\(M=-\\infty\\), \\(D=1\\) \\(-\\infty m) \\\\ & = F_\\theta((m, \\infty]) \\cdot \\mathsf{P}(C_2 = m). \\end{align*}\\] assume distribution censoring variable \\((C_1,C_2)\\) non-informative, .e., distribution depend \\(\\theta\\), likelihood observing \\((M, V) = (m, v)\\) equal \\(F_\\theta((m, v])\\), factor depend \\(\\theta\\). similar argumentation can used non-discrete case. Overall, noting \\(F_\\infty((-\\infty, \\infty]) = 1\\), motivated likelihood contribution \\(F_\\theta((m, v]) \\cdot \\mathbf{1}(m < v)\\) censored, untruncated observation (1.1). Next, consider uncensored, truncated observation \\((m, v, l, u)\\) \\(y = m = v\\); may hence identify observation \\((y, l, u)\\). may proceed assume \\((L, U)\\) independent \\(Y\\) satisfies \\(L \\le U\\), \\(L\\) possibly equal \\(-\\infty\\) \\(U\\) possibly equal \\(\\infty\\). , \\((L, U)\\) shall density \\(f_{(L, U)}\\) respect dominating sigma-finite measure \\(\\nu\\). Truncation means happen observe \\((Y, L, U)\\) \\(L < Y \\le U\\). consequence, observed value \\(M = V\\) can regarded drawn \\((\\mu \\otimes \\nu)\\)-density \\[\\begin{align} f_{(Y, L, U) | L < Y \\le U}(y, l, u) = \\frac{f_{(L, U)}(l, u) f_\\theta(y)}{\\mathsf{P}(L < Y \\le U)} \\mathbf{1}(l < y \\le u). \\tag{1.4} \\end{align}\\] Subsequently, write \\((Y^{(t)}, L^{(t)}, U^{(t)})\\) random vector following density, .e., \\[\\begin{align*} f_{(Y^{(t)}, L^{(t)}, U^{(t)})}(y, l, u) = f_{(Y, L, U) | L < Y \\le U}(y, l, u). 
\\end{align*}\\] Conditioning density \\((L^{(t)}, U^{(t)}) = (l, u)\\), arrive expression involve nuisance density \\(f_{(L,U)}\\): \\[\\begin{align*} f_{Y^{(t)} | L^{(t)} = l, U^{(t)} = u}(y) & = \\frac{f_{(Y^{(t)}, L^{(t)}, U^{(t)})}(y, l, u)}{f_{(L^{(t)}, U^{(t)})}(l, u)} \\\\ & = \\frac{f_{(Y, L, U) | L < Y \\le U}(y, l, u)}{\\int_{(l, u]} f_{(Y, L, U) | L < Y \\le U}(z, l, u) \\,\\mathrm{d}\\mu(z)} = \\frac{f_\\theta(y)}{\\int_{(l, u]} f_\\theta(z) \\,\\mathrm{d}\\mu(z)}. \\end{align*}\\] Overall, arrive (conditional) log-likelihood contribution \\(\\log f_\\theta(y) - \\log F_\\theta((l, u])\\) uncensored, truncated observation (1.1). Finally, truncation censoring can occur time, .e., \\(l \\le m < v \\le u\\) either \\(l \\ne -\\infty\\) \\(u \\ne \\infty\\). accordance previous two cases, make assumption \\(Y, (C_1, C_2)\\) \\((L, U)\\) mutually independent satisfy \\(C_1 < C_2\\) \\(L < U\\). Define \\[\\begin{align*} D = \\mathbf{1}(Y > C_1) + \\mathbf{1}(Y > C_2) \\end{align*}\\] \\[\\begin{align*} (M, V) := \\begin{cases} (L, \\min(U, C_1)), & D = 0, \\\\ (\\max(L, C_1), \\min(C_2, U)), & D = 1, \\\\ (\\max(L,C_2), U), & D = 2. \\end{cases} \\end{align*}\\] simplicity, assume random variables discrete. observation \\((m, v, l, u)\\), one following four cases met \\[\\begin{align*} l < m < v < u, \\quad l = m < v < u, \\quad l < m < v = u, \\quad l = m < v = u. \\end{align*}\\] case \\(l < m < v < u\\), \\[\\begin{align*} \\mathsf{P}(M = m, V = v | L = l, U = u, L < Y \\le U) & = \\frac{\\mathsf{P}(C_1 = m, C_2 = v, Y \\(m, v], L = l, U = u)}{\\mathsf{P}(L = l, U = u, l < Y \\le u)} \\\\ & = \\frac{\\mathsf{P}(C_1 = m, C_2 = v) F_\\theta((m, v])}{F_\\theta((l, u])} \\end{align*}\\] independence assumption. factor front depend \\(\\theta\\) irrelevant (conditional) likelihood contribution. 
Likewise, case \\(l = m < v < u\\), \\[\\begin{align*} \\mathsf{P}(M = l, V = v | L = l, U = u, L < Y \\le U) & = \\frac{\\mathsf{P}(M = l, V = v, L = l, U = u, l < Y \\le u)}{\\mathsf{P}(L = l, U = u, l < Y \\le u)}. \\end{align*}\\] definition \\((M,V)\\), event numerator disjoint union following two sets: \\[\\begin{align*} & \\{D = 0, C_1 = v, L = l, U = u, l < Y \\le u\\} = \\{C_1 = v, L = l, U = u, Y \\(l, v]\\} \\\\ & \\{D = 1, C_1 \\le l, C_2 = v, L = l, U = u, l < Y \\le u\\} = \\{C_1 \\le l, C_2 = v, L = l, U = u, Y \\(l, v]\\}. \\end{align*}\\] independence, obtain \\[\\begin{align*} \\mathsf{P}(M = l, V = v | L = l, U = u, L < Y \\le U) = \\{\\mathsf{P}(C_1 = v) + \\mathsf{P}(C_1 \\le l, C_2 = v)\\} \\frac{F_\\theta((l, v])}{F_\\theta((l, u])}. \\end{align*}\\] , factor front fraction independent \\(\\theta\\) irrelevant likelihood. two cases \\(l < m < v = u\\) \\(l = m < v = u\\) can treated similarly; cases, likelihood contribution equal \\(F_\\theta((m, v]) /F_\\theta((l, u])\\) times factor depend \\(\\theta\\).","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"related-packages","dir":"Articles","previous_headings":"1 Introduction","what":"Related packages","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"less general cases non-informative censoring without random truncation fixed truncation, .e., \\((L, U)\\) constant observations, well estimation distribution parameters absence censoring random truncation, number R packages can fit distributions, also supporting weights. Among MASS (Venables Ripley 2002), fitdistrplus (Delignette-Muller Dutang 2015), survival (Therneau 2023), flexsurv (Jackson 2016). Note fixed truncation operation can baked distribution family whose parameters estimated, allowing classical maximum likelihood estimation. Many packages also support classic regression expected values given predictors. 
Distributional regression packages, gamlss (Stasinopoulos Rigby 2007) deepregression (Rügamer et al. 2023) currently support interval censoring random truncation. See following table overview available features package. Another R6-based interface provided ROOPSD (Robin 2022). reservr builds upon R packages tensorflow (Allaire Tang 2022) keras (Chollet, Allaire, et al. 2017) interface machine learning library TensorFlow (Abadi et al. 2015) perform distributional regression. underlying infrastructure shared distributional regression package deepregression (Rügamer et al. 2023). latter also supports distributional regression, time writing requires complete samples support truncation censoring. remaining parts paper structured follows: Section 2 details core functionality corresponding R package reservr. split definition samples \\(\\mathfrak{}\\) (Section 2.1), definition distribution families (Section 2.2), mathematical definitions available distribution families (Section 2.3), estimation distribution parameters (Section 2.4) distributional regression using tensorflow (Section 2.5). conclusion given Section 3.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"pkg-overview","dir":"Articles","previous_headings":"","what":"Usage of reservr","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"package serves two main goals: fitting distributions randomly truncated non-informatively interval censored data performing (deep) distributional regression randomly truncated non-informatively interval censored data. Four main components integrated facilitate analysis goals Methods representing randomly truncated non-informatively interval censored sample \\(\\mathfrak{}\\). Methods specifying parametrized distribution family \\(\\mathcal{F} = \\{F_\\theta | \\theta \\\\Theta\\}\\) fitted. Methods estimating distribution parameters \\(\\theta\\) given sample \\(\\mathfrak{}\\). 
Methods regression distribution parameters given regression sample \\(\\mathfrak{}_{\\text{reg}}\\), parametrized family \\(\\mathcal{F}\\) general tensorflow network \\(\\mathcal{G} : \\mathfrak{X} \\\\Theta\\) processes \\(X\\) estimate conditional distribution \\(Y | X = x\\) \\(F_{g(x)}\\) \\(g \\\\mathcal G\\). components described one one following sections.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"trunc-obs","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Working with samples","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"sample \\(\\mathfrak{} = \\{(m, v, l, u, w)_i\\}\\) represented tibble (package tibble). core function create tibble trunc_obs(). tibble created trunc_obs() consists five columns: x: observed, exact value random variable, referred \\(Y\\) Section 1. Otherwise NA. xmin: Lower interval censoring bound (\\(M\\) Section 1) observation. observation censored, xmin equal x. xmax: Upper interval censoring bound (\\(V\\) Section 1) observation. observation censored, xmax equal x. tmin: Lower truncation bound (\\(L\\) Section 1). observations \\(\\mathtt{x} \\ge \\mathtt{tmin}\\) observed. Can \\(-\\infty\\) indicate lower truncation. tmax: Upper truncation bound (\\(U\\) Section 1). observations \\(\\mathtt{x} \\le \\mathtt{tmax}\\) observed. Can \\(\\infty\\) indicate upper truncation. w: weight associated observation. Defaults \\(1\\). Note , unlike Section 1, lower bounds intervals trunc_obs included, , allow \\(\\mathtt{x} \\ge \\mathtt{tmin}\\) rather \\(\\mathtt{x} > \\mathtt{tmin}\\), unknown variable interest called \\(\\mathtt{x}\\) instead \\(Y\\). continuous random variables, formulas equivalent half-open formulation. discrete random variables, \\(\\mathtt{xmin}\\) \\(\\mathtt{tmin}\\) may appropriately shifted, e.g., replacing \\(\\mathtt{xmin}\\) \\(\\mathtt{xmin}-0.5\\) integer valued variables. 
following code defines sample size 1 without truncation censoring, realized value \\(1.3\\). Simulating randomly truncated interval censored data standard normal distribution \\(80\\%\\) observations randomly interval censored random uniform truncation \\(L \\sim \\mathrm{Unif}[-2, 0]\\) \\(U \\sim \\mathrm{Unif}[0, 2]\\) can simulated follows Observations look like: total number observations smaller base population \\(1000\\) due truncation: total number censored observations roughly \\(0.8 \\cdot \\mathtt{nrow(obs)}\\). addition trunc_obs() constructor function, functions as_trunc_obs() coercion, truncate_obs() artificially changing truncation bounds, repdel_obs() computing randomly truncated reporting delay observations general insurance claims data containing accident date, reporting delay evaluation date information. latter takes inputs form \\((T_\\text{acc}, D, \\tau)\\) \\(T_{\\text{acc}} < \\tau\\) accident dates corresponding reporting delays \\(D \\ge 0\\) \\(\\tau\\) calendar date observation. returns sample \\((\\mathtt{xmin} = \\mathtt{xmax} = D, \\mathtt{tmin} = 0, \\mathtt{tmax} = \\tau - T_{\\text{acc}}, \\mathtt{w} = 1)\\) suitable estimating reporting delay distribution claim observed reported evaluation date, .e., \\(T_{\\text{acc}} + D \\le \\tau\\). 
analysis performed using reservr .","code":"trunc_obs(1.3) ## # A data frame: 1 × 6 ## x xmin xmax tmin tmax w ## ## 1 1.3 1.3 1.3 -Inf Inf 1 set.seed(123) N <- 1000L x <- rnorm(N) is_censored <- rbinom(N, size = 1L, prob = 0.8) == 1L c_lower <- runif(sum(is_censored), min = -2.0, max = 0.0) c_upper <- c_lower + runif(sum(is_censored), min = 0, max = 1.0) x_lower <- x x_upper <- x x_lower[is_censored] <- dplyr::case_when( x[is_censored] <= c_lower ~ -Inf, x[is_censored] <= c_upper ~ c_lower, TRUE ~ c_upper ) x_upper[is_censored] <- dplyr::case_when( x[is_censored] <= c_lower ~ c_lower, x[is_censored] <= c_upper ~ c_upper, TRUE ~ Inf ) t_lower <- runif(N, min = -2.0, max = 0.0) t_upper <- runif(N, min = 0.0, max = 2.0) is_observed <- t_lower <= x & x <= t_upper obs <- trunc_obs( xmin = pmax(x_lower, t_lower)[is_observed], xmax = pmin(x_upper, t_upper)[is_observed], tmin = t_lower[is_observed], tmax = t_upper[is_observed] ) obs[8L:12L, ] ## # A tibble: 5 × 6 ## x xmin xmax tmin tmax w ## ## 1 NA -0.479 1.15 -1.93 1.15 1 ## 2 NA -0.177 1.79 -0.210 1.79 1 ## 3 -0.556 -0.556 -0.556 -0.957 0.791 1 ## 4 NA -0.379 0.616 -0.379 0.616 1 ## 5 NA 0.0575 1.45 -0.437 1.45 1 nrow(obs) ## [1] 623 sum(is.na(obs$x)) ## [1] 496"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"distributions","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Definition of distribution families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Distribution families implemented using R6 class system (Chang 2021). 
inherit class Distribution feature common interface manage fixed free parameters underlying familiy, use basic distribution functions random number generation computation density, cumulative distribution, hazard quantile function, use additional functions supporting parameter estimation procedures computing support presence point mass, compile performance enhanced functions speed basic functions repeated evaluation, provide tensorflow-specific implementations support (deep) distributional regression. Distribution object represents distribution family \\(\\mathcal{F}\\) supported subset real line parameterized fixed finite-dimensional parameter space \\(\\Theta\\). family may singleton, case rather distribution distribution family. reservr provides set basic distribution families, optionally fixed parameters, well transformations distribution families take one underlying distribution families. time writing, :","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"parameters","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"Parameters","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Parameters distribution families can either fixed constant value, free. Free parameters (placeholders) estimated data whereas fixed parameters held constant. Distribution methods argument with_params provide values free parameters need fully specified parameters work. example, generating samples distribution possible fully parameterized using fixed parameters with_params argument Distribution$sample(). now defined dist normal distribution family standard deviation \\(1\\) free mean. Since parameters required normal distribution fixed, dist$sample() error provided mean parameter. with_params argument can used provide free parameters override fixed parameters, necessary. 
two observations drawn standard normal normal distribution mean zero standard deviation \\(2\\), respectively. Since chosen seed identical, second sample exactly double first sample. Whenever output length greater one, taking one sample, with_params can optionally contain individual parameters entry. three observations drawn \\(\\mathcal{N}(\\mu = 0, \\sigma = 0.5)\\), \\(\\mathcal{N}(\\mu = 1, \\sigma = 0.5)\\) \\(\\mathcal{N}(\\mu = 2, \\sigma = 0.5)\\), respectively. Distributions set fields methods related managing parameters: active binding default_params gets sets list parameters fixed values, NULL represents free parameter. Component families included Distribution objects. get_params() gets list parameters fixed values, traversing component distribution families. get_placeholders() gets list free parameters NULL values. active binding param_bounds gets sets domain regular family parameters Interval object. Setting bound via param_bounds active binding allows restricting natural parameter space family. get_param_bounds() returns bounds free parameters list Intervals, traversing component distribution families. get_param_constraint() returns NULL function evaluates constraints parameter set. function must return vector constraint values (need equal \\(0\\) valid parameters) list elements constraints jacobian. returning list, jacobian element contain jacobian constraint function. Used nloptr::slsqp(heq=) estimation. example mixture families require probs parameters sum \\(1\\) addition box constraint parameter \\([0, 1]\\). Note box constraints handled param_bounds need specified constraint function. get_components() returns list component families transformations mixtures. list empty basic families. 
example normal family fixed standard deviation \\(\\sigma = 1\\) mixture distribution family two components, one specified normal distribution family:","code":"dist <- dist_normal(sd = 1.0) dist$sample(1L) ## Error in (function (n, mean = 0, sd = 1) : invalid arguments set.seed(10L) dist$sample(1L, with_params = list(mean = 0.0)) ## [1] 0.01874617 set.seed(10L) dist$sample(1L, with_params = list(mean = 0.0, sd = 2.0)) ## [1] 0.03749234 set.seed(10L) dist$sample(3L, with_params = list(mean = 0.0:2.0, sd = 0.5)) ## [1] 0.009373085 0.907873729 1.314334725 dist <- dist_normal(sd = 1.0) mix <- dist_mixture(dists = list(dist_normal(), NULL)) dist$default_params ## $mean ## NULL ## ## $sd ## [1] 1 mix$default_params ## $dists ## $dists[[1]] ## A NormalDistribution with 2 dof ## ## $dists[[2]] ## NULL ## ## ## $probs ## $probs[[1]] ## NULL ## ## $probs[[2]] ## NULL str(dist$get_placeholders()) ## List of 1 ## $ mean: NULL str(mix$get_placeholders()) ## List of 2 ## $ dists:List of 2 ## ..$ :List of 2 ## .. ..$ mean: NULL ## .. ..$ sd : NULL ## ..$ : NULL ## $ probs:List of 2 ## ..$ : NULL ## ..$ : NULL str(dist$param_bounds) ## List of 2 ## $ mean:Classes 'Interval', 'R6' (-Inf, Inf) ## $ sd :Classes 'Interval', 'R6' (0, Inf) str(mix$param_bounds) ## List of 2 ## $ dists:List of 1 ## ..$ : NULL ## $ probs:List of 1 ## ..$ :Classes 'Interval', 'R6' [0, 1] str(dist$get_param_bounds()) ## List of 1 ## $ mean:Classes 'Interval', 'R6' (-Inf, Inf) str(mix$get_param_bounds()) ## List of 2 ## $ dists:List of 1 ## ..$ :List of 2 ## .. ..$ mean:Classes 'Interval', 'R6' (-Inf, Inf) ## .. 
..$ sd :Classes 'Interval', 'R6' (0, Inf) ## $ probs:List of 2 ## ..$ :Classes 'Interval', 'R6' [0, 1] ## ..$ :Classes 'Interval', 'R6' [0, 1] str(dist$get_param_constraints()) ## NULL str(mix$get_param_constraints()) ## function (params) dist$get_components() ## list() mix$get_components() ## [[1]] ## A NormalDistribution with 2 dof ## ## [[2]] ## NULL"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"basic-distribution-functions","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"Basic distribution functions","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"basic distribution functions (density, probability, hazard quantile function, well random number generation) provided distribution family. general, argument with_params can used specify missing parameters (placeholders) override fixed distribution parameters. provided parameters vectors length greater 1, must conform input dimension (e.g. length(x) density). case, parameters “vectorized” sense \\(\\)th output element computed using \\(\\)th entry parameter list. density(x, log = FALSE, with_params = list()) computes (log-)density. probability(q, lower.tail = TRUE, log.p = FALSE, with_params = list() computes (log-)cumulative distribution function (log-)survival function. hazard(x, log = FALSE. with_params = list()) computes (log-)hazard function. quantile(p, lower.tail = TRUE, log.p = FALSE, with_params = list()) computes upper lower quantiles. sample(n, with_params = list()) generates random sample size n. 
(with_params can contain length n vectors case).","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"additional-functions","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"Additional functions","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"addition basic functions, several supporting functions useful , e.g., estimation parameters. export_functions(name, with_params = list()) exports {d,p,q,r} functions adhering common R convention distribution functions. get_type() returns one \"continuous\", \"discrete\", \"mixed\" depending whether distribution family density respect Lebesgue measure, counting measure, sum Lebesgue measure one many point measures. is_continuous() is_discrete() testing particular type. has_capability(caps) gives information whether specific implementation provides features described. Possible capabilities \"sample\", \"density\", \"probability\", \"quantile\", \"diff_density\", \"diff_probability\", \"tf_logdensity\", \"tf_logprobability\". require_capability(caps) errors specified capabilities implemented family hand. is_discrete_at(x, with_params = list()) returns logical vector indicating whether distribution point mass x. is_in_support(x, with_params = list()) returns logical vector indicating whether distribution mass x.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"performance-enhancements","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"Performance enhancements","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"working larger data many calls distribution functions, performing fit, can beneficial just--time compile specialized functions avoid overhead dealing generic structure distributions parametrization. 
Distributions offer set “compiler” functions return simplified, faster, versions basic distribution functions, analytically compute gradients. functions necessarily implemented Distribution classes, automatically used , e.g., fit_dist() useful. input structure param_matrix can obtained flatten_params_matrix(dist$get_placeholders()) dist Distribution object question. compile_density() compiles fast function signature (x, param_matrix, log = FALSE) compute density fixed parameters hard-coded taking free parameters matrix defined layout instead nested list. compile_probability() compiles fast replacement probability signature (q, param_matrix, lower.tail = TRUE, log.p = FALSE). compile_probability_interval() compiles fast function signature (qmin, qmax, param_matrix, log.p = FALSE) computing \\(P(X \\[\\mathtt{qmin}, \\mathtt{qmax}])\\) logarithm efficiently. expression necessary computing truncation probabilities. compile_sample() compiles fast replacement sample signature (n, param_matrix). diff_density(x, log = FALSE, with_params = list()) computes (log-)gradients density function respect free distribution family parameters, useful maximum likelihood estimation. diff_probability(q, lower.tail = TRUE, log.p = FALSE, with_params = list()) computes (log-)gradients cumulative density function respect free distribution family parameters. useful conditional maximum likelihood estimation presence random truncation non-informative interval censoring.","code":"dist <- dist_normal() flatten_params_matrix(dist$get_placeholders()) ## mean sd ## [1,] NA NA denscmp <- dist$compile_density() if (requireNamespace(\"bench\", quietly = TRUE)) { bench::mark( dist$density(-2:2, with_params = list(mean = 0.0, sd = 1.0)), denscmp(-2:2, matrix(c(0.0, 1.0), nrow = 5L, ncol = 2L, byrow = TRUE)), dnorm(-2:2, mean = rep(0.0, 5L), sd = rep(1.0, 5L)) ) } ## # A tibble: 3 × 6 ## expression min median `itr/sec` mem_alloc `gc/sec` ## ## 1 dist$density(-2:2, with_params =… 27.08µs 29.92µs 32435. 
0B 16.2 ## 2 denscmp(-2:2, matrix(c(0, 1), nr… 4.1µs 4.79µs 200114. 0B 40.0 ## 3 dnorm(-2:2, mean = rep(0, 5L), s… 1.66µs 1.96µs 478716. 2.58KB 0"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"tensorflow-interface","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"tensorflow interface","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Use distribution families within tensorflow networks requires specialized implementations using tensorflow APIs instead regular R functions. tailored needs maximizing (conditional) likelihoods weighted, censored randomly truncated data. Details working tensorflow can found Section 2.5. tf_compile_params(input, name_prefix = \"\") creates keras layers take input layer transform valid parametrization distribution family. tf_is_discrete_at() returns tensorflow-ready version is_discrete_at(). tf_logdensity() returns tensorflow-ready version compile_density() implied log = TRUE. tf_logprobability() returns tensorflow-ready version of compile_probability_interval() implied log.p = TRUE. tf_make_constants() creates list constant tensors fixed distribution family parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-definitions","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Special families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"distribution families available reservr tailored algorithms parameter estimation, commonly known.
section contains mathematical definitions function families.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-mixture","dir":"Articles","previous_headings":"2 Usage of reservr > 2.3 Special families","what":"Mixture distribution families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"mixture distribution family defined fixed number \\(k\\) component families \\(\\{\\mathcal{F}_i\\}_{= 1}^k\\) via set distributions \\[\\begin{align*} \\mathop{\\mathrm{Mixture}}(\\mathcal{F}_1, \\ldots, \\mathcal{F}_k) & := \\Bigl\\{ F = \\sum_{= 1}^k p_i F_i \\Bigm| F_i \\\\mathcal{F}_i, p_i \\[0, 1], \\sum_{= 1}^k p_i = 1 \\Bigr\\}. \\end{align*}\\]","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-erlangmix","dir":"Articles","previous_headings":"2 Usage of reservr > 2.3 Special families","what":"Erlang mixture distribution families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Erlang mixture distribution family defined number components \\(k\\) mixture Erlang distributions (Gamma distributions integer shape parameter) common scale parameter. \\(\\Gamma_{\\alpha, \\theta}\\) denotes Gamma distribution shape \\(\\alpha\\) scale \\(\\theta\\), erlang mixture family \\(k\\) components can defined follows: \\[\\begin{align*} \\mathop{\\mathrm{ErlangMixture}}(k) := \\Bigl\\{ F = \\sum_{= 1}^k p_i \\Gamma_{\\alpha_i, \\theta} \\Bigm| \\alpha_i \\\\mathbb{N}, \\theta \\(0, \\infty), p_i \\[0, 1], \\sum_{= 1}^k p_i = 1 \\Bigr\\}. \\end{align*}\\] Note \\(k \\\\infty\\), Erlang mixtures dense space distributions \\((0, \\infty)\\) respect weak convergence (Lee Lin 2012), making useful modeling choice general positive continuous distributions. 
However, tail index Erlang mixture distributions always zero due exponential decay Gamma densities.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-blended","dir":"Articles","previous_headings":"2 Usage of reservr > 2.3 Special families","what":"Blended distribution families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Blended distribution defined follows: Given two underlying distributions \\(P, Q\\) \\(\\mathbb{R}\\) cdfs \\(F(\\cdot)=P((-\\infty, \\cdot])\\) \\(G(\\cdot)=Q((-\\infty, \\cdot])\\), respectively, parameters \\(\\kappa \\\\mathbb{R}, \\varepsilon \\(0, \\infty), p_1, p_2 \\[0, 1], p_1 + p_2 = 1\\) \\(F(\\kappa) > 0\\) \\(G(\\kappa) < 1\\), define Blended Distribution \\(B = \\mathop{\\mathrm{Blended}}(P, Q; p, \\kappa, \\varepsilon)\\) \\(P\\) \\(Q\\) blending interval \\([\\kappa - \\varepsilon, \\kappa + \\varepsilon]\\) mixture probabilities \\(p\\) via cdf \\(F_B\\): \\[\\begin{align*} p_{\\kappa, \\varepsilon}(x) &= \\begin{cases} x & , x \\(-\\infty, \\kappa-\\varepsilon],\\\\ \\tfrac12 (x + \\kappa - \\varepsilon) + \\tfrac\\varepsilon\\pi \\cos\\Big( \\frac{\\pi (x - \\kappa)}{2 \\varepsilon} \\Big) &, x \\(\\kappa-\\varepsilon , \\kappa+\\varepsilon], \\\\ \\kappa &, x \\(\\kappa +\\varepsilon, \\infty), \\end{cases} \\\\ \\nonumber q_{\\kappa, \\varepsilon}(x) & = \\begin{cases} \\kappa & , x \\(-\\infty, \\kappa-\\varepsilon],\\\\ \\tfrac12 (x + \\kappa + \\varepsilon) - \\tfrac\\varepsilon\\pi \\cos\\Big( \\frac{\\pi (x - \\kappa)}{2 \\varepsilon} \\Big) &, x \\(\\kappa-\\varepsilon , \\kappa+\\varepsilon], \\\\ x &, x \\(\\kappa +\\varepsilon, \\infty), \\end{cases} \\\\ F_B(x) & = p_1 \\frac{F(p_{\\kappa, \\varepsilon}(x))}{F(\\kappa)} + p_2 \\frac{G(q_{\\kappa, \\varepsilon}(x)) - G(\\kappa)}{1 - G(\\kappa)}. 
\\end{align*}\\] following illustration shows components \\(\\mathrm{Blended}(\\mathcal{N}(\\mu = -1, \\sigma = 1), \\mathrm{Exp}(\\lambda = 1); p = (0.5, 0.5), \\kappa = 0, \\varepsilon = 1)\\) distribution. transformation original component distributions (\\(\\mathcal{N}\\) \\(\\mathrm{Exp}\\)) can illustrated first right- left-truncating \\(\\kappa = 0\\) respectively, applying blending transformations \\(p_{\\kappa, \\varepsilon}\\) \\(q_{\\kappa, \\varepsilon}\\). latter distributions can obtained reservr setting probability weights blended distribution \\(p = (1, 0)\\) \\(p = (0, 1)\\) respectively. Intermediate truncated distributions obtained via trunc_dist(), \\(\\kappa\\) upper lower bound respectively. show resulting density steps, final blended density obtained weighting blended component densities. definition blended distribution leads definition blended distribution family allowing \\(P, Q, \\kappa\\) \\(\\varepsilon\\) vary: Given two families \\(\\mathcal{F}, \\mathcal{G}\\) distributions \\(\\mathbb{R}\\), parameters \\(\\kappa \\\\mathbb{R}, \\varepsilon \\(0, \\infty)\\), define Blended Distribution family family Distributions \\[\\begin{align*} \\mathop{\\mathrm{Blended}}(\\mathcal{F}, \\mathcal{G}; \\kappa, \\varepsilon) & := \\{ \\mathop{\\mathrm{Blended}}(P, Q ; p, \\kappa, \\varepsilon) \\mid P \\\\mathcal{F}, Q \\\\mathcal{G}, p_1, p_2 \\[0, 1], p_1 + p_2 = 1 \\}. \\end{align*}\\] Blended distribution families can generalized number components \\(k\\) letting \\(\\kappa\\) \\(\\varepsilon\\) become vectors dimension \\(k - 1\\) \\(\\kappa_i + \\varepsilon_i \\le \\kappa_{+ 1} - \\varepsilon_{+ 1}\\) \\(= 1, \\ldots, k - 2\\). 
Compared piecewise distribution families obtained mixture truncated distribution families supports \\((-\\infty, \\kappa]\\) \\([\\kappa, \\infty)\\) commonly used extreme value modelling, blended distribution families exhibit continuous density within blending region \\((\\kappa - \\varepsilon, \\kappa + \\varepsilon)\\). reservr provides implementation via dist_blended(), limited support two component families.","code":"dist1 <- dist_normal(mean = -1.0, sd = 1.0) dist2 <- dist_exponential(rate = 1.0) distb <- dist_blended( dists = list(dist1, dist2), breaks = list(0.0), bandwidths = list(1.0), probs = list(0.5, 0.5) ) distt1 <- dist_trunc(dist1, min = -Inf, max = 0.0) distt2 <- dist_trunc(dist2, min = 0.0, max = Inf) distb1 <- distb$clone() distb1$default_params$probs <- list(1.0, 0.0) distb2 <- distb$clone() distb2$default_params$probs <- list(0.0, 1.0)"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-bdegp","dir":"Articles","previous_headings":"2 Usage of reservr > 2.3 Special families","what":"The Blended Dirac Erlang Generalized Pareto distribution family","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Using construction Blended distribution family, can define Blended Dirac Erlang Generalized Pareto (BDEGP) family follows, see . 
Given parameters \\(n \\\\mathbb{N}, m \\\\mathbb{N}, \\kappa \\\\mathbb{R}\\) \\(\\varepsilon \\(0, \\infty)\\), define Blended Dirac Erlang Generalized Pareto family family distributions \\[\\begin{align*} \\mathop{\\mathrm{BDEGP}}(n, m, \\kappa, \\varepsilon) := & \\mathop{\\mathrm{Mixture}}( \\\\ & \\qquad \\{\\delta_0\\}, \\{\\delta_1\\}, \\ldots, \\{\\delta_{n-1}\\}, \\\\ & \\qquad \\mathop{\\mathrm{Blended}}( \\\\ & \\qquad\\qquad \\mathop{\\mathrm{ErlangMixture}}(m), \\\\ & \\qquad\\qquad \\{ \\mathop{\\mathrm{GPD}}(\\kappa, \\sigma, \\xi) \\mid \\sigma \\(0, \\infty), \\xi \\[0, 1)) \\}; \\\\ & \\qquad\\qquad \\kappa, \\varepsilon \\\\ & \\qquad) \\\\ &), \\end{align*}\\] \\(\\delta_k\\) dirac distribution \\(k\\) \\(\\mathrm{GPD}\\) generalized Pareto distribution. Note constraint tail index \\(\\xi \\[0, 1)\\), guaranteeing finite expectation. distribution family three features making useful modelling general heavy-tailed distributions \\((0, \\infty)\\): maximally flexible lower tail flexible family distributions body flexible tail index due generalized Pareto component","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"fit-dist","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Methods of estimating distribution parameters","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"section describes functions problem estimating parameter \\(\\theta \\\\Theta\\) given sample \\(\\mathfrak{}\\) parameterized family \\(\\mathcal{F} = \\{F_\\theta \\mid \\theta \\\\Theta\\}\\). Sometimes, conditional log-likelihood (1.2) can directly maximized, yielding estimate \\(\\theta\\). default behavior reservr specialized estimation routine provided family \\(\\mathcal{F}_\\theta\\) defined. 
Depending whether box constraints, nonlinear constraints constraints parameter space \\(\\Theta\\), different implementations nonlinear optimization algorithms nloptr (Johnson 2007), particular truncated Newton (Dembo Steihaug 1983) unconstrained families, L-BFGS (Liu Nocedal 1989) box-constrained families SLSQP (Kraft 1994) general constrained families employed. addition naive direct optimization approach, families lend specialized estimation algorithms usually show faster convergence due making use special structures parameter space \\(\\Theta\\). Estimating distribution parameters truncated observations handled using generic fit() method. delegates fit_dist(), also generic signature: dist: distribution family fit obs: trunc_obs object, vector observed values start: Starting parameters, list compatible dist$get_placeholders(). time writing specialized algorithms six types families: Blended distribution families Erlang mixture distribution families Generalized pareto distribution families free lower bound u (estimated minimum xmin sample) Mixture distribution families Translated distribution families fixed offset multiplier (transform sample via \\(\\tfrac{\\cdot-\\text{offset}}{\\text{multiplier}}\\) fit component distribution family transformed sample) Uniform distribution families free lower bound min upper bound max (estimated minimum xmin, min, maximum xmax, max, sample) present, start parameter obtained via fit_dist_start() generic. generic implements family specific method generating valid starting values placeholder parameters. notable implementation fit_dist_start.ErlangMixtureDistribution() Erlang mixture distribution families. shape parameters free, different initialization strategies can chosen using additional arguments fit_dist_start(): init = \"shapes\" paired shapes = c(...) 
manually specifies starting shape parameters \\(\\alpha\\) init = \"fan\" paired spread = d uses \\(\\alpha = (1, 1 + d, \\ldots, 1 + (k - 1) \\cdot d)\\) default \\(d = 1\\) resulting \\(\\alpha = (1, \\ldots, k)\\) init = \"kmeans\" uses 1-dimensional K-means based clustering sample observations cluster corresponds unique shape init = \"cmm\" uses centralized method moments procedure described Re-using dist <- dist_normal(sd = 1.0) generated sample obs, can fit free parameter mean: Using function plot_distributions() can also assess quality fit. , density labelled empirical corresponds kernel density estimate automatic bandwidth selection. follow example fitting \\(\\mathrm{ErlangMixture}(3)\\) distribution family using various initialization strategies. Note , \"kmeans\" \"cmm\" use random number generator internal K-means clustering. necessitates setting constant seed running fit_dist_start() fit() ensure chosen starting parameters calls. noted different initialization methods considerable impact outcome example due discrete nature Erlang mixture distribution shape parameters thus combinatorial difficulty picking optimal shapes \\(\\alpha\\). fit() result Erlang mixture distribution families contains element named \"params_hist\". can populated passing trace = TRUE fit() record parameters ECME steps ECME-based estimation algorithms . element \"iter\" contains number full ECME-Iterations performed.","code":"dist <- dist_normal(sd = 1.0) the_fit <- fit(dist, obs) str(the_fit) ## List of 3 ## $ params:List of 1 ## ..$ mean: num 0.0822 ## $ opt :List of 5 ## ..$ par : Named num 0.0822 ## .. 
..- attr(*, \"names\")= chr \"mean\" ## ..$ value : num 341 ## ..$ iter : int 7 ## ..$ convergence: int 1 ## ..$ message : chr \"NLOPT_SUCCESS: Generic success return value.\" ## $ logLik:Class 'logLik' : -341 (df=1) plot_distributions( true = dist, fitted = dist, empirical = dist_empirical(0.5 * (obs$xmin + obs$xmax)), .x = seq(-5, 5, length.out = 201), plots = \"density\", with_params = list( true = list(mean = 0.0, sd = 1.0), fitted = the_fit$params ) ) dist <- dist_erlangmix(list(NULL, NULL, NULL)) params <- list( shapes = list(1L, 4L, 12L), scale = 2.0, probs = list(0.5, 0.3, 0.2) ) set.seed(1234) x <- dist$sample(100L, with_params = params) set.seed(32) init_true <- fit_dist_start(dist, x, init = \"shapes\", shapes = as.numeric(params$shapes)) init_fan <- fit_dist_start(dist, x, init = \"fan\", spread = 3L) init_kmeans <- fit_dist_start(dist, x, init = \"kmeans\") init_cmm <- fit_dist_start(dist, x, init = \"cmm\") rbind( flatten_params(init_true), flatten_params(init_fan), flatten_params(init_kmeans), flatten_params(init_cmm) ) ## shapes[1] shapes[2] shapes[3] scale probs[1] probs[2] probs[3] ## [1,] 1 4 12 1.590800 0.43 0.33 0.24 ## [2,] 1 4 7 2.688103 0.55 0.32 0.13 ## [3,] 1 5 13 1.484960 0.43 0.36 0.21 ## [4,] 2 10 24 1.010531 0.56 0.27 0.17 set.seed(32) str(fit(dist, x, init = \"shapes\", shapes = as.numeric(params$shapes))) ## List of 4 ## $ params :List of 3 ## ..$ probs :List of 3 ## .. ..$ : num 0.43 ## .. ..$ : num 0.33 ## .. ..$ : num 0.24 ## ..$ shapes:List of 3 ## .. ..$ : num 1 ## .. ..$ : num 4 ## .. ..$ : num 13 ## ..$ scale : num 1.59 ## $ params_hist: list() ## $ iter : int 1 ## $ logLik :Class 'logLik' : -290 (df=6) fit(dist, x, init = \"fan\", spread = 3L)$logLik ## 'log Lik.' -292.0026 (df=6) fit(dist, x, init = \"kmeans\")$logLik ## 'log Lik.' -289.2834 (df=6) fit(dist, x, init = \"cmm\")$logLik ## 'log Lik.' 
-293.1273 (df=6)"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"tensorflow","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Distributional regression using tensorflow integration","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"maximization problem (1.3) delegated tensorflow, supplies ample stochastic optimization algorithms. Functions reservr necessary create suitable output layer tensorflow maps onto \\(\\Theta\\) provide implementation (negative) log-likelihood (1.3) loss function. two tasks combined tf_compile_model(). function returns object class reservr_keras_model, can used estimation procedure. Given input layers inputs intermediate output layer intermediate_output well family distributions dist, function Compiles loss dist defined (1.3) \\(l(g) = -\\tfrac{1}{\\#(\\mathfrak{}_{\\mathrm{reg}})} \\ell(g|\\mathfrak{}_{\\mathrm{reg}})\\), optionally disabling censoring truncation efficiency. Creates list final output layers mapping intermediate_output onto parameter space \\(\\Theta\\) dist using Distribution$tf_compile_params(). step adds additional degrees freedom overall model, approach described Runs keras3::compile() underlying keras.src.models.model.Model. following example defines linear model homoskedasticity assumption fits using \\(100\\) iterations Adam optimization algorithm (Kingma Ba 2015). First, simulate data \\((Y,X)\\) model defined \\(X \\sim \\mathrm{Unif}(10,20)\\) \\(Y | X =x \\sim \\mathcal{N}(\\mu = 2x, \\sigma = 1)\\). Next, specify distribution family \\(\\mathcal{F} = \\{\\mathcal{N}(\\mu, \\sigma = 1) | \\mu\\\\mathbb R\\}\\), incorporating homoskedasticity assumption. Using keras, define empty neural network, just taking \\(x\\) input performing transformation. , tf_compile_model() adapts input layer free parameter space \\(\\Theta = \\mathbb{R}\\). 
introduces two parameters function family \\(\\mathcal{G}\\) implies functional relationship \\(\\mu = g(x) := \\theta_1 \\cdot x + \\theta_0\\). Since sample fully observed, disable censoring truncation, leading simplified loss \\[\\begin{align*} l(g) = -\\tfrac{1}{100} \\sum_{x, y} \\log f_{g(x)}(y), \\end{align*}\\] \\(f_\\mu(y)\\) density \\(\\mathcal{N}(\\mu=\\mu, \\sigma=1)\\) evaluated \\(y\\). fit can now performed, modifying parameters (weights) nnet -place. Note argument y fit accepts trunc_obs object. example, vector y silently converted untruncated, uncensored trunc_obs object. fit() returns keras_training_history underlying call fit() keras.src.models.model.Model. training history can plotted, displaying loss epoch (black circles), blue smoothing line. predict() method reservr_keras_model takes input tensors returns predicted distribution parameters list compatible dist$get_placeholders(). can thus extract parameter mean compare OLS fit dataset: Since reservr_keras_model includes underlying keras.src.models.model.Model, parameters can also extracted compared OLS coefficients now discuss complex example involving censoring, using right-censored ovarian dataset bundled survival package (R Core Team 2023). goal predict rate parameter exponential survival time distribution cancer patients given four features \\(X = (\\mathtt{age}, \\mathtt{resid.ds}, \\mathtt{rx}, \\mathtt{ecog.ps})\\) collected study. variables \\(\\mathtt{resid.ds}, \\mathtt{rx}\\) \\(\\mathtt{ecog.ps}\\) indicator variables coded \\(\\{1, 2\\}\\). \\(\\mathtt{age}\\) continuous variable values \\((38, 75)\\). Due different scale \\(\\mathtt{age}\\) variable, useful separate variables order perform normalization. Normalization using keras3::layer_normalization() transforms input variables zero mean unit variance. step necessary categorical features. Next, define input layers shapes, conforming input predictor list dat$x. 
age normalized concatenated features, stored flags, resulting 4-dimensional representation. add two hidden ReLU-layers \\(5\\) neurons network compile result, adapting 5-dimensional hidden output parameter space \\(\\Theta = (0, \\infty)\\) rate parameter exponential distribution. accomplished using dense layer \\(1\\) neuron \\(\\mathrm{softplus}\\) activation function. stability reasons, default weight initialization optimal. circumvent , estimate global exponential distribution fit observations initialize final layer weights global fit initial prediction network. Finally, can train network visualize predictions. plot expected lifetime \\((\\mathtt{age}, \\mathtt{rx})\\) shows network learned longer expected lifetimes lower \\(\\mathtt{age}\\) treatment group (\\(\\mathtt{rx}\\)) 2. global fit included dashed blue line. Individual predictions observations can also plotted subject level.","code":"set.seed(1431L) keras3::set_random_seed(1432L) dataset <- tibble::tibble( x = runif(100, min = 10, max = 20), y = 2 * x + rnorm(100) ) dist <- dist_normal(sd = 1.0) nnet_input <- keras3::keras_input(shape = 1L, name = \"x_input\") nnet_output <- nnet_input nnet <- tf_compile_model( inputs = list(nnet_input), intermediate_output = nnet_output, dist = dist, optimizer = keras3::optimizer_adam(learning_rate = 0.1), censoring = FALSE, truncation = FALSE ) nnet$dist nnet$model nnet_fit <- fit( nnet, x = dataset$x, y = dataset$y, epochs = 100L, batch_size = 100L, shuffle = FALSE, verbose = FALSE ) # Fix weird behavior of keras3 nnet_fit$params$epochs <- max(nnet_fit$params$epochs, length(nnet_fit$metrics$loss)) plot(nnet_fit) pred_params <- predict(nnet, data = list(keras3::as_tensor(dataset$x, keras3::config_floatx()))) lm_fit <- lm(y ~ x, data = dataset) dataset$y_pred <- pred_params$mean dataset$y_lm <- predict(lm_fit, newdata = dataset, type = \"response\") library(ggplot2) ggplot(dataset, aes(x = x, y = y)) + geom_point() + geom_line(aes(y = y_pred), color = \"blue\") + 
geom_line(aes(y = y_lm), linetype = 2L, color = \"green\") coef_nnet <- rev(as.numeric(nnet$model$get_weights())) coef_lm <- unname(coef(lm_fit)) str(coef_nnet) str(coef_lm) set.seed(1219L) tensorflow::set_random_seed(1219L) keras3::config_set_floatx(\"float32\") dist <- dist_exponential() ovarian <- survival::ovarian dat <- list( y = trunc_obs( xmin = ovarian$futime, xmax = ifelse(ovarian$fustat == 1, ovarian$futime, Inf) ), x = list( age = keras3::as_tensor(ovarian$age, keras3::config_floatx(), shape = nrow(ovarian)), flags = k_matrix(ovarian[, c(\"resid.ds\", \"rx\", \"ecog.ps\")] - 1.0) ) ) nnet_inputs <- list( keras3::keras_input(shape = 1L, name = \"age\"), keras3::keras_input(shape = 3L, name = \"flags\") ) hidden1 <- keras3::layer_concatenate( keras3::layer_normalization(nnet_inputs[[1L]]), nnet_inputs[[2L]] ) hidden2 <- keras3::layer_dense( hidden1, units = 5L, activation = keras3::activation_relu ) nnet_output <- keras3::layer_dense( hidden2, units = 5L, activation = keras3::activation_relu ) nnet <- tf_compile_model( inputs = nnet_inputs, intermediate_output = nnet_output, dist = dist, optimizer = keras3::optimizer_adam(learning_rate = 0.01), censoring = TRUE, truncation = FALSE ) nnet$model str(predict(nnet, dat$x)) global_fit <- fit(dist, dat$y) tf_initialise_model(nnet, params = global_fit$params, mode = \"zero\") str(predict(nnet, dat$x)) nnet_fit <- fit( nnet, x = dat$x, y = dat$y, epochs = 100L, batch_size = nrow(dat$y), shuffle = FALSE, verbose = FALSE ) nnet_fit$params$epochs <- max(nnet_fit$params$epochs, length(nnet_fit$metrics$loss)) plot(nnet_fit) ovarian$expected_lifetime <- 1.0 / predict(nnet, dat$x)$rate"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"conclusion","dir":"Articles","previous_headings":"","what":"Conclusion","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"presented reservr, package supports distribution parameter estimation 
distributional regression using R. tasks supported samples without interval censoring without random truncation, general form truncation typical packages support. package includes facilities (1) description randomly truncated non-informatively interval censored samples, (2) definition distribution families consider, (3) global distribution parameter estimation ..d. assumption sample (4) distributional regression - employing tensorflow package flexibility speed.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"acknowledgements","dir":"Articles","previous_headings":"","what":"Acknowledgements","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Author like thank Axel Bücher proofreading valuable comments earlier version article.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/articles/tensorflow.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"TensorFlow Integration","text":"reservr capable fitting distributions censored truncated observations, directly allow modelling influence exogenous variables observed alongside primary outcome. integration TensorFlow comes . TensorFlow integration allows fit neural network simultaneously parameters distribution taking exogenous variables account. reservr accepts partial tensorflow networks yield single arbitrary-dimension rank 2 tensor (e.g. dense layer) output can connect suitable layers intermediate output complete network predicts parameters pre-specified distribution family. also dynamically compiles suitable conditional likelihood based loss, depending type problem (censoring, truncation), can optimized using keras3::fit implementation --box. 
means full flexibility respect callbacks, optimizers, mini-batching, etc.","code":"library(reservr) library(tensorflow) library(keras3) #> #> Attaching package: 'keras3' #> The following objects are masked from 'package:tensorflow': #> #> set_random_seed, shape library(tibble) library(ggplot2)"},{"path":"https://ashesitr.github.io/reservr/articles/tensorflow.html","id":"a-simple-linear-model","dir":"Articles","previous_headings":"Overview","what":"A simple linear model","title":"TensorFlow Integration","text":"following example show code necessary fit simple model assumptions OLS data. true relationship use \\(y = 2 x + \\epsilon\\) \\(\\epsilon \\sim \\mathcal{N}(0, 1)\\). use censoring truncation.","code":"if (reticulate::py_module_available(\"keras\")) { set.seed(1431L) tensorflow::set_random_seed(1432L) dataset <- tibble( x = runif(100, min = 10, max = 20), y = 2 * x + rnorm(100) ) print(qplot(x, y, data = dataset)) # Specify distributional assumption of OLS: dist <- dist_normal(sd = 1.0) # OLS assumption: homoskedasticity # Optional: Compute a global fit global_fit <- fit(dist, dataset$y) # Define a neural network nnet_input <- layer_input(shape = 1L, name = \"x_input\") # in practice, this would be deeper nnet_output <- nnet_input optimizer <- optimizer_adam(learning_rate = 0.1) nnet <- tf_compile_model( inputs = list(nnet_input), intermediate_output = nnet_output, dist = dist, optimizer = optimizer, censoring = FALSE, # Turn off unnecessary features for this problem truncation = FALSE ) nnet_fit <- fit(nnet, x = dataset$x, y = dataset$y, epochs = 100L, batch_size = 100L, shuffle = FALSE) # Fix weird behavior of keras3 nnet_fit$params$epochs <- max(nnet_fit$params$epochs, length(nnet_fit$metrics$loss)) print(plot(nnet_fit)) pred_params <- predict(nnet, data = list(as_tensor(dataset$x, config_floatx()))) lm_fit <- lm(y ~ x, data = dataset) dataset$y_pred <- pred_params$mean dataset$y_lm <- predict(lm_fit, newdata = dataset, type = \"response\") p <- 
ggplot(dataset, aes(x = x, y = y)) %+% geom_point() %+% geom_line(aes(y = y_pred)) %+% geom_line(aes(y = y_lm), linetype = 2L) print(p) coef_nnet <- rev(as.numeric(nnet$model$get_weights())) coef_lm <- coef(lm_fit) print(coef_nnet) print(coef_lm) } #> Warning: `qplot()` was deprecated in ggplot2 3.4.0. #> This warning is displayed once every 8 hours. #> Call `lifecycle::last_lifecycle_warnings()` to see where this warning was #> generated. #> Epoch 0/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 483ms/step - loss: 56.18431/1 ━━━━━━━━━━━━━━━━━━━━ 0s 487ms/step - loss: 56.1843 #> Epoch 1/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 46.63251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 46.6325 #> Epoch 2/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 39.75831/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 39.7583 #> Epoch 3/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 35.50141/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 35.5014 #> Epoch 4/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.62621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.6262 #> Epoch 5/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.64411/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.6441 #> Epoch 6/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.80991/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 34.8099 #> Epoch 7/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 36.28141/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 36.2814 #> Epoch 8/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 37.38771/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 37.3877 #> Epoch 9/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 37.80121/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 37.8012 #> Epoch 10/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 37.51511/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 37.5151 #> Epoch 11/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 36.71811/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 36.7181 #> Epoch 12/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 
12ms/step - loss: 35.67251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 35.6725 #> Epoch 13/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.63281/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.6328 #> Epoch 14/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.79741/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.7974 #> Epoch 15/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.28291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.2829 #> Epoch 16/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.11521/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.1152 #> Epoch 17/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.23721/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.2372 #> Epoch 18/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.53231/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.5323 #> Epoch 19/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.86181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.8618 #> Epoch 20/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.10381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 34.1038 #> Epoch 21/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.18291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 34.1829 #> Epoch 22/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.08131/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 34.0813 #> Epoch 23/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.83221/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.8322 #> Epoch 24/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.50171/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.5017 #> Epoch 25/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.16641/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 33.1664 #> Epoch 26/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.89201/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.8920 #> Epoch 27/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.71911/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.7191 #> Epoch 28/100 #> 1/1 
━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.65621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.6562 #> Epoch 29/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.68181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.6818 #> Epoch 30/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.75441/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.7544 #> Epoch 31/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.82651/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.8265 #> Epoch 32/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.85911/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.8591 #> Epoch 33/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.83111/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.8311 #> Epoch 34/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.74251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.7425 #> Epoch 35/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.61121/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.6112 #> Epoch 36/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.46491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.4649 #> Epoch 37/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.33131/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.3313 #> Epoch 38/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.23041/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.2304 #> Epoch 39/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.16951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1695 #> Epoch 40/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.14301/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1430 #> Epoch 41/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.13571/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1357 #> Epoch 42/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.12951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1295 #> Epoch 43/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.10861/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.1086 #> 
Epoch 44/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.06491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.0649 #> Epoch 45/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.99891/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.9989 #> Epoch 46/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.91821/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.9182 #> Epoch 47/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.83421/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.8342 #> Epoch 48/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.75761/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.7576 #> Epoch 49/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.69491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.6949 #> Epoch 50/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.64701/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.6470 #> Epoch 51/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.60981/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.6098 #> Epoch 52/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.57631/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.5763 #> Epoch 53/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.53921/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.5392 #> Epoch 54/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 18ms/step - loss: 31.49401/1 ━━━━━━━━━━━━━━━━━━━━ 0s 19ms/step - loss: 31.4940 #> Epoch 55/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.43951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.4395 #> Epoch 56/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.37821/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.3782 #> Epoch 57/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.31441/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.3144 #> Epoch 58/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.25271/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.2527 #> Epoch 59/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.19611/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - 
loss: 31.1961 #> Epoch 60/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.14541/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.1454 #> Epoch 61/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.09891/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.0989 #> Epoch 62/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.05381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.0538 #> Epoch 63/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.00711/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.0071 #> Epoch 64/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.95711/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.9571 #> Epoch 65/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.90341/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.9034 #> Epoch 66/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.84721/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.8472 #> Epoch 67/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.79051/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.7905 #> Epoch 68/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.73501/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.7350 #> Epoch 69/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.68181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.6818 #> Epoch 70/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.63071/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.6307 #> Epoch 71/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.58091/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.5809 #> Epoch 72/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.53111/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.5311 #> Epoch 73/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.48021/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.4802 #> Epoch 74/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.42771/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.4277 #> Epoch 75/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.37391/1 
━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.3739 #> Epoch 76/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.31951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.3195 #> Epoch 77/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.26531/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.2653 #> Epoch 78/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.21201/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.2120 #> Epoch 79/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.15961/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.1596 #> Epoch 80/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.10781/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 30.1078 #> Epoch 81/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.05621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.0562 #> Epoch 82/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.00421/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.0042 #> Epoch 83/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.95161/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.9516 #> Epoch 84/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.89851/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.8985 #> Epoch 85/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.84501/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.8450 #> Epoch 86/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.79171/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.7917 #> Epoch 87/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.73871/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.7387 #> Epoch 88/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.68611/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.6861 #> Epoch 89/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.63381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.6338 #> Epoch 90/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.58161/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.5816 #> Epoch 91/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - 
loss: 29.52921/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.5292 #> Epoch 92/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.47661/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.4766 #> Epoch 93/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.42371/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.4237 #> Epoch 94/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.37081/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.3708 #> Epoch 95/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.31801/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.3180 #> Epoch 96/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.26531/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.2653 #> Epoch 97/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.21291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.2129 #> Epoch 98/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.16051/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.1605 #> Epoch 99/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.10831/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.1083 #> Epoch 100/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 29.05601/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 29.0560 #> [1] 4.854740 1.606937 #> (Intercept) x #> 0.5645856 1.9574191"},{"path":"https://ashesitr.github.io/reservr/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Alexander Rosenstock. Author, maintainer, copyright holder.","code":""},{"path":"https://ashesitr.github.io/reservr/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Rosenstock (2024). reservr: Fit Distributions Neural Networks Censored Truncated Data. 
R package version 0.0.3, https://github.com/AshesITR/reservr, https://ashesitr.github.io/reservr/.","code":"@Manual{, title = {reservr: Fit Distributions and Neural Networks to Censored and Truncated Data}, author = {Alexander Rosenstock}, year = {2024}, note = {R package version 0.0.3, https://github.com/AshesITR/reservr}, url = {https://ashesitr.github.io/reservr/}, }"},{"path":"https://ashesitr.github.io/reservr/index.html","id":"reservr","dir":"","previous_headings":"","what":"Fit Distributions and Neural Networks to Censored and Truncated Data","title":"Fit Distributions and Neural Networks to Censored and Truncated Data","text":"goal reservr provide flexible interface specifying distributions fitting (randomly) truncated possibly interval-censored data. provides custom fitting algorithms fit distributions ..d. samples well dynamic TensorFlow integration allow training neural networks arbitrary output distributions. latter can used include explanatory variables distributional fits. Reservr also provides tools relevant working core functionality actuarial setting, namely functions prob_report() truncate_claims(), make assumptions type random truncation applied data. Please refer vignettes distributions.Rmd tensorflow.Rmd detailed introductions.","code":""},{"path":"https://ashesitr.github.io/reservr/index.html","id":"installation","dir":"","previous_headings":"","what":"Installation","title":"Fit Distributions and Neural Networks to Censored and Truncated Data","text":"reservr yet CRAN. 
can install latest development version reservr via can install released version reservr CRAN : want use reservrs features, make sure also install tensorflow.","code":"devtools::install_github(\"AshesITR/reservr\") install.packages(\"reservr\")"},{"path":"https://ashesitr.github.io/reservr/index.html","id":"example","dir":"","previous_headings":"","what":"Example","title":"Fit Distributions and Neural Networks to Censored and Truncated Data","text":"basic example shows fit normal distribution randomly truncated censored data.","code":"library(reservr) set.seed(123) mu <- 0 sigma <- 1 N <- 1000 p_cens <- 0.8 x <- rnorm(N, mean = mu, sd = sigma) is_censored <- rbinom(N, size = 1L, prob = p_cens) == 1L x_lower <- x x_lower[is_censored] <- x[is_censored] - runif(sum(is_censored), min = 0, max = 0.5) x_upper <- x x_upper[is_censored] <- x[is_censored] + runif(sum(is_censored), min = 0, max = 0.5) t_lower <- runif(N, min = -2, max = 0) t_upper <- runif(N, min = 0, max = 2) is_observed <- t_lower <= x & x <= t_upper obs <- trunc_obs( xmin = pmax(x_lower, t_lower)[is_observed], xmax = pmin(x_upper, t_upper)[is_observed], tmin = t_lower[is_observed], tmax = t_upper[is_observed] ) # Summary of the simulation cat(sprintf( \"simulated samples: %d\\nobserved samples: %d\\ncensored samples: %d\\n\", N, nrow(obs), sum(is.na(obs$x)) )) # Define outcome distribution and perform fit to truncated and (partially) censored sample dist <- dist_normal() the_fit <- fit(dist, obs) # Visualize resulting parameters and show a kernel density estimate of the samples. # We replace interval-censored samples with their midpoint for the kernel density estimate. 
plot_distributions( true = dist, fitted = dist, empirical = dist_empirical(0.5 * (obs$xmin + obs$xmax)), .x = seq(-5, 5, length.out = 201), plots = \"density\", with_params = list( true = list(mean = mu, sd = sigma), fitted = the_fit$params ) )"},{"path":"https://ashesitr.github.io/reservr/index.html","id":"code-of-conduct","dir":"","previous_headings":"","what":"Code of Conduct","title":"Fit Distributions and Neural Networks to Censored and Truncated Data","text":"Please note reservr project released Contributor Code Conduct. contributing project, agree abide terms.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":null,"dir":"Reference","previous_headings":"","what":"Base class for Distributions — Distribution","title":"Base class for Distributions — Distribution","text":"Represents modifiable Distribution family","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Base class for Distributions — Distribution","text":"default_params Get set (non-recursive) default parameters Distribution param_bounds Get set (non-recursive) parameter bounds (box constraints) Distribution","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Base class for Distributions — Distribution","text":"Distribution$new() Distribution$sample() Distribution$density() Distribution$tf_logdensity() Distribution$probability() Distribution$tf_logprobability() Distribution$quantile() Distribution$hazard() Distribution$diff_density() Distribution$diff_probability() Distribution$is_in_support() Distribution$is_discrete_at() Distribution$tf_is_discrete_at() Distribution$has_capability() Distribution$get_type() Distribution$get_components() Distribution$is_discrete() 
Distribution$is_continuous() Distribution$require_capability() Distribution$get_dof() Distribution$get_placeholders() Distribution$get_params() Distribution$tf_make_constants() Distribution$tf_compile_params() Distribution$get_param_bounds() Distribution$get_param_constraints() Distribution$export_functions() Distribution$clone()","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$new(type, caps, params, name, default_params)"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"type Type distribution. string constant default implementation. Distributions non-constant type must override get_type() function. caps Character vector capabilities fuel default implementations has_capability() require_capability(). Distributions dynamic capabilities must override has_capability() function. params Initial parameter bounds structure, backing param_bounds active binding (usually list intervals). name Name Distribution class. CamelCase end \"Distribution\". 
default_params Initial fixed parameters backing default_params active binding (usually list numeric / NULLs).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Construct Distribution instance Used internally dist_* functions.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$sample(n, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"n number samples draw. with_params Distribution parameters use. parameter value can also numeric vector length n. case -th sample use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-1","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Sample Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"length n vector ..d. 
random samples Distribution specified parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential(rate = 2.0)$sample(10)"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$density(x, log = FALSE, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points evaluate density . log Flag. TRUE, return log-density instead. with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th density point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-2","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Density Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-1","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"numeric vector (log-)densities","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-1","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$density(c(1.0, 2.0), with_params = list(rate = 2.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_logdensity()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-3","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Compile TensorFlow function log-density evaluation","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-2","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"tf_function taking arguments x args returning log-density Distribution evaluated x parameters args.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$probability( q, lower.tail = TRUE, log.p = FALSE, 
with_params = list() )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-3","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"q Vector points evaluate probability function . lower.tail TRUE, return P(X <= q). Otherwise return P(X > q). log.p TRUE, probabilities returned log(p). with_params Distribution parameters use. parameter value can also numeric vector length length(q). case, -th probability point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-4","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Cumulative probability Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-3","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"numeric vector (log-)probabilities","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-2","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$probability( c(1.0, 2.0), with_params = list(rate = 2.0) )"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-5","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_logprobability()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-5","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Compile TensorFlow function log-probability 
evaluation","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-4","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"tf_function taking arguments qmin, qmax args returning log-probability Distribution evaluated closed interval [qmin, qmax] parameters args.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-6","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$quantile( p, lower.tail = TRUE, log.p = FALSE, with_params = list() )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-4","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"p Vector probabilities. lower.tail TRUE, return P(X <= q). Otherwise return P(X > q). log.p TRUE, probabilities returned log(p). with_params Distribution parameters use. parameter value can also numeric vector length length(p). 
case, -th quantile use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-6","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Quantile function Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-5","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"numeric vector quantiles","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-3","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$quantile(c(0.1, 0.5), with_params = list(rate = 2.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-7","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$hazard(x, log = FALSE, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-5","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points. log Flag. TRUE, return log-hazard instead. with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th hazard point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-7","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Hazard function Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-6","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"numeric vector (log-)hazards","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-4","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential(rate = 2.0)$hazard(c(1.0, 2.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-8","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$diff_density(x, log = FALSE, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-6","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points. log Flag. TRUE, return gradient log-density instead. with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th density point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-8","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Gradients density Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-7","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list structure containing (log-)density gradients free parameters Distribution evaluated x.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-5","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$diff_density( c(1.0, 2.0), with_params = list(rate = 2.0) )"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-9","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$diff_probability( q, lower.tail = TRUE, log.p = FALSE, with_params = list() )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-7","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"q Vector points evaluate probability function . lower.tail TRUE, return P(X <= q). Otherwise return P(X > q). log.p TRUE, probabilities returned log(p). with_params Distribution parameters use. parameter value can also numeric vector length length(q). 
case, -th probability point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-9","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Gradients cumulative probability Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-8","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list structure containing cumulative (log-)probability gradients free parameters Distribution evaluated q.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-6","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$diff_probability( c(1.0, 2.0), with_params = list(rate = 2.0) )"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-10","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$is_in_support(x, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-8","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-10","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Determine value support Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-9","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"logical vector length x indicating whether x part support distribution given parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-7","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential(rate = 1.0)$is_in_support(c(-1.0, 0.0, 1.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-11","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$is_discrete_at(x, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-9","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-11","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Determine value positive probability","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-10","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"logical vector length x indicating whether positive probability mass x given Distribution parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-8","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_dirac(point = 0.0)$is_discrete_at(c(0.0, 1.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-12","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_is_discrete_at()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-12","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Compile TensorFlow function discrete support checking","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-11","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"tf_function taking arguments x args returning whether Distribution point mass x given parameters args.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-13","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — 
Distribution","text":"","code":"Distribution$has_capability(caps)"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-10","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"caps Character vector capabilities","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-13","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Check capability present","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-12","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"logical vector length caps.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-9","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$has_capability(\"density\")"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-14","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_type()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-14","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get type Distribution. 
Type can one discrete, continuous mixed.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-13","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"string representing type Distribution.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-10","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$get_type() dist_dirac()$get_type() dist_mixture(list(dist_dirac(), dist_exponential()))$get_type() dist_mixture(list(dist_dirac(), dist_binomial()))$get_type()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-15","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_components()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-15","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get component Distributions transformed Distribution.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-14","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"possibly empty list Distributions","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-11","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_trunc(dist_exponential())$get_components() dist_dirac()$get_components() dist_mixture(list(dist_exponential(), 
dist_gamma()))$get_components()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-16","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$is_discrete()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-16","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Check Distribution discrete, .e. density respect counting measure.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-15","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"TRUE Distribution discrete, FALSE otherwise. Note mixed distributions discrete can point masses.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-12","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$is_discrete() dist_dirac()$is_discrete()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-17","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$is_continuous()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-17","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Check Distribution continuous, .e. density respect Lebesgue measure.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-16","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"TRUE Distribution continuous, FALSE otherwise. 
Note mixed distributions continuous.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-13","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$is_continuous() dist_dirac()$is_continuous()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-18","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$require_capability( caps, fun_name = paste0(sys.call(-1)[[1]], \"()\") )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-11","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"caps Character vector Capabilities require fun_name Friendly text use generating error message case failure.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-18","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Ensure Distribution required capabilities. 
throw error capability missing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-17","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"Invisibly TRUE.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-14","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$require_capability(\"diff_density\")"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-19","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_dof()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-19","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get number degrees freedom Distribution family. parameters without fixed default considered free.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-18","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"integer representing degrees freedom suitable e.g. 
AIC calculations.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-15","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$get_dof() dist_exponential(rate = 1.0)$get_dof()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-20","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_placeholders()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-20","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get Placeholders Distribution family. Returns list free parameters family. values NULL. Distribution Distributions parameters, placeholders computed recursively.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-19","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"named list containing combination (named unnamed) lists NULLs.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-16","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$get_placeholders() dist_mixture(list(dist_dirac(), dist_exponential()))$get_placeholders()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-21","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_params(with_params = 
list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-12","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"with_params Optional parameter overrides structure dist$get_params(). Given Parameter values expected length 1.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-21","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get full list parameters, possibly including placeholders.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-20","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list representing (recursive) parameter structure Distribution values specified parameters NULL free parameters missing Distributions parameters with_params.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-17","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_mixture(list(dist_dirac(), dist_exponential()))$get_params( with_params = list(probs = list(0.5, 0.5)) )"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-22","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_make_constants(with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-13","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"with_params Optional parameter overrides structure dist$tf_make_constants(). 
Given Parameter values expected length 1.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-22","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get list constant TensorFlow parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-21","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list representing (recursive) constant parameters Distribution values specified parameters. constant TensorFlow Tensor dtype floatx.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-23","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_compile_params(input, name_prefix = \"\")"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-14","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"input keras layer bind outputs name_prefix Prefix use layer names","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-23","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Compile distribution parameters tensorflow outputs","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-22","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list two elements outputs flat list keras output layers, one parameter. 
output_inflater function taking keras output layers transforming list structure suitable passing loss function returned tf_compile_model()","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-24","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_param_bounds()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-24","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get Interval bounds Distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-23","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list representing free (recursive) parameter structure Distribution Interval objects values representing bounds respective free parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-18","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_mixture( list(dist_dirac(), dist_exponential()), probs = list(0.5, 0.5) )$get_param_bounds() dist_mixture( list(dist_dirac(), dist_exponential()) )$get_param_bounds() dist_genpareto()$get_param_bounds() dist_genpareto1()$get_param_bounds()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-25","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_param_constraints()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-25","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get 
additional (non-linear) equality constraints Distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-24","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"NULL box constraints specified dist$get_param_bounds() sufficient, function taking full Distribution parameters returning either numeric vector (must 0 valid parameter combinations) list elements constraints: numeric vector constraints jacobian: Jacobi matrix constraints respect parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-19","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_mixture( list(dist_dirac(), dist_exponential()) )$get_param_constraints()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-26","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$export_functions( name, envir = parent.frame(), with_params = list() )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-15","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"name common suffix exported functions envir Environment export functions with_params Optional list parameters use default values exported functions","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-26","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Export sampling, density, probability quantile functions plain R functions Creates new functions envir named {r,d,p,q} implement dist$sample, dist$density, dist$probability dist$quantile plain 
functions default arguments specified with_params fixed parameters. resulting functions signatures taking parameters separate arguments.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-25","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"Invisibly NULL.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-20","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"tmp_env <- new.env(parent = globalenv()) dist_exponential()$export_functions( name = \"exp\", envir = tmp_env, with_params = list(rate = 2.0) ) evalq( fitdistrplus::fitdist(rexp(100), \"exp\"), envir = tmp_env )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Base class for Distributions — Distribution","text":"objects class cloneable method.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-27","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$clone(deep = FALSE)"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-16","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"deep Whether make deep clone.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"# Example for param_bounds: # Create an Exponential Distribution with rate constrained to (0, 2) # instead of (0, Inf) my_exp <- dist_exponential() my_exp$param_bounds$rate <- 
interval(c(0, 2)) my_exp$get_param_bounds() #> $rate #> (0, 2) #> fit_dist(my_exp, rexp(100, rate = 3), start = list(rate = 1))$params$rate #> [1] 2 ## ------------------------------------------------ ## Method `Distribution$sample` ## ------------------------------------------------ dist_exponential(rate = 2.0)$sample(10) #> [1] 0.13286615 0.01112249 0.07288815 1.51862540 0.08488557 0.42304793 #> [7] 0.10249917 0.08983756 0.25915838 0.33607531 ## ------------------------------------------------ ## Method `Distribution$density` ## ------------------------------------------------ dist_exponential()$density(c(1.0, 2.0), with_params = list(rate = 2.0)) #> [1] 0.27067057 0.03663128 ## ------------------------------------------------ ## Method `Distribution$probability` ## ------------------------------------------------ dist_exponential()$probability( c(1.0, 2.0), with_params = list(rate = 2.0) ) #> [1] 0.8646647 0.9816844 ## ------------------------------------------------ ## Method `Distribution$quantile` ## ------------------------------------------------ dist_exponential()$quantile(c(0.1, 0.5), with_params = list(rate = 2.0)) #> [1] 0.05268026 0.34657359 ## ------------------------------------------------ ## Method `Distribution$hazard` ## ------------------------------------------------ dist_exponential(rate = 2.0)$hazard(c(1.0, 2.0)) #> [1] 2 2 ## ------------------------------------------------ ## Method `Distribution$diff_density` ## ------------------------------------------------ dist_exponential()$diff_density( c(1.0, 2.0), with_params = list(rate = 2.0) ) #> $rate #> [1] -0.13533528 -0.05494692 #> ## ------------------------------------------------ ## Method `Distribution$diff_probability` ## ------------------------------------------------ dist_exponential()$diff_probability( c(1.0, 2.0), with_params = list(rate = 2.0) ) #> $rate #> [1] 0.13533528 0.03663128 #> ## ------------------------------------------------ ## Method `Distribution$is_in_support` ## 
------------------------------------------------ dist_exponential(rate = 1.0)$is_in_support(c(-1.0, 0.0, 1.0)) #> [1] FALSE FALSE TRUE ## ------------------------------------------------ ## Method `Distribution$is_discrete_at` ## ------------------------------------------------ dist_dirac(point = 0.0)$is_discrete_at(c(0.0, 1.0)) #> [1] TRUE FALSE ## ------------------------------------------------ ## Method `Distribution$has_capability` ## ------------------------------------------------ dist_exponential()$has_capability(\"density\") #> [1] TRUE ## ------------------------------------------------ ## Method `Distribution$get_type` ## ------------------------------------------------ dist_exponential()$get_type() #> [1] \"continuous\" dist_dirac()$get_type() #> [1] \"discrete\" dist_mixture(list(dist_dirac(), dist_exponential()))$get_type() #> [1] \"mixed\" dist_mixture(list(dist_dirac(), dist_binomial()))$get_type() #> [1] \"discrete\" ## ------------------------------------------------ ## Method `Distribution$get_components` ## ------------------------------------------------ dist_trunc(dist_exponential())$get_components() #> [[1]] #> An ExponentialDistribution with 1 dof #> dist_dirac()$get_components() #> list() dist_mixture(list(dist_exponential(), dist_gamma()))$get_components() #> [[1]] #> An ExponentialDistribution with 1 dof #> #> [[2]] #> A GammaDistribution with 2 dof #> ## ------------------------------------------------ ## Method `Distribution$is_discrete` ## ------------------------------------------------ dist_exponential()$is_discrete() #> [1] FALSE dist_dirac()$is_discrete() #> [1] TRUE ## ------------------------------------------------ ## Method `Distribution$is_continuous` ## ------------------------------------------------ dist_exponential()$is_continuous() #> [1] TRUE dist_dirac()$is_continuous() #> [1] FALSE ## ------------------------------------------------ ## Method `Distribution$require_capability` ## 
------------------------------------------------ dist_exponential()$require_capability(\"diff_density\") ## ------------------------------------------------ ## Method `Distribution$get_dof` ## ------------------------------------------------ dist_exponential()$get_dof() #> [1] 1 dist_exponential(rate = 1.0)$get_dof() #> [1] 0 ## ------------------------------------------------ ## Method `Distribution$get_placeholders` ## ------------------------------------------------ dist_exponential()$get_placeholders() #> $rate #> NULL #> dist_mixture(list(dist_dirac(), dist_exponential()))$get_placeholders() #> $dists #> $dists[[1]] #> $dists[[1]]$point #> NULL #> #> #> $dists[[2]] #> $dists[[2]]$rate #> NULL #> #> #> #> $probs #> $probs[[1]] #> NULL #> #> $probs[[2]] #> NULL #> #> ## ------------------------------------------------ ## Method `Distribution$get_params` ## ------------------------------------------------ dist_mixture(list(dist_dirac(), dist_exponential()))$get_params( with_params = list(probs = list(0.5, 0.5)) ) #> $dists #> $dists[[1]] #> $dists[[1]]$point #> NULL #> #> #> $dists[[2]] #> $dists[[2]]$rate #> NULL #> #> #> #> $probs #> $probs[[1]] #> [1] 0.5 #> #> $probs[[2]] #> [1] 0.5 #> #> ## ------------------------------------------------ ## Method `Distribution$get_param_bounds` ## ------------------------------------------------ dist_mixture( list(dist_dirac(), dist_exponential()), probs = list(0.5, 0.5) )$get_param_bounds() #> $dists #> $dists[[1]] #> $dists[[1]]$point #> (-Inf, Inf) #> #> #> $dists[[2]] #> $dists[[2]]$rate #> (0, Inf) #> #> #> #> $probs #> list() #> dist_mixture( list(dist_dirac(), dist_exponential()) )$get_param_bounds() #> $dists #> $dists[[1]] #> $dists[[1]]$point #> (-Inf, Inf) #> #> #> $dists[[2]] #> $dists[[2]]$rate #> (0, Inf) #> #> #> #> $probs #> $probs[[1]] #> [0, 1] #> #> $probs[[2]] #> [0, 1] #> #> dist_genpareto()$get_param_bounds() #> $u #> (-Inf, Inf) #> #> $sigmau #> (0, Inf) #> #> $xi #> (-Inf, Inf) #> 
dist_genpareto1()$get_param_bounds() #> $u #> (-Inf, Inf) #> #> $sigmau #> (0, Inf) #> #> $xi #> [0, 1] #> ## ------------------------------------------------ ## Method `Distribution$get_param_constraints` ## ------------------------------------------------ dist_mixture( list(dist_dirac(), dist_exponential()) )$get_param_constraints() #> function (params) #> { #> prob_mat <- do.call(cbind, params$probs) #> nms <- names(flatten_params(params)) #> jac_full <- matrix(0, nrow = nrow(prob_mat), ncol = length(nms)) #> jac_full[, grepl(\"^probs\", nms)] <- 1 #> list(constraints = rowSums(prob_mat) - 1, jacobian = jac_full) #> } #> ## ------------------------------------------------ ## Method `Distribution$export_functions` ## ------------------------------------------------ tmp_env <- new.env(parent = globalenv()) dist_exponential()$export_functions( name = \"exp\", envir = tmp_env, with_params = list(rate = 2.0) ) #> Exported `dexp()`. #> Exported `rexp()`. #> Exported `pexp()`. #> Exported `qexp()`. evalq( fitdistrplus::fitdist(rexp(100), \"exp\"), envir = tmp_env ) #> Fitting of the distribution ' exp ' by maximum likelihood #> Parameters: #> estimate Std. Error #> rate 2.131976 0.2131975"},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":null,"dir":"Reference","previous_headings":"","what":"The Generalized Pareto Distribution (GPD) — GenPareto","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"functions provide information generalized Pareto distribution threshold u. 
dgpd gives density, pgpd gives distribution function, qgpd gives quantile function rgpd generates random deviates.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"","code":"rgpd(n = 1L, u = 0, sigmau = 1, xi = 0) dgpd(x, u = 0, sigmau = 1, xi = 0, log = FALSE) pgpd(q, u = 0, sigmau = 1, xi = 0, lower.tail = TRUE, log.p = FALSE) qgpd(p, u = 0, sigmau = 1, xi = 0, lower.tail = TRUE, log.p = FALSE)"},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"n integer number observations. u threshold parameter (minimum value). sigmau scale parameter (must positive). xi shape parameter x, q vector quantiles. log, log.p logical; TRUE, probabilities/densities p given log(p). lower.tail logical; TRUE (default), probabilities \\(P(X \\le x)\\), otherwise \\(P(X > x)\\). p vector probabilities.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"rgpd generates random deviates. dgpd gives density. pgpd gives distribution function. qgpd gives quantile function.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"u, sigmau xi specified, assume default values 0, 1 0 respectively. generalized Pareto distribution density $$f(x) = 1 / \\sigma_u (1 + \\xi z)^(- 1 / \\xi - 1)$$ \\(z = (x - u) / \\sigma_u\\) \\(f(x) = exp(-z)\\) \\(\\xi\\) 0. 
support \\(x \\ge u\\) \\(\\xi \\ge 0\\) \\(u \\le x \\le u - \\sigma_u / \\xi\\) \\(\\xi < 0\\). Expected value exists \\(\\xi < 1\\) equal $$E(X) = u + \\sigma_u / (1 - \\xi)$$ k-th moments exist general \\(k\\xi < 1\\).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"references","dir":"Reference","previous_headings":"","what":"References","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"https://en.wikipedia.org/wiki/Generalized_Pareto_distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"","code":"x <- rgpd(1000, u = 1, sigmau = 0.5, xi = 0.1) xx <- seq(-1, 10, 0.01) hist(x, breaks = 100, freq = FALSE, xlim = c(-1, 10)) lines(xx, dgpd(xx, u = 1, sigmau = 0.5, xi = 0.1)) plot(xx, dgpd(xx, u = 1, sigmau = 1, xi = 0), type = \"l\") lines(xx, dgpd(xx, u = 0.5, sigmau = 1, xi = -0.3), col = \"blue\", lwd = 2) lines(xx, dgpd(xx, u = 1.5, sigmau = 1, xi = 0.3), col = \"red\", lwd = 2) plot(xx, dgpd(xx, u = 1, sigmau = 1, xi = 0), type = \"l\") lines(xx, dgpd(xx, u = 1, sigmau = 0.5, xi = 0), col = \"blue\", lwd = 2) lines(xx, dgpd(xx, u = 1, sigmau = 2, xi = 0), col = \"red\", lwd = 2)"},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":null,"dir":"Reference","previous_headings":"","what":"The Pareto Distribution — Pareto","title":"The Pareto Distribution — Pareto","text":"functions provide information Pareto distribution. 
dpareto gives density, ppareto gives distribution function, qpareto gives quantile function rpareto generates random deviates.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"The Pareto Distribution — Pareto","text":"","code":"rpareto(n = 1L, shape = 0, scale = 1) dpareto(x, shape = 1, scale = 1, log = FALSE) ppareto(q, shape = 1, scale = 1, lower.tail = TRUE, log.p = FALSE) qpareto(p, shape = 1, scale = 1, lower.tail = TRUE, log.p = FALSE)"},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"The Pareto Distribution — Pareto","text":"n integer number observations. shape shape parameter (must positive). scale scale parameter (must positive). x, q vector quantiles. log, log.p logical; TRUE, probabilities/densities p given log(p). lower.tail logical; TRUE (default), probabilities \\(P(X \\le x)\\), otherwise \\(P(X > x)\\). p vector probabilities.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"The Pareto Distribution — Pareto","text":"rpareto generates random deviates. dpareto gives density. ppareto gives distribution function. qpareto gives quantile function.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"The Pareto Distribution — Pareto","text":"shape scale specified, assume default values 1. Pareto distribution scale \\(\\theta\\) shape \\(\\xi\\) density $$f(x) = \\xi \\theta^\\xi / (x + \\theta)^(\\xi + 1)$$ support \\(x \\ge 0\\). 
Expected value exists \\(\\xi > 1\\) equal $$E(X) = \\theta / (\\xi - 1)$$ k-th moments exist general \\(k < \\xi\\).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"references","dir":"Reference","previous_headings":"","what":"References","title":"The Pareto Distribution — Pareto","text":"https://en.wikipedia.org/wiki/Pareto_distribution - named Lomax therein.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"The Pareto Distribution — Pareto","text":"","code":"x <- rpareto(1000, shape = 10, scale = 5) xx <- seq(-1, 10, 0.01) hist(x, breaks = 100, freq = FALSE, xlim = c(-1, 10)) lines(xx, dpareto(xx, shape = 10, scale = 5)) plot(xx, dpareto(xx, shape = 10, scale = 5), type = \"l\") lines(xx, dpareto(xx, shape = 3, scale = 5), col = \"red\", lwd = 2) plot(xx, dpareto(xx, shape = 10, scale = 10), type = \"l\") lines(xx, dpareto(xx, shape = 10, scale = 5), col = \"blue\", lwd = 2) lines(xx, dpareto(xx, shape = 10, scale = 20), col = \"red\", lwd = 2)"},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":null,"dir":"Reference","previous_headings":"","what":"Convert TensorFlow tensors to distribution parameters recursively — as_params","title":"Convert TensorFlow tensors to distribution parameters recursively — as_params","text":"Convert TensorFlow tensors distribution parameters recursively","code":""},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convert TensorFlow tensors to distribution parameters recursively — as_params","text":"","code":"as_params(x)"},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convert TensorFlow tensors to distribution parameters recursively — 
as_params","text":"x possibly nested list structure tensorflow.tensors","code":""},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Convert TensorFlow tensors to distribution parameters recursively — as_params","text":"nested list vectors suitable distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Convert TensorFlow tensors to distribution parameters recursively — as_params","text":"","code":"if (interactive()) { tf_params <- list( probs = k_matrix(t(c(0.5, 0.3, 0.2))), shapes = k_matrix(t(c(1L, 2L, 3L)), dtype = \"int32\"), scale = keras3::as_tensor(1.0, keras3::config_floatx()) ) params <- as_params(tf_params) dist <- dist_erlangmix(vector(\"list\", 3L)) dist$sample(10L, with_params = params) }"},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":null,"dir":"Reference","previous_headings":"","what":"Transition functions for blended distributions — blended_transition","title":"Transition functions for blended distributions — blended_transition","text":"Transition functions blended distributions","code":""},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Transition functions for blended distributions — blended_transition","text":"","code":"blended_transition(x, u, eps, .gradient = FALSE, .extend_na = FALSE) blended_transition_inv(x, u, eps, .component)"},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Transition functions for blended distributions — blended_transition","text":"x Points evaluate u Sorted vector blending thresholds, rowwise sorted matrix blending 
thresholds eps Corresponding vector matrix blending bandwidths. Must positive dimensions u, scalar. rowwise blending regions (u - eps, u + eps) may overlap. .gradient Also evaluate gradient respect x? .extend_na Extend -range transitions last -range value (.e. corresponding u) NA? .component Component index (length(u) + 1) invert.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Transition functions for blended distributions — blended_transition","text":"blended_transition returns matrix length(x) rows length(u) + 1 columns containing transformed values blending components. .gradient TRUE, attribute \"gradient\" attached dimensions, containing derivative respective transition component respect x. blended_transition_inv returns vector length(x) values containing inverse transformed values .componentth blending component.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Transition functions for blended distributions — blended_transition","text":"","code":"library(ggplot2) xx <- seq(from = 0, to = 20, length.out = 101) blend_mat <- blended_transition(xx, u = 10, eps = 3, .gradient = TRUE) ggplot( data.frame( x = rep(xx, 2L), fun = rep(c(\"p\", \"q\"), each = length(xx)), y = as.numeric(blend_mat), relevant = c(xx <= 13, xx >= 7) ), aes(x = x, y = y, color = fun, linetype = relevant) ) %+% geom_line() %+% theme_bw() %+% theme( legend.position = \"bottom\", legend.box = \"horizontal\" ) %+% guides(color = guide_legend(direction = \"horizontal\", title = \"\"), linetype = guide_none()) %+% scale_linetype_manual(values = c(\"TRUE\" = 1, \"FALSE\" = 3)) ggplot( data.frame( x = rep(xx, 2L), fun = rep(c(\"p'\", \"q'\"), each = length(xx)), y = as.numeric(attr(blend_mat, \"gradient\")), relevant = c(xx <= 13, xx >= 7) ), aes(x = 
x, y = y, color = fun, linetype = relevant) ) %+% geom_line() %+% theme_bw() %+% theme( legend.position = \"bottom\", legend.box = \"horizontal\" ) %+% guides(color = guide_legend(direction = \"horizontal\", title = \"\"), linetype = guide_none()) %+% scale_linetype_manual(values = c(\"TRUE\" = 1, \"FALSE\" = 3))"},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":null,"dir":"Reference","previous_headings":"","what":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"Provides keras callback similar keras3::callback_reduce_lr_on_plateau() also restores weights best seen far whenever learning rate reduction occurs, slightly restrictive improvement detection.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"","code":"callback_adaptive_lr( monitor = \"val_loss\", factor = 0.1, patience = 10L, verbose = 0L, mode = c(\"auto\", \"min\", \"max\"), delta_abs = 1e-04, delta_rel = 0, cooldown = 0L, min_lr = 0, restore_weights = TRUE )"},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"monitor quantity monitored. factor factor learning rate reduced. new_lr = old_lr * factor. patience number epochs significant improvement learning rate reduced. verbose integer. Set 1 receive update messages. mode Optimisation mode. \"auto\" detects mode name monitor. \"min\" monitors decreasing metrics. \"max\" monitors increasing metrics. 
delta_abs Minimum absolute metric improvement per epoch. learning rate reduced average improvement less delta_abs per epoch patience epochs. delta_rel Minimum relative metric improvement per epoch. learning rate reduced average improvement less |metric| * delta_rel per epoch patience epochs. cooldown number epochs wait resuming normal operation learning rate reduced. minimum number epochs two learning rate reductions patience + cooldown. min_lr lower bound learning rate. learning rate reduction lower learning rate min_lr, clipped min_lr instead reductions performed. restore_weights Bool. TRUE, best weights restored learning rate reduction. useful metric oscillates.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"KerasCallback suitable passing keras3::fit().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"Note keras3::callback_reduce_lr_on_plateau() automatically logs learning rate metric 'lr', currently impossible R. 
Thus, want also log learning rate, add keras3::callback_reduce_lr_on_plateau() high min_lr effectively disable callback still monitor learning rate.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"","code":"dist <- dist_exponential() group <- sample(c(0, 1), size = 100, replace = TRUE) x <- dist$sample(100, with_params = list(rate = group + 1)) global_fit <- fit(dist, x) if (interactive()) { library(keras3) l_in <- layer_input(shape = 1L) mod <- tf_compile_model( inputs = list(l_in), intermediate_output = l_in, dist = dist, optimizer = optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_initialise_model(mod, global_fit$params) fit_history <- fit( mod, x = as_tensor(group, config_floatx()), y = as_trunc_obs(x), epochs = 20L, callbacks = list( callback_adaptive_lr(\"loss\", factor = 0.5, patience = 2L, verbose = 1L, min_lr = 1.0e-4), callback_reduce_lr_on_plateau(\"loss\", min_lr = 1.0) # to track lr ) ) plot(fit_history) predicted_means <- predict(mod, data = as_tensor(c(0, 1), config_floatx())) }"},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":null,"dir":"Reference","previous_headings":"","what":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"Provides keras callback monitor individual components censored truncated likelihood. 
Useful debugging TensorFlow implementations Distributions.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"","code":"callback_debug_dist_gradients( object, data, obs, keep_grads = FALSE, stop_on_na = TRUE, verbose = TRUE )"},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"object reservr_keras_model created tf_compile_model(). data Input data model. obs Observations associated data. keep_grads Log actual gradients? (memory hungry!) stop_on_na Stop likelihood component NaN gradients? verbose Print message training halted? Message contain information likelihood components NaN gradients.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"KerasCallback suitable passing keras3::fit().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"","code":"dist <- dist_exponential() group <- sample(c(0, 1), size = 100, replace = TRUE) x <- dist$sample(100, with_params = list(rate = group + 1)) global_fit <- fit(dist, x) if (interactive()) { library(keras3) l_in <- layer_input(shape = 1L) mod <- tf_compile_model( inputs = list(l_in), intermediate_output = l_in, dist = dist, optimizer = 
optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_initialise_model(mod, global_fit$params) gradient_tracker <- callback_debug_dist_gradients( mod, as_tensor(group, config_floatx()), x, keep_grads = TRUE ) fit_history <- fit( mod, x = as_tensor(group, config_floatx()), y = x, epochs = 20L, callbacks = list( callback_adaptive_lr(\"loss\", factor = 0.5, patience = 2L, verbose = 1L, min_lr = 1.0e-4), gradient_tracker, callback_reduce_lr_on_plateau(\"loss\", min_lr = 1.0) # to track lr ) ) gradient_tracker$gradient_logs[[20]]$dens plot(fit_history) predicted_means <- predict(mod, data = as_tensor(c(0, 1), config_floatx())) }"},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":null,"dir":"Reference","previous_headings":"","what":"Construct a BDEGP-Family — dist_bdegp","title":"Construct a BDEGP-Family — dist_bdegp","text":"Constructs BDEGP-Family distribution fixed number components blending interval.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Construct a BDEGP-Family — dist_bdegp","text":"","code":"dist_bdegp(n, m, u, epsilon)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Construct a BDEGP-Family — dist_bdegp","text":"n Number dirac components, starting point mass 0. m Number erlang components, translated n - 0.5. u Blending cut-, must positive real. epsilon Blending radius, must positive real less u. blending interval u - epsilon < x < u + epsilon.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Construct a BDEGP-Family — dist_bdegp","text":"MixtureDistribution n DiracDistributions 0 .. 
n - 1 BlendedDistribution object child Distributions TranslatedDistribution offset n - 0.5 ErlangMixtureDistribution m shapes GeneralizedParetoDistribution shape parameter restricted [0, 1] location parameter fixed u break u bandwidth epsilon.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Construct a BDEGP-Family — dist_bdegp","text":"","code":"dist <- dist_bdegp(n = 1, m = 2, u = 10, epsilon = 3) params <- list( dists = list( list(), list( dists = list( list( dist = list( shapes = list(1L, 2L), scale = 1.0, probs = list(0.7, 0.3) ) ), list( sigmau = 1.0, xi = 0.1 ) ), probs = list(0.1, 0.9) ) ), probs = list(0.95, 0.05) ) x <- dist$sample(100, with_params = params) plot_distributions( theoretical = dist, empirical = dist_empirical(x), .x = seq(0, 20, length.out = 101), with_params = list(theoretical = params) ) #> Warning: Removed 33 rows containing missing values or values outside the scale range #> (`geom_line()`)."},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":null,"dir":"Reference","previous_headings":"","what":"Beta Distribution — dist_beta","title":"Beta Distribution — dist_beta","text":"See stats::Beta","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Beta Distribution — dist_beta","text":"","code":"dist_beta(shape1 = NULL, shape2 = NULL, ncp = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Beta Distribution — dist_beta","text":"shape1 First scalar shape parameter, NULL placeholder. shape2 Second scalar shape parameter, NULL placeholder. 
ncp Scalar non-centrality parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Beta Distribution — dist_beta","text":"BetaDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Beta Distribution — dist_beta","text":"parameters can overridden with_params = list(shape = ..., scale = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Beta Distribution — dist_beta","text":"","code":"d_beta <- dist_beta(shape1 = 2, shape2 = 2, ncp = 0) x <- d_beta$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_beta, estimated = d_beta, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"beta\")$estimate ) ), .x = seq(0, 2, length.out = 100) ) #> Warning: Removed 141 rows containing missing values or values outside the scale range #> (`geom_line()`)."},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":null,"dir":"Reference","previous_headings":"","what":"Binomial Distribution — dist_binomial","title":"Binomial Distribution — dist_binomial","text":"See stats::Binomial","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Binomial Distribution — dist_binomial","text":"","code":"dist_binomial(size = NULL, prob = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Binomial Distribution — dist_binomial","text":"size Number trials parameter (integer), NULL 
placeholder. prob Success probability parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Binomial Distribution — dist_binomial","text":"BinomialDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Binomial Distribution — dist_binomial","text":"parameters can overridden with_params = list(size = ..., prob = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Binomial Distribution — dist_binomial","text":"","code":"d_binom <- dist_binomial(size = 10, prob = 0.5) x <- d_binom$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_binom, estimated = d_binom, with_params = list( estimated = list( size = max(x), prob = mean(x) / max(x) ) ), .x = 0:max(x) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":null,"dir":"Reference","previous_headings":"","what":"Blended distribution — dist_blended","title":"Blended distribution — dist_blended","text":"Blended distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Blended distribution — dist_blended","text":"","code":"dist_blended(dists, probs = NULL, breaks = NULL, bandwidths = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Blended distribution — dist_blended","text":"dists list k >= 2 component Distributions. probs k Mixture weight parameters breaks k - 1 Centers blending zones. 
dists[] blend dists[+ 1] around breaks[]. bandwidths k - 1 Radii blending zones. -th blending zone begin breaks[] - bandwidths[] end breaks[] + bandwidths[]. bandwidth 0 corresponds hard cut-, .e. jump discontinuity density blended Distribution.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Blended distribution — dist_blended","text":"BlendedDistribution object.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Blended distribution — dist_blended","text":"","code":"bd <- dist_blended( list( dist_normal(mean = 0.0, sd = 1.0), dist_genpareto(u = 3.0, sigmau = 1.0, xi = 3.0) ), breaks = list(3.0), bandwidths = list(0.5), probs = list(0.9, 0.1) ) plot_distributions( bd, .x = seq(-3, 10, length.out = 100), plots = c(\"d\", \"p\") )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":null,"dir":"Reference","previous_headings":"","what":"Dirac (degenerate point) Distribution — dist_dirac","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"degenerate distribution mass single point.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"","code":"dist_dirac(point = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"point point probability mass 1.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Dirac (degenerate point) Distribution — 
dist_dirac","text":"DiracDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"parameter can overridden with_params = list(point = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"","code":"d_dirac <- dist_dirac(1.5) d_dirac$sample(2L) #> [1] 1.5 1.5 d_dirac$sample(2L, list(point = 42.0)) #> [1] 42 42"},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":null,"dir":"Reference","previous_headings":"","what":"Discrete Distribution — dist_discrete","title":"Discrete Distribution — dist_discrete","text":"full-flexibility discrete distribution values 1 size.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Discrete Distribution — dist_discrete","text":"","code":"dist_discrete(size = NULL, probs = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Discrete Distribution — dist_discrete","text":"size Number classes parameter (integer). Required probs NULL. 
probs Vector probabilties parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Discrete Distribution — dist_discrete","text":"DiscreteDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Discrete Distribution — dist_discrete","text":"Parameters can overridden with_params = list(probs = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Discrete Distribution — dist_discrete","text":"","code":"d_discrete <- dist_discrete(probs = list(0.5, 0.25, 0.15, 0.1)) x <- d_discrete$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_discrete, estimated = d_discrete, with_params = list( estimated = list( size = max(x), probs = as.list(unname(table(x)) / 100) ) ), .x = 0:max(x) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":null,"dir":"Reference","previous_headings":"","what":"Empirical distribution — dist_empirical","title":"Empirical distribution — dist_empirical","text":"Creates empirical distribution object sample. Assumes iid. samples. 
with_params used distribution estimation relevant indicators happens construction.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Empirical distribution — dist_empirical","text":"","code":"dist_empirical(sample, positive = FALSE, bw = \"nrd0\")"},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Empirical distribution — dist_empirical","text":"sample Sample build empirical distribution positive underlying distribution known positive? effect density estimation procedure. positive = FALSE uses kernel density estimate produced density(), positive = TRUE uses log-kernel density estimate produced logKDE::logdensity_fft(). latter can improve density estimation near zero. bw Bandwidth parameter density estimation. Passed density estimation function selected positive.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Empirical distribution — dist_empirical","text":"EmpiricalDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Empirical distribution — dist_empirical","text":"sample() samples iid. sample. approach similar bootstrapping. density() evaluates kernel density estimate, approximating zero outside known support. estimate either obtained using stats::density logKDE::logdensity_fft, depending positive. probability() evaluates empirical cumulative density function obtained stats::ecdf. 
quantile() evaluates empirical quantiles using stats::quantile hazard() estimates hazard rate using density estimate empirical cumulative density function: h(t) = df(t) / (1 - cdf(t)).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Empirical distribution — dist_empirical","text":"","code":"x <- rexp(20, rate = 1) dx <- dist_empirical(sample = x, positive = TRUE) y <- rnorm(20) dy <- dist_empirical(sample = y) plot_distributions( exponential = dx, normal = dy, .x = seq(-3, 3, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":null,"dir":"Reference","previous_headings":"","what":"Erlang Mixture distribution — dist_erlangmix","title":"Erlang Mixture distribution — dist_erlangmix","text":"Erlang Mixture distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Erlang Mixture distribution — dist_erlangmix","text":"","code":"dist_erlangmix(shapes, scale = NULL, probs = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Erlang Mixture distribution — dist_erlangmix","text":"shapes Shape parameters, trunc_erlangmix fit, NULL placeholder. scale Common scale parameter, NULL placeholder. 
probs Mixing probabilities, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Erlang Mixture distribution — dist_erlangmix","text":"ErlangMixtureDistribution object.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Erlang Mixture distribution — dist_erlangmix","text":"","code":"params <- list(scale = 1.0, probs = list(0.5, 0.3, 0.2), shapes = list(1L, 2L, 3L)) dist <- dist_erlangmix(vector(\"list\", 3L)) x <- dist$sample(20, with_params = params) d_emp <- dist_empirical(x, positive = TRUE) plot_distributions( empirical = d_emp, theoretical = dist, with_params = list( theoretical = params ), .x = seq(1e-4, 5, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":null,"dir":"Reference","previous_headings":"","what":"Exponential distribution — dist_exponential","title":"Exponential distribution — dist_exponential","text":"See stats::Exponential.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Exponential distribution — dist_exponential","text":"","code":"dist_exponential(rate = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Exponential distribution — dist_exponential","text":"rate Scalar rate parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Exponential distribution — dist_exponential","text":"ExponentialDistribution 
object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Exponential distribution — dist_exponential","text":"parameter can overridden with_params = list(rate = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Exponential distribution — dist_exponential","text":"","code":"rate <- 1 d_exp <- dist_exponential() x <- d_exp$sample(20, with_params = list(rate = rate)) d_emp <- dist_empirical(x, positive = TRUE) plot_distributions( empirical = d_emp, theoretical = d_exp, estimated = d_exp, with_params = list( theoretical = list(rate = rate), estimated = list(rate = 1 / mean(x)) ), .x = seq(1e-4, 5, length.out = 100) ) #> Warning: Removed 27 rows containing missing values or values outside the scale range #> (`geom_line()`)."},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":null,"dir":"Reference","previous_headings":"","what":"Gamma distribution — dist_gamma","title":"Gamma distribution — dist_gamma","text":"See stats::GammaDist.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Gamma distribution — dist_gamma","text":"","code":"dist_gamma(shape = NULL, rate = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Gamma distribution — dist_gamma","text":"shape Scalar shape parameter, NULL placeholder. 
rate Scalar rate parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Gamma distribution — dist_gamma","text":"GammaDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Gamma distribution — dist_gamma","text":"parameters can overridden with_params = list(shape = ..., rate = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Gamma distribution — dist_gamma","text":"","code":"alpha <- 2 beta <- 2 d_gamma <- dist_gamma(shape = alpha, rate = beta) x <- d_gamma$sample(100) d_emp <- dist_empirical(x, positive = TRUE) plot_distributions( empirical = d_emp, theoretical = d_gamma, estimated = d_gamma, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"gamma\")$estimate ) ), .x = seq(1e-3, max(x), length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":null,"dir":"Reference","previous_headings":"","what":"Generalized Pareto Distribution — dist_genpareto","title":"Generalized Pareto Distribution — dist_genpareto","text":"See evmix::gpd","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Generalized Pareto Distribution — dist_genpareto","text":"","code":"dist_genpareto(u = NULL, sigmau = NULL, xi = NULL) dist_genpareto1(u = NULL, sigmau = NULL, xi = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Generalized Pareto Distribution — dist_genpareto","text":"u Scalar 
location parameter, NULL placeholder. sigmau Scalar scale parameter, NULL placeholder. xi Scalar shape parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Generalized Pareto Distribution — dist_genpareto","text":"GeneralizedParetoDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Generalized Pareto Distribution — dist_genpareto","text":"parameters can overridden with_params = list(u = ..., sigmau = ..., xi = ...). dist_genpareto1 equivalent dist_genpareto enforces bound constraints xi [0, 1]. ensures unboundedness finite expected value unless xi == 1.0.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Generalized Pareto Distribution — dist_genpareto","text":"","code":"d_genpareto <- dist_genpareto(u = 0, sigmau = 1, xi = 1) x <- d_genpareto$sample(100) d_emp <- dist_empirical(x) d_genpareto$export_functions(\"gpd\") # so fitdistrplus finds it #> Exported `dgpd()`. #> Exported `rgpd()`. #> Exported `pgpd()`. #> Exported `qgpd()`. 
plot_distributions( empirical = d_emp, theoretical = d_genpareto, estimated = d_genpareto, with_params = list( estimated = fit(dist_genpareto(), x)$params ), .x = seq(0, 5, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":null,"dir":"Reference","previous_headings":"","what":"Log Normal distribution — dist_lognormal","title":"Log Normal distribution — dist_lognormal","text":"See stats::Lognormal.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Log Normal distribution — dist_lognormal","text":"","code":"dist_lognormal(meanlog = NULL, sdlog = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Log Normal distribution — dist_lognormal","text":"meanlog Scalar mean parameter log scale, NULL placeholder. sdlog Scalar standard deviation parameter log scale, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Log Normal distribution — dist_lognormal","text":"LognormalDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Log Normal distribution — dist_lognormal","text":"parameters can overridden with_params = list(meanlog = ..., sdlog = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Log Normal distribution — dist_lognormal","text":"","code":"mu <- 0 sigma <- 1 d_lnorm <- dist_lognormal(meanlog = mu, sdlog = sigma) x <- d_lnorm$sample(20) d_emp <- dist_empirical(x, positive = TRUE) 
plot_distributions( empirical = d_emp, theoretical = d_lnorm, estimated = d_lnorm, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"lnorm\")$estimate ) ), .x = seq(1e-3, 5, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":null,"dir":"Reference","previous_headings":"","what":"Mixture distribution — dist_mixture","title":"Mixture distribution — dist_mixture","text":"Parameters mixing components can overridden with_params = list(dists = list(..., ..., ...)). Mixing probabilities can overridden with_params = list(probs = list(..., ..., ...)). number components overridden.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Mixture distribution — dist_mixture","text":"","code":"dist_mixture(dists = list(), probs = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Mixture distribution — dist_mixture","text":"dists list mixing distributions. May contain placeholders duplicates. probs list mixing probabilities length dists. normalized sum one NULL can used placeholder within probs. 
reduce number required parameters, probs least partly specified (probs = list(NULL, NULL, ..., 1) k - 1 NULLs k number mixing components).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Mixture distribution — dist_mixture","text":"MixtureDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Mixture distribution — dist_mixture","text":"support quantile() capability!","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Mixture distribution — dist_mixture","text":"","code":"# A complicated way to define a uniform distribution on \\[0, 2\\] dist_mixture( dists = list( dist_uniform(min = 0, max = 1), dist_uniform(min = 1, max = 2) ), probs = list(0.5, 0.5) ) #> A Mixture with 0 dof"},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":null,"dir":"Reference","previous_headings":"","what":"Negative binomial Distribution — dist_negbinomial","title":"Negative binomial Distribution — dist_negbinomial","text":"See stats::NegBinomial","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Negative binomial Distribution — dist_negbinomial","text":"","code":"dist_negbinomial(size = NULL, mu = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Negative binomial Distribution — dist_negbinomial","text":"size Number successful trials parameter, NULL placeholder. Non-integer values > 0 allowed. 
mu Mean parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Negative binomial Distribution — dist_negbinomial","text":"NegativeBinomialDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Negative binomial Distribution — dist_negbinomial","text":"parameters can overridden with_params = list(size = ..., prob = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Negative binomial Distribution — dist_negbinomial","text":"","code":"d_nbinom <- dist_negbinomial(size = 3.5, mu = 8.75) x <- d_nbinom$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_nbinom, estimated = d_nbinom, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"nbinom\")$estimate ) ), .x = 0:max(x) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":null,"dir":"Reference","previous_headings":"","what":"Normal distribution — dist_normal","title":"Normal distribution — dist_normal","text":"See stats::Normal.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Normal distribution — dist_normal","text":"","code":"dist_normal(mean = NULL, sd = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Normal distribution — dist_normal","text":"mean Scalar mean parameter, NULL placeholder. 
sd Scalar standard deviation parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Normal distribution — dist_normal","text":"NormalDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Normal distribution — dist_normal","text":"parameters can overridden with_params = list(mean = ..., sd = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Normal distribution — dist_normal","text":"","code":"mu <- 0 sigma <- 1 d_norm <- dist_normal(mean = mu, sd = sigma) x <- d_norm$sample(20) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_norm, estimated = d_norm, with_params = list( estimated = list(mean = mean(x), sd = sd(x)) ), .x = seq(-3, 3, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":null,"dir":"Reference","previous_headings":"","what":"Pareto Distribution — dist_pareto","title":"Pareto Distribution — dist_pareto","text":"See Pareto","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Pareto Distribution — dist_pareto","text":"","code":"dist_pareto(shape = NULL, scale = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Pareto Distribution — dist_pareto","text":"shape Scalar shape parameter, NULL placeholder. 
scale Scalar scale parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Pareto Distribution — dist_pareto","text":"ParetoDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Pareto Distribution — dist_pareto","text":"parameters can overridden with_params = list(shape = ..., scale = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Pareto Distribution — dist_pareto","text":"","code":"d_pareto <- dist_pareto(shape = 3, scale = 1) x <- d_pareto$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_pareto, estimated = d_pareto, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"pareto\")$estimate ) ), .x = seq(0, 2, length.out = 100) ) #> Warning: The dpareto function should return a vector of with NaN values when input has inconsistent values and not raise an error"},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":null,"dir":"Reference","previous_headings":"","what":"Poisson Distribution — dist_poisson","title":"Poisson Distribution — dist_poisson","text":"See stats::Poisson","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Poisson Distribution — dist_poisson","text":"","code":"dist_poisson(lambda = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Poisson Distribution — dist_poisson","text":"lambda Scalar rate parameter, NULL 
placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Poisson Distribution — dist_poisson","text":"PoissonDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Poisson Distribution — dist_poisson","text":"parameter can overridden with_params = list(lambda = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Poisson Distribution — dist_poisson","text":"","code":"d_pois <- dist_poisson(lambda = 5.0) x <- d_pois$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_pois, estimated = d_pois, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"pois\")$estimate ) ), .x = 0:max(x) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":null,"dir":"Reference","previous_headings":"","what":"Translated distribution — dist_translate","title":"Translated distribution — dist_translate","text":"Translated distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Translated distribution — dist_translate","text":"","code":"dist_translate(dist = NULL, offset = NULL, multiplier = 1)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Translated distribution — dist_translate","text":"dist underlying distribution, NULL placeholder. offset Offset added observation, NULL placeholder. 
multiplier Factor multiply observation, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Translated distribution — dist_translate","text":"TranslatedDistribution object.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Translated distribution — dist_translate","text":"","code":"d_norm <- dist_normal(mean = 0, sd = 1) d_tnorm <- dist_translate(dist = d_norm, offset = 1) plot_distributions(d_norm, d_tnorm, .x = seq(-2, 3, length.out = 100))"},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":null,"dir":"Reference","previous_headings":"","what":"Truncated distribution — dist_trunc","title":"Truncated distribution — dist_trunc","text":"Truncated distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Truncated distribution — dist_trunc","text":"","code":"dist_trunc(dist = NULL, min = NULL, max = NULL, offset = 0, max_retry = 100)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Truncated distribution — dist_trunc","text":"dist underlying distribution, NULL placeholder. min Minimum value truncate (exclusive), NULL placeholder. max Maximum value truncate (inclusive), NULL placeholder. offset Offset added observation truncation, NULL placeholder. Truncation dist occur (min, max]. offset added deterministically. 
max_retry Maximum number resample attempts trying sample rejection.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Truncated distribution — dist_trunc","text":"TruncatedDistribution object.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Truncated distribution — dist_trunc","text":"","code":"d_norm <- dist_normal(mean = 0, sd = 1) d_tnorm <- dist_trunc(dist = d_norm, min = -2, max = 2, offset = 1) plot_distributions(d_norm, d_tnorm, .x = seq(-2, 3, length.out = 100))"},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":null,"dir":"Reference","previous_headings":"","what":"Uniform distribution — dist_uniform","title":"Uniform distribution — dist_uniform","text":"See stats::Uniform","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Uniform distribution — dist_uniform","text":"","code":"dist_uniform(min = NULL, max = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Uniform distribution — dist_uniform","text":"min Lower limit, NULL placeholder. 
max Upper limit, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Uniform distribution — dist_uniform","text":"UniformDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Uniform distribution — dist_uniform","text":"parameters can overridden with_params = list(min = ..., max = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Uniform distribution — dist_uniform","text":"","code":"d_unif <- dist_uniform(min = 0, max = 1) x <- d_unif$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_unif, estimated = d_unif, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"unif\")$estimate ) ), .x = seq(0, 1, length.out = 100) ) #> Warning: Removed 2 rows containing missing values or values outside the scale range #> (`geom_line()`)."},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":null,"dir":"Reference","previous_headings":"","what":"Weibull Distribution — dist_weibull","title":"Weibull Distribution — dist_weibull","text":"See stats::Weibull","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Weibull Distribution — dist_weibull","text":"","code":"dist_weibull(shape = NULL, scale = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Weibull Distribution — dist_weibull","text":"shape Scalar shape parameter, NULL placeholder. 
scale Scalar scale parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Weibull Distribution — dist_weibull","text":"WeibullDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Weibull Distribution — dist_weibull","text":"parameters can overridden with_params = list(shape = ..., scale = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Weibull Distribution — dist_weibull","text":"","code":"d_weibull <- dist_weibull(shape = 3, scale = 1) x <- d_weibull$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_weibull, estimated = d_weibull, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"weibull\")$estimate ) ), .x = seq(0, 2, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit a neural network based distribution model to data — fit.reservr_keras_model","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"function delegates work keras3::fit.keras.src.models.model.Model() performs additional consistency checks make sure tf_compile_model() called appropriate options support fitting observations y well automatically converting y n x 6 matrix needed compiled loss function.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit a neural network based distribution model to data — 
fit.reservr_keras_model","text":"","code":"# S3 method for reservr_keras_model fit( object, x, y, batch_size = NULL, epochs = 10, verbose = getOption(\"keras.fit_verbose\", default = 1), callbacks = NULL, view_metrics = getOption(\"keras.view_metrics\", default = \"auto\"), validation_split = 0, validation_data = NULL, shuffle = TRUE, class_weight = NULL, sample_weight = NULL, initial_epoch = 0, steps_per_epoch = NULL, validation_steps = NULL, ... )"},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"object compiled reservr_keras_model obtained tf_compile_model(). x list input tensors (predictors) y trunc_obs tibble observed outcomes, something convertible via as_trunc_obs(). batch_size Integer NULL. Number samples per gradient update. unspecified, batch_size default 32. specify batch_size data form TF Datasets generators, (since generate batches). epochs Integer. Number epochs train model. epoch iteration entire x y data provided (unless steps_per_epoch flag set something NULL). Note conjunction initial_epoch, epochs understood \"final epoch\". model trained number iterations given epochs, merely epoch index epochs reached. verbose \"auto\", 0, 1, 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. \"auto\" becomes 1 cases, 2 knitr render running distributed training server. Note progress bar particularly useful logged file, verbose=2 recommended running interactively (e.g., production environment). Defaults \"auto\". callbacks List Callback() instances. List callbacks apply training. See callback_*. view_metrics View realtime plot training metrics (epoch). default (\"auto\") display plot running within RStudio, metrics specified model compile(), epochs > 1 verbose > 0. 
Set global options(keras.view_metrics = ) option establish different default. validation_split Float 0 1. Fraction training data used validation data. model set apart fraction training data, train , evaluate loss model metrics data end epoch. validation data selected last samples x y data provided, shuffling. argument supported x TF Dataset generator. validation_data validation_split provided, validation_data override validation_split. validation_data Data evaluate loss model metrics end epoch. model trained data. Thus, note fact validation loss data provided using validation_split validation_data affected regularization layers like noise dropout. validation_data override validation_split. : tuple (x_val, y_val) arrays tensors. tuple (x_val, y_val, val_sample_weights) arrays. generator returning (inputs, targets) (inputs, targets, sample_weights). shuffle Boolean, whether shuffle training data epoch. argument ignored x generator TF Dataset. class_weight Optional named list mapping class indices (integers, 0-based) weight (float) value, used weighting loss function (training ). can useful tell model \"pay attention\" samples -represented class. class_weight specified targets rank 2 greater, either y must one-hot encoded, explicit final dimension 1 must included sparse class labels. sample_weight Optional array weights training samples, used weighting loss function (training ). can either pass flat (1D) array/vector length input samples (1:1 mapping weights samples), case temporal data, can pass 2D array (matrix) shape (samples, sequence_length), apply different weight every timestep every sample. argument supported x TF Dataset generator, instead provide sample_weights third element x. Note sample weighting apply metrics specified via metrics argument compile(). apply sample weighting metrics, can specify via weighted_metrics compile() instead. initial_epoch Integer. Epoch start training (useful resuming previous training run). steps_per_epoch Integer NULL. 
Total number steps (batches samples) declaring one epoch finished starting next epoch. training input tensors backend-native tensors, default NULL equal number samples dataset divided batch size, 1 determined. x TF Dataset, steps_per_epoch NULL, epoch run input dataset exhausted. passing infinitely repeating dataset, must specify steps_per_epoch argument. steps_per_epoch = -1 training run indefinitely infinitely repeating dataset. validation_steps relevant validation_data provided. Total number steps (batches samples) draw stopping performing validation end every epoch. validation_steps NULL, validation run validation_data dataset exhausted. case infinitely repeated dataset, run infinite loop. validation_steps specified part dataset consumed, evaluation start beginning dataset epoch. ensures validation samples used every time. ... Unused. old arguments supplied, error message raised informing fix issue.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"history object contains information collected training. 
model object updated -place side-effect.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"Additionally, default batch_size min(nrow(y), 10000) instead keras default 32 latter bad choice fitting distributions since involved loss much less stable typical losses used machine learning, leading divergence small batch sizes.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"","code":"dist <- dist_exponential() params <- list(rate = 1.0) N <- 100L rand_input <- runif(N) x <- dist$sample(N, with_params = params) if (interactive()) { tf_in <- keras3::layer_input(1L) mod <- tf_compile_model( inputs = list(tf_in), intermediate_output = tf_in, dist = dist, optimizer = keras3::optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_fit <- fit( object = mod, x = k_matrix(rand_input), y = x, epochs = 10L, callbacks = list( callback_debug_dist_gradients(mod, k_matrix(rand_input), x, keep_grads = TRUE) ) ) }"},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"Fit Blended mixture using ECME-Algorithm","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"","code":"fit_blended( dist, obs, start, min_iter = 0L, max_iter = 100L, skip_first_e = 
FALSE, tolerance = 1e-05, trace = FALSE, ... )"},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"dist BlendedDistribution. assumed, breaks bandwidths placeholder weights estimated. obs Set observations produced trunc_obs() convertible via as_trunc_obs(). start Initial values placeholder parameters. missing, starting values obtained fit_dist_start(). min_iter Minimum number EM-Iterations max_iter Maximum number EM-Iterations (weight updates) skip_first_e Skip first E-Step (update Probability weights)? can help initial values cause mixture component vanish first E-Step starting values can improved. tolerance Numerical tolerance. trace Include tracing information output? TRUE, additional tracing information added result list. ... Passed fit_dist_start() start missing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"list elements params fitted parameters structure init. 
params_hist (trace TRUE) history parameters (e- m- step) iter number outer EM-iterations logLik final log-likelihood","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"","code":"dist <- dist_blended( list( dist_exponential(), dist_genpareto() ) ) params <- list( probs = list(0.9, 0.1), dists = list( list(rate = 2.0), list(u = 1.5, xi = 0.2, sigmau = 1.0) ), breaks = list(1.5), bandwidths = list(0.3) ) x <- dist$sample(100L, with_params = params) dist$default_params$breaks <- params$breaks dist$default_params$bandwidths <- params$bandwidths if (interactive()) { fit_blended(dist, x) }"},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit a general distribution to observations — fit_dist","title":"Fit a general distribution to observations — fit_dist","text":"default implementation performs maximum likelihood estimation placeholder parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit a general distribution to observations — fit_dist","text":"","code":"fit_dist(dist, obs, start, ...) fit_dist_direct(dist, obs, start, ..., .start_with_default = FALSE) # S3 method for Distribution fit(object, obs, start, ...)"},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit a general distribution to observations — fit_dist","text":"dist Distribution object. obs Set observations produced trunc_obs() convertible via as_trunc_obs(). start Initial values placeholder parameters. missing, starting values obtained fit_dist_start(). ... 
Distribution-specific arguments fitting procedure .start_with_default directly optimising likelihood, use optimised algorithm finding better starting values? object parameter dist","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit a general distribution to observations — fit_dist","text":"list least elements params fitted parameters structure init. logLik final log-likelihood Additional information may provided depending dist.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Fit a general distribution to observations — fit_dist","text":"Erlang mixture distributions Mixture distributions, EM-Algorithm instead used improve stability. fit() fit_dist() chose optimisation method optimized specific distribution given. fit_dist_direct() can used force direct maximisation likelihood.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit a general distribution to observations — fit_dist","text":"","code":"x <- rexp(100) lambda_hat <- 1 / mean(x) lambda_hat2 <- fit_dist(dist_exponential(), x)$params$rate identical(lambda_hat, lambda_hat2) #> [1] TRUE dist <- dist_mixture(list(dist_normal(), dist_translate(dist_exponential(), offset = 6))) params <- list( dists = list(list(mean = 5, sd = 1), list(dist = list(rate = 1))), probs = list(0.95, 0.05) ) set.seed(2000) u <- runif(100, 10, 20) x <- dist$sample(100, with_params = params) obs <- trunc_obs(x = x[x <= u], tmin = -Inf, tmax = u[x <= u]) default_fit <- fit_dist(dist, obs) direct_fit <- fit_dist_direct(dist, obs) # NB: direct optimisation steps with pre-run take a few seconds # \\donttest{ direct_fit_init <- fit_dist_direct(dist, obs, start = default_fit$params) 
direct_fit_auto_init <- fit_dist_direct(dist, obs, .start_with_default = TRUE) stopifnot(direct_fit_init$logLik == direct_fit_auto_init$logLik) c(default_fit$logLik, direct_fit$logLik, direct_fit_init$logLik) #> [1] -153.0052 -153.0052 -153.0052 # }"},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":null,"dir":"Reference","previous_headings":"","what":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"Find starting values distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"","code":"# S3 method for MixtureDistribution fit_dist_start(dist, obs, dists_start = NULL, ...) fit_dist_start(dist, obs, ...)"},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"dist Distribution object. obs Observations fit . dists_start List initial parameters component distributions. left empty, initialisation automatically performed using fit_dist_start() observations support respective component. ... 
Additional arguments initialisation procedure","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"list initial parameters suitable passing fit_dist().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"","code":"fit_dist_start(dist_exponential(), rexp(100)) #> $rate #> [1] 1.258531 #>"},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"Fit Erlang mixture using ECME-Algorithm","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"","code":"fit_erlang_mixture( dist, obs, start, min_iter = 0L, max_iter = 100L, skip_first_e = FALSE, tolerance = 1e-05, trace = FALSE, parallel = FALSE, ... )"},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"dist ErlangMixtureDistribution. assumed, probs scale estimated. obs Set observations produced trunc_obs() convertible via as_trunc_obs(). start Initial values placeholder parameters. missing, starting values obtained fit_dist_start(). 
min_iter Minimum number EM-Iterations max_iter Maximum number EM-Iterations (weight updates) skip_first_e Skip first E-Step (update Probability weights)? can help initial values cause mixture component vanish first E-Step starting values can improved. tolerance Numerical tolerance. trace Include tracing information output? TRUE, additional tracing information added result list. parallel Enable experimental parallel evaluation expected log-likelihood? ... Passed fit_dist_start() start missing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"list elements params fitted parameters structure init. params_hist (trace TRUE) history parameters (e- m- step). Otherwise empty list. iter number outer EM-iterations logLik final log-likelihood","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"","code":"dist <- dist_erlangmix(list(NULL, NULL, NULL)) params <- list( shapes = list(1L, 4L, 12L), scale = 2.0, probs = list(0.5, 0.3, 0.2) ) x <- dist$sample(100L, with_params = params) fit_erlang_mixture(dist, x, init = \"kmeans\") #> $params #> $params$probs #> $params$probs[[1]] #> [1] 0.31 #> #> $params$probs[[2]] #> [1] 0.43 #> #> $params$probs[[3]] #> [1] 0.26 #> #> #> $params$shapes #> $params$shapes[[1]] #> [1] 1 #> #> $params$shapes[[2]] #> [1] 4 #> #> $params$shapes[[3]] #> [1] 13 #> #> #> $params$scale #> [1] 1.686607 #> #> #> $params_hist #> list() #> #> $iter #> [1] 1 #> #> $logLik #> 'log Lik.' 
-310.162 (df=6) #>"},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"Fit generic mixture using ECME-Algorithm","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"","code":"fit_mixture( dist, obs, start, min_iter = 0L, max_iter = 100L, skip_first_e = FALSE, tolerance = 1e-05, trace = FALSE, ... )"},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"dist MixtureDistribution specifying structure mixture. Free parameters optimised. dominating measure likelihoods must constant, example dist_dirac() may point parameter free. obs Set observations produced trunc_obs() convertible via as_trunc_obs(). start Initial values placeholder parameters. missing, starting values obtained fit_dist_start(). min_iter Minimum number EM-Iterations max_iter Maximum number EM-Iterations (weight updates) skip_first_e Skip first E-Step (update Probability weights)? can help initial values cause mixture component vanish first E-Step starting values can improved. tolerance Numerical tolerance. trace Include tracing information output? TRUE, additional tracing information added result list. ... Passed fit_dist_start() start missing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"list elements params fitted parameters structure init. 
params_hist (trace TRUE) history parameters (e- m- step) iter number outer EM-iterations logLik final log-likelihood","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"","code":"dist <- dist_mixture( list( dist_dirac(0.0), dist_exponential() ) ) params <- list( probs = list(0.1, 0.9), dists = list( list(), list(rate = 1.0) ) ) x <- dist$sample(100L, with_params = params) fit_mixture(dist, x) #> $params #> $params$dists #> $params$dists[[1]] #> list() #> #> $params$dists[[2]] #> $params$dists[[2]]$rate #> [1] 0.8578941 #> #> #> #> $params$probs #> $params$probs[[1]] #> [1] 0.11 #> #> $params$probs[[2]] #> [1] 0.89 #> #> #> #> $iter #> [1] 1 #> #> $logLik #> 'log Lik.' -137.293 (df=2) #>"},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":null,"dir":"Reference","previous_headings":"","what":"Flatten / Inflate parameter lists / vectors — flatten_params","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"Flatten / Inflate parameter lists / vectors","code":""},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"","code":"flatten_params(params) flatten_params_matrix(params) flatten_bounds(bounds) inflate_params(flat_params)"},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"params named list parameters flattened. form passed with_params argument distribution functions. 
bounds List parameter bounds returned dist$get_param_bounds() flat_params named numeric vector parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"flatten_params returns 'flattened' vector parameters. intended adapter multi-dimensional optimisation functions distribution objects. flatten_params_matrix returns 'flattened' matrix parameters. intended adapter multi-dimensional optimisation functions distribution objects. column corresponds one input element. flatten_bounds returns named list vectors names lower upper. Containing upper lower bounds parameter. inflate_params returns 'inflated' list parameters. can passed with_params argument distribution functions.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"","code":"library(ggplot2) mm <- dist_mixture(list( dist_exponential(NULL), dist_lognormal(0.5, NULL) ), list(NULL, 1)) ph <- mm$get_placeholders() ph_flat <- flatten_params(ph) ph_reinflated <- inflate_params(ph_flat) ph_flat[] <- c(1, 1, 6) ph_sample <- inflate_params(ph_flat) x <- mm$sample( 100, with_params = ph_sample ) emp_cdf <- ecdf(x) ggplot(data.frame(t = seq(from = min(x), to = max(x), length.out = 100))) %+% geom_point(aes(x = t, y = emp_cdf(t))) %+% geom_line(aes(x = t, y = mm$probability(t, with_params = ph_sample)), linetype = 2)"},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":null,"dir":"Reference","previous_headings":"","what":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"Integrates fun bounds [ lower, upper ] vectorized 
lower upper. Vectorized list structures parameters can also passed.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"","code":"integrate_gk( fun, lower, upper, params = list(), .tolerance = .Machine$double.eps^0.25, .max_iter = 100L )"},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"fun function integrate. Must vectorized take one two arguments, first points evaluate second (optionally) parameters apply. must return numeric vector length first input. Currently, infinite bounds supported. lower, upper Integration bounds. Must length. params Parameters pass second argument fun. actual parameters must length number integrals compute. Can possibly nested list structures containing numeric vectors. Alternatively, can matrix number rows number integrals compute. .tolerance Absolute element-wise tolerance. .max_iter Maximum number iterations. number integration intervals length(lower) * .max_iter. 
Therefore maximum number function evaluations per integration interval 15 * .max_iter.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"vector integrals -th entry containing approximation integral fun(t, pick_params_at(params, )) dt interval lower[] upper[]","code":""},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"integration error estimated Gauss-Kronrod quadrature absolute difference 7-point quadrature 15-point quadrature. Integrals converge bisected midpoint. params object recursively subsetted numeric vectors length number observations.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"","code":"# Argument recycling and parallel integration of two intervals integrate_gk(sin, 0, c(pi, 2 * pi)) #> [1] 2.000000e+00 -3.141135e-16 dist <- dist_exponential() integrate_gk( function(x, p) dist$density(x, with_params = p), lower = 0, upper = 1:10, params = list(rate = 1 / 1:10) ) #> [1] 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 #> [8] 0.6321206 0.6321206 0.6321206 dist$probability(1:10, with_params = list(rate = 1 / 1:10)) #> [1] 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 #> [8] 0.6321206 0.6321206 0.6321206"},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":null,"dir":"Reference","previous_headings":"","what":"Convex union and intersection of intervals — interval-operations","title":"Convex union and 
intersection of intervals — interval-operations","text":"Convex union intersection intervals","code":""},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convex union and intersection of intervals — interval-operations","text":"","code":"interval_union(..., intervals = list()) interval_intersection(..., intervals = list())"},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convex union and intersection of intervals — interval-operations","text":"... appended intervals present. intervals list Intervals.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Convex union and intersection of intervals — interval-operations","text":"interval_union returns convex union intervals intervals. smallest interval completely containing intervals. interval_intersection returns set intersection intervals intervals. 
empty set represented open interval (0, 0).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Convex union and intersection of intervals — interval-operations","text":"","code":"interval_union( interval(c(0, 1), closed = TRUE), interval(c(1, 2)) ) #> [0, 2) interval_union( interval(c(0, 5)), interval(c(1, 4), closed = TRUE) ) #> (0, 5) # Convex union is not equal to set union: interval_union( interval(c(0, 1)), interval(c(2, 3)) ) #> (0, 3) # The empty union is {} interval_union() #> {} interval_intersection( interval(c(0, 1)), interval(c(0.5, 2)) ) #> (0.5, 1) interval_intersection( interval(c(0, Inf)), interval(c(-Inf, 0)) ) #> {} interval_intersection( interval(c(0, Inf), include_lowest = TRUE), interval(c(-Inf, 0), include_highest = TRUE) ) #> {0} interval_intersection( interval(c(0, 5)), interval(c(1, 6), closed = TRUE) ) #> [1, 5) # The empty intersection is (-Inf, Inf) interval_intersection() #> (-Inf, Inf)"},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":null,"dir":"Reference","previous_headings":"","what":"Intervals — interval","title":"Intervals — interval","text":"Intervals","code":""},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Intervals — interval","text":"","code":"interval( range = c(-Inf, Inf), ..., include_lowest = closed, include_highest = closed, closed = FALSE, integer = FALSE, read_only = FALSE ) is.Interval(x)"},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Intervals — interval","text":"range interval boundaries sorted two-element numeric vector. ... First argument used endpoint range length 1. Additional arguments, range length 2, cause warning ignored. 
include_lowest lower boundary part interval? include_highest upper boundary part interval? closed interval closed? integer interval integers? read_only Make interval object read-? x object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Intervals — interval","text":"interval returns Interval. .Interval returns TRUE x Interval, FALSE otherwise.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Intervals — interval","text":"","code":"# The real line interval() #> (-Inf, Inf) # Closed unit interval interval(c(0, 1), closed = TRUE) #> [0, 1] # Alternative form interval(0, 1, closed = TRUE) #> [0, 1] # Non-negative real line interval(c(0, Inf), include_lowest = TRUE) #> [0, Inf)"},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":null,"dir":"Reference","previous_headings":"","what":"Test if object is a Distribution — is.Distribution","title":"Test if object is a Distribution — is.Distribution","text":"Test object Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Test if object is a Distribution — is.Distribution","text":"","code":"is.Distribution(object)"},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Test if object is a Distribution — is.Distribution","text":"object R object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Test if object is a Distribution — is.Distribution","text":"TRUE object Distribution, FALSE 
otherwise.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Test if object is a Distribution — is.Distribution","text":"","code":"is.Distribution(dist_dirac()) #> [1] TRUE"},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":null,"dir":"Reference","previous_headings":"","what":"Cast to a TensorFlow matrix — k_matrix","title":"Cast to a TensorFlow matrix — k_matrix","text":"Cast TensorFlow matrix","code":""},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Cast to a TensorFlow matrix — k_matrix","text":"","code":"k_matrix(x, dtype = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Cast to a TensorFlow matrix — k_matrix","text":"x Numeric object converted matrix Tensor. dtype Type elements resulting tensor. Defaults keras3::config_floatx().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Cast to a TensorFlow matrix — k_matrix","text":"two-dimensional tf.Tensor values x. 
shape (nrow(x), ncol(x)) x first converted R matrix via .matrix().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Cast to a TensorFlow matrix — k_matrix","text":"","code":"if (interactive()) { k_matrix(diag(1:3)) k_matrix(diag(1:3), dtype = \"int32\") # Vectors are converted to columns: k_matrix(1:3) }"},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":null,"dir":"Reference","previous_headings":"","what":"Plot several distributions — plot_distributions","title":"Plot several distributions — plot_distributions","text":"Plot several distributions","code":""},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Plot several distributions — plot_distributions","text":"","code":"plot_distributions( ..., distributions = list(), .x, plots = c(\"density\", \"probability\", \"hazard\"), with_params = list(), as_list = FALSE )"},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Plot several distributions — plot_distributions","text":"... distribution objects (must named) distributions Named list distribution objects. concatenated .... .x Numeric vector points evaluate . plots Plots created. May abbreviated. plots stacked order given top bottom. with_params list distribution parameters given distribution using with_params. named, names matched distribution names. Otherwise, allocated positionally, index 1 corresponding first element distributions, elements distributions followed arguments ... order. 
as_list return list ggplots instead patchwork?","code":""},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Plot several distributions — plot_distributions","text":"stacked patchwork requested ggplots","code":""},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Plot several distributions — plot_distributions","text":"","code":"rate <- 1 x <- rexp(20, rate) d_emp <- dist_empirical(x, positive = TRUE) d_exp <- dist_exponential() plot_distributions( empirical = d_emp, theoretical = d_exp, estimated = d_exp, with_params = list( theoretical = list(rate = rate), estimated = list(rate = 1 / mean(x)) ), .x = seq(1e-4, 5, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":null,"dir":"Reference","previous_headings":"","what":"Predict individual distribution parameters — predict.reservr_keras_model","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"Predict individual distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"","code":"# S3 method for reservr_keras_model predict(object, data, as_matrix = FALSE, ...)"},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"object compiled trained reservr_keras_model. data Input data compatible model. as_matrix Return parameter matrix instead list structure? ... 
ignored","code":""},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"parameter list suitable with_params argument distribution family used model. Contains one set parameters per row data.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"","code":"if (interactive()) { dist <- dist_exponential() params <- list(rate = 1.0) N <- 100L rand_input <- runif(N) x <- dist$sample(N, with_params = params) tf_in <- keras3::layer_input(1L) mod <- tf_compile_model( inputs = list(tf_in), intermediate_output = tf_in, dist = dist, optimizer = keras3::optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_fit <- fit( object = mod, x = k_matrix(rand_input), y = x, epochs = 10L, callbacks = list( callback_debug_dist_gradients(mod, k_matrix(rand_input), x) ) ) tf_preds <- predict(mod, data = k_matrix(rand_input)) }"},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":null,"dir":"Reference","previous_headings":"","what":"Determine probability of reporting under a Poisson arrival Process — prob_report","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"Determines probability claims occurring Poisson process arrival intensity expo reporting delay distribution dist time t_min t_max reported tau_min tau_max.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"","code":"prob_report( dist, intervals, 
expo = NULL, with_params = list(), .tolerance = .Machine$double.eps^0.5, .max_iter = 100L, .try_compile = TRUE )"},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"dist reporting delay Distribution, compiled interval probability function. intervals data frame columns xmin, xmax, tmin, tmax. Claims occur within [xmin, xmax] reported within [tmin, tmax]. expo Poisson intensity. given, must vectorised function yields intensity claim arrival process specified time. expo = NULL equivalent constant intensity function. expo relevant multiplicative constant. with_params Parameters dist use. Can parameter set different values interval. dist compiled interval probability function, with_params can matrix instead. .tolerance Absolute element-wise tolerance. .max_iter Maximum number iterations. number integration intervals length(lower) * .max_iter. Therefor maximum number function evaluations per integration interval 15 * .max_iter. 
.try_compile Try compiling distributions probability function speed integration?","code":""},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"vector reporting probabilities, one entry per row intervals.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"reporting probability given P(x + d [tmin, tmax] | x [xmin, xmax]) = E(P(x + d [tmin, tmax] | x) | x [xmin, xmax]) / P(x [xmin, xmax]) = int_[xmin, xmax] expo(x) P(x + d [tmin, tmax]) dx = int_[xmin, xmax] expo(x) P(d [tmin - x, tmax - x]) dx / int_[xmin, xmax] expo(x) dx prob_report uses integrate_gk() compute two integrals.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"","code":"dist <- dist_exponential() ints <- data.frame( xmin = 0, xmax = 1, tmin = seq_len(10) - 1.0, tmax = seq_len(10) ) params <- list(rate = rep(c(1, 0.5), each = 5)) prob_report(dist, ints, with_params = params) #> [1] 0.367879441 0.399576401 0.146995943 0.054076785 0.019893738 0.041904709 #> [7] 0.025416491 0.015415881 0.009350204 0.005671186"},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":null,"dir":"Reference","previous_headings":"","what":"Quantiles of Distributions — quantile.Distribution","title":"Quantiles of Distributions — quantile.Distribution","text":"Produces quantiles corresponding given probabilities configurable distribution 
parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Quantiles of Distributions — quantile.Distribution","text":"","code":"# S3 method for Distribution quantile(x, probs = seq(0, 1, 0.25), with_params = list(), ..., .start = 0)"},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Quantiles of Distributions — quantile.Distribution","text":"x Distribution. probs Quantiles compute. with_params Optional list distribution parameters. Note x$has_capability(\"quantile\") false, with_params assumed contain one set parameters. ... ignored .start Starting value quantiles computed numerically. Must within support x.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Quantiles of Distributions — quantile.Distribution","text":"quantiles x corresponding probs parameters with_params.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Quantiles of Distributions — quantile.Distribution","text":"x$has_capability(\"quantile\") true, returns x$quantile(probs, with_params = with_params). case, with_params may contain separate sets parameters quantile determined. Otherwise, numerical estimation quantiles done using density probability function. method assumes with_params cantain one set parameters. strategy uses two steps: Find smallest largest quantiles probs using newton method starting .start. 
Find remaining quantiles bisection using stats::uniroot().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Quantiles of Distributions — quantile.Distribution","text":"","code":"# With quantiles available dist <- dist_normal(sd = 1) qqs <- quantile(dist, probs = rep(0.5, 3), with_params = list(mean = 1:3)) stopifnot(all.equal(qqs, 1:3)) # Without quantiles available dist <- dist_erlangmix(shapes = list(1, 2, 3), scale = 1.0) my_probs <- c(0, 0.01, 0.25, 0.5, 0.75, 1) qqs <- quantile( dist, probs = my_probs, with_params = list(probs = list(0.5, 0.3, 0.2)), .start = 2 ) all.equal(dist$probability(qqs, with_params = list(probs = list(0.5, 0.3, 0.2))), my_probs) #> [1] \"Mean relative difference: 2.890015e-06\" # Careful: Numerical estimation of extreme quantiles can result in out-of-bounds values. # The correct 0-quantile would be 0 in this case, but it was estimated < 0. qqs[1L] #> [1] -1.138089"},{"path":"https://ashesitr.github.io/reservr/reference/reexports.html","id":null,"dir":"Reference","previous_headings":"","what":"Objects exported from other packages — reexports","title":"Objects exported from other packages — reexports","text":"objects imported packages. Follow links see documentation. 
generics fit","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":null,"dir":"Reference","previous_headings":"","what":"Soft-Max function — softmax","title":"Soft-Max function — softmax","text":"Softmax vector x defined ","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Soft-Max function — softmax","text":"","code":"softmax(x) dsoftmax(x)"},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Soft-Max function — softmax","text":"x numeric vector matrix","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Soft-Max function — softmax","text":"softmax returns softmax x; rowwise x matrix. dsoftmax returns Jacobi-matrix softmax(x) x. x must vector.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Soft-Max function — softmax","text":"\\(s_i = \\exp(x_i) / \\sum_k \\exp(x_k)\\) satisfies sum(s) == 1.0 can used smoothly enforce sum constraint.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Soft-Max function — softmax","text":"","code":"softmax(c(5, 5)) #> [1] 0.5 0.5 softmax(diag(nrow = 5, ncol = 6)) #> [,1] [,2] [,3] [,4] [,5] [,6] #> [1,] 0.3521874 0.1295625 0.1295625 0.1295625 0.1295625 0.1295625 #> [2,] 0.1295625 0.3521874 0.1295625 0.1295625 0.1295625 0.1295625 #> [3,] 0.1295625 0.1295625 0.3521874 0.1295625 0.1295625 0.1295625 #> [4,] 0.1295625 0.1295625 0.1295625 0.3521874 0.1295625 0.1295625 #> [5,] 0.1295625 0.1295625 0.1295625 0.1295625 0.3521874 
0.1295625"},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":null,"dir":"Reference","previous_headings":"","what":"Compile a Keras model for truncated data under dist — tf_compile_model","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"Compile Keras model truncated data dist","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"","code":"tf_compile_model( inputs, intermediate_output, dist, optimizer, censoring = TRUE, truncation = TRUE, metrics = NULL, weighted_metrics = NULL )"},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"inputs List keras input layers intermediate_output Intermediate model layer used input distribution parameters dist Distribution use compiling loss parameter outputs optimizer String (name optimizer) optimizer instance. See optimizer_* family. censoring flag, whether compiled model support censored observations. Set FALSE higher efficiency. fit(...) error resulting model used fit censored observations. truncation flag, whether compiled model support truncated observations. Set FALSE higher efficiency. fit(...) warn resuting model used fit truncated observations. metrics List metrics evaluated model training testing. can : string (name built-function), function, optionally \"name\" attribute Metric() instance. See metric_* family functions. Typically use metrics = c('accuracy'). function callable signature result = fn(y_true, y_pred). specify different metrics different outputs multi-output model, also pass named list, metrics = list(= 'accuracy', b = c('accuracy', 'mse')). 
can also pass list specify metric list metrics output, metrics = list(c('accuracy'), c('accuracy', 'mse')) metrics = list('accuracy', c('accuracy', 'mse')). pass strings 'accuracy' 'acc', convert one metric_binary_accuracy(), metric_categorical_accuracy(), metric_sparse_categorical_accuracy() based shapes targets model output. similar conversion done strings \"crossentropy\" \"ce\" well. metrics passed evaluated without sample weighting; like sample weighting apply, can specify metrics via weighted_metrics argument instead. providing anonymous R function, can customize printed name training assigning attr(, \"name\") <- \"my_custom_metric_name\", calling custom_metric(\"my_custom_metric_name\", ) weighted_metrics List metrics evaluated weighted sample_weight class_weight training testing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"reservr_keras_model can used train truncated censored observations dist based input data inputs.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"","code":"dist <- dist_exponential() params <- list(rate = 1.0) N <- 100L rand_input <- runif(N) x <- dist$sample(N, with_params = params) if (interactive()) { tf_in <- keras3::layer_input(1L) mod <- tf_compile_model( inputs = list(tf_in), intermediate_output = tf_in, dist = dist, optimizer = keras3::optimizer_adam(), censoring = FALSE, truncation = FALSE ) }"},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":null,"dir":"Reference","previous_headings":"","what":"Initialise model weights to a global parameter fit — tf_initialise_model","title":"Initialise 
model weights to a global parameter fit — tf_initialise_model","text":"Initialises compiled reservr_keras_model weights predictions equal , close , distribution parameters given params.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Initialise model weights to a global parameter fit — tf_initialise_model","text":"","code":"tf_initialise_model( model, params, mode = c(\"scale\", \"perturb\", \"zero\", \"none\") )"},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Initialise model weights to a global parameter fit — tf_initialise_model","text":"model reservr_compiled_model obtained tf_compile_model(). params list distribution parameters compatible model. mode initialisation mode scale Initialise biases according params kernels uniform [-0.1, 0.1] * bias scale. perturb Initialise biases according params leave kernels . zero Initialise biases according params set kernel zero. 
none modify weights.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Initialise model weights to a global parameter fit — tf_initialise_model","text":"Invisibly model changed weights","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Initialise model weights to a global parameter fit — tf_initialise_model","text":"","code":"dist <- dist_exponential() group <- sample(c(0, 1), size = 100, replace = TRUE) x <- dist$sample(100, with_params = list(rate = group + 1)) global_fit <- fit(dist, x) if (interactive()) { library(keras3) l_in <- layer_input(shape = 1L) mod <- tf_compile_model( inputs = list(l_in), intermediate_output = l_in, dist = dist, optimizer = optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_initialise_model(mod, global_fit$params) fit_history <- fit( mod, x = group, y = x, epochs = 200L ) predicted_means <- predict(mod, data = as_tensor(c(0, 1), config_floatx())) }"},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":null,"dir":"Reference","previous_headings":"","what":"Define a set of truncated observations — trunc_obs","title":"Define a set of truncated observations — trunc_obs","text":"x missing, xmin xmax must specified.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Define a set of truncated observations — trunc_obs","text":"","code":"trunc_obs(x, xmin = x, xmax = x, tmin = -Inf, tmax = Inf, w = 1) as_trunc_obs(.data) truncate_obs(.data, tmin_new = -Inf, tmax_new = Inf, .partial = FALSE) repdel_obs(.data, accident, delay, time, .truncate = 
FALSE)"},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Define a set of truncated observations — trunc_obs","text":"x Observations xmin, xmax Censoring bounds. xmin != xmax, x must NA. tmin, tmax Truncation bounds. May vary per observation. w Case weights .data data frame numeric vector. tmin_new New truncation minimum tmax_new New truncation maximum .partial Enable partial truncation censored observations? potentially create inconsistent data actual observation lies outside truncation bounds censoring interval overlaps. accident accident time (unquoted, evaluated .data) delay reporting delay (unquoted, evaluated .data) time evaluation time (unquoted, evaluated .data) .truncate claims reported time silently discarded? claims reported time .truncate FALSE, error raised.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Define a set of truncated observations — trunc_obs","text":"trunc_obs: trunc_obs tibble columns x, xmin, xmax, tmin tmax describing possibly interval-censored observations truncation as_trunc_obs returns trunc_obs tibble. truncate_obs returns trunc_obs tibble possibly fewer observations .data updated truncation bounds. repdel_obs returns trunc_obs tibble corresponding reporting delay observations claim. .truncate FALSE, result guaranteed number rows .data.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Define a set of truncated observations — trunc_obs","text":"Uncensored observations must satisfy tmin <= xmin = x = xmax <= tmax. 
Censored observations must satisfy tmin <= xmin < xmax <= tmax x = NA.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Define a set of truncated observations — trunc_obs","text":"","code":"N <- 100 x <- rexp(N, 0.5) # Random, observation dependent truncation intervals tmin <- runif(N, 0, 1) tmax <- tmin + runif(N, 1, 2) oob <- x < tmin | x > tmax x <- x[!oob] tmin <- tmin[!oob] tmax <- tmax[!oob] # Number of observations after truncation N <- length(x) # Randomly interval censor 30% of observations cens <- rbinom(N, 1, 0.3) == 1L xmin <- x xmax <- x xmin[cens] <- pmax(tmin[cens], floor(x[cens])) xmax[cens] <- pmin(tmax[cens], ceiling(x[cens])) x[cens] <- NA trunc_obs(x, xmin, xmax, tmin, tmax) #> # A tibble: 44 × 6 #> x xmin xmax tmin tmax w #> #> 1 NA 0.832 1 0.832 2.08 1 #> 2 NA 1 2 0.464 2.24 1 #> 3 1.46 1.46 1.46 0.450 2.36 1 #> 4 0.665 0.665 0.665 0.487 1.80 1 #> 5 0.979 0.979 0.979 0.0436 1.11 1 #> 6 1.03 1.03 1.03 0.560 2.19 1 #> 7 0.657 0.657 0.657 0.185 1.98 1 #> 8 NA 1 2 0.612 2.36 1 #> 9 0.526 0.526 0.526 0.240 2.03 1 #> 10 1.60 1.60 1.60 0.668 2.38 1 #> # ℹ 34 more rows as_trunc_obs(c(1, 2, 3)) #> # A tibble: 3 × 6 #> x xmin xmax tmin tmax w #> #> 1 1 1 1 -Inf Inf 1 #> 2 2 2 2 -Inf Inf 1 #> 3 3 3 3 -Inf Inf 1 as_trunc_obs(data.frame(x = 1:3, tmin = 0, tmax = 10)) #> # A tibble: 3 × 6 #> x xmin xmax tmin tmax w #> #> 1 1 1 1 0 10 1 #> 2 2 2 2 0 10 1 #> 3 3 3 3 0 10 1 as_trunc_obs(data.frame(x = c(1, NA), xmin = c(1, 2), xmax = c(1, 3))) #> # A tibble: 2 × 6 #> x xmin xmax tmin tmax w #> #> 1 1 1 1 -Inf Inf 1 #> 2 NA 2 3 -Inf Inf 1 truncate_obs(1:10, tmin_new = 2.0, tmax_new = 8.0) #> # A tibble: 7 × 6 #> x xmin xmax tmin tmax w #> #> 1 2 2 2 2 8 1 #> 2 3 3 3 2 8 1 #> 3 4 4 4 2 8 1 #> 4 5 5 5 2 8 1 #> 5 6 6 6 2 8 1 #> 6 7 7 7 2 8 1 #> 7 8 8 8 2 8 
1"},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":null,"dir":"Reference","previous_headings":"","what":"Truncate claims data subject to reporting delay — truncate_claims","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"Truncate claims data subject reporting delay","code":""},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"","code":"truncate_claims(data, accident, delay, time, .report_col = \"report\")"},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"data Full claims data including IBNR accident Accident times. May unquoted column name data. delay Reporting delays. May unquoted column name data. time Observation time (scalar number one per claim). Claims accident + delay > time truncated. Set time = Inf compute reporting times perform truncation. .report_col NULL column name store reporting time report = accident + delay.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"Truncated data. reporting time stored colnumn named .report_col unless .report_col NULL. 
.report_col NULL time contains Infs, warning issued since data returned unchanged work done.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"","code":"claims_full <- data.frame( acc = runif(100), repdel = rexp(100) ) tau <- 2.0 truncate_claims(claims_full, acc, repdel, tau) #> acc repdel report #> 1 1.341272e-01 0.028235584 0.16236281 #> 2 7.952195e-02 0.008358716 0.08788066 #> 3 3.443970e-02 0.140290387 0.17473009 #> 4 7.098971e-01 1.152279972 1.86217711 #> 5 2.960615e-01 1.574453000 1.87051447 #> 6 7.289446e-01 0.088894959 0.81783959 #> 7 1.101838e-01 1.216726908 1.32691067 #> 8 5.654015e-01 1.387287179 1.95268864 #> 9 7.189693e-01 0.069208830 0.78817813 #> 10 8.909014e-01 0.152164621 1.04306601 #> 11 4.515028e-01 0.206191922 0.65769476 #> 13 1.224872e-01 0.514714528 0.63720168 #> 14 1.555099e-05 0.319640385 0.31965594 #> 15 6.111351e-01 0.932906239 1.54404130 #> 17 3.713955e-02 0.471633688 0.50877323 #> 18 7.302117e-01 0.470371407 1.20058307 #> 19 4.125354e-01 0.825975930 1.23851137 #> 21 5.602826e-01 0.078439292 0.63872193 #> 23 5.899033e-01 0.356895571 0.94679888 #> 24 5.889085e-01 0.886226820 1.47513534 #> 25 1.670153e-01 0.177537690 0.34455295 #> 26 6.828304e-01 0.163108702 0.84593913 #> 27 3.082466e-01 0.319356196 0.62760279 #> 28 7.793431e-01 0.952858891 1.73220199 #> 29 3.284492e-01 1.587426414 1.91587561 #> 30 4.171246e-02 1.250061904 1.29177437 #> 31 1.383127e-01 1.177008377 1.31532110 #> 32 2.560620e-01 0.055133459 0.31119549 #> 33 4.429471e-01 0.267354952 0.71030207 #> 34 2.050347e-01 0.464885217 0.66991992 #> 35 7.792969e-01 0.157671728 0.93696860 #> 37 7.766178e-01 1.075450784 1.85206859 #> 39 1.776113e-01 1.112906751 1.29051807 #> 42 5.313195e-01 0.137196474 0.66851598 #> 43 4.677886e-01 0.664616256 1.13240482 #> 44 1.206373e-01 0.297930363 
0.41856766 #> 45 6.348620e-01 1.141797280 1.77665926 #> 46 8.912517e-01 0.680814184 1.57206585 #> 47 8.781296e-01 0.540906635 1.41903623 #> 48 2.148099e-02 0.518280302 0.53976129 #> 50 5.886034e-01 0.705613201 1.29421661 #> 51 1.850858e-01 0.508072750 0.69315858 #> 52 5.010168e-01 0.352654210 0.85367102 #> 54 9.485277e-01 0.332760828 1.28128852 #> 55 1.860526e-01 0.110473518 0.29652609 #> 56 9.358294e-01 0.568551742 1.50438114 #> 57 7.175151e-01 0.091612742 0.80912781 #> 58 3.182253e-01 0.436845455 0.75507071 #> 59 5.823165e-01 0.114667189 0.69698367 #> 61 6.969247e-01 0.476378988 1.17330367 #> 62 7.630700e-01 0.010118335 0.77318837 #> 63 1.971220e-01 1.478337746 1.67545979 #> 64 1.982628e-01 1.151255731 1.34951853 #> 65 3.902595e-01 1.343615915 1.73387538 #> 66 6.074760e-01 0.774707003 1.38218298 #> 67 5.993708e-01 0.646028800 1.24539964 #> 68 7.847457e-01 0.986775468 1.77152118 #> 69 5.605707e-01 0.550733615 1.11130433 #> 70 4.787836e-01 0.947700852 1.42648449 #> 71 9.189584e-01 0.396267166 1.31522557 #> 72 1.237870e-02 0.390503512 0.40288222 #> 74 5.759545e-01 0.819434726 1.39538924 #> 75 3.667410e-01 0.043780853 0.41052181 #> 76 1.393693e-01 0.232285622 0.37165496 #> 78 8.949839e-01 0.206483914 1.10146782 #> 79 2.749953e-01 0.453207363 0.72820271 #> 80 2.195253e-01 0.777957749 0.99748306 #> 81 7.483612e-02 0.264012017 0.33884814 #> 82 2.389014e-01 0.022741144 0.26164256 #> 84 2.821164e-01 0.205027999 0.48714435 #> 86 2.685969e-01 0.517190858 0.78578778 #> 87 3.688022e-01 0.114156616 0.48295881 #> 90 1.491891e-01 1.768341100 1.91753016 #> 91 2.765223e-01 0.306067371 0.58258966 #> 92 7.019701e-01 0.056862872 0.75883296 #> 94 6.576286e-01 1.223552487 1.88118109 #> 95 4.837228e-01 0.368050174 0.85177300 #> 97 6.680541e-01 0.381928012 1.04998212 #> 98 1.240903e-01 0.434685851 0.55877612 #> 99 4.515315e-01 0.622061988 1.07359347 #> 100 3.454657e-01 0.470663017 
0.81612874"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute weighted moments — weighted_moments","title":"Compute weighted moments — weighted_moments","text":"Compute weighted moments","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute weighted moments — weighted_moments","text":"","code":"weighted_moments(x, w, n = 2L, center = TRUE)"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute weighted moments — weighted_moments","text":"x Observations w Case weights (optional) n Number moments calculate center Calculate centralized moments (default) noncentralized moments, .e. E((X - E(X))^k) E(X^k).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute weighted moments — weighted_moments","text":"vector length n kth entry kth weighted moment x weights w. center TRUE moments centralized, .e. E((X - E(X))^k). first moment never centralized. moments scaled 1 / sum(w), de-biased. e.g. 
second central weighted moment weighted_moment(x, w)[2L] equal var(rep(x, w)) * (sum(w) - 1) / sum(w) integer w","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute weighted moments — weighted_moments","text":"","code":"weighted_moments(rexp(100)) #> [1] 0.9877458 1.2957378 weighted_moments(c(1, 2, 3), c(1, 2, 3)) #> [1] 2.3333333 0.5555556 c(mean(rep(1:3, 1:3)), var(rep(1:3, 1:3)) * 5 / 6) #> [1] 2.3333333 0.5555556"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute weighted quantiles — weighted_quantile","title":"Compute weighted quantiles — weighted_quantile","text":"Compute weighted quantiles","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute weighted quantiles — weighted_quantile","text":"","code":"weighted_quantile(x, w, probs) weighted_median(x, w)"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute weighted quantiles — weighted_quantile","text":"x Observations w Case weights (optional) probs Quantiles calculate","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute weighted quantiles — weighted_quantile","text":"vector length probs corresponding weighted quantiles x weight w. integer weights, equivalent quantile(rep(x, w), probs) weighted median x weights w. 
integer weights, equivalent median(rep(x, w))","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute weighted quantiles — weighted_quantile","text":"","code":"weighted_median(1:6) #> [1] 3.5 weighted_median(1:3, c(1, 4, 9)) #> [1] 3 weighted_median(1:3, c(9, 4, 1)) #> [1] 1 weighted_quantile(1:3, c(1, 4, 9), seq(0.0, 1.0, by = 0.25)) #> [1] 1 2 3 3 3 quantile(rep(1:3, c(1, 4, 9)), seq(0.0, 1.0, by = 0.25)) #> 0% 25% 50% 75% 100% #> 1 2 3 3 3"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute weighted tabulations — weighted_tabulate","title":"Compute weighted tabulations — weighted_tabulate","text":"Computes sum w grouped bin. w missing result equivalent tabulate(bin, nbins)","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute weighted tabulations — weighted_tabulate","text":"","code":"weighted_tabulate(bin, w, nbins = max(1L, bin, na.rm = TRUE))"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute weighted tabulations — weighted_tabulate","text":"bin integer vector values 1L nbins w Weights per entry bin. nbins Number bins","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute weighted tabulations — weighted_tabulate","text":"vector length nbins ith result equal sum(w[bin == ]) sum(bin == ) w missing. 
integer weights, equivalent tabulate(rep(bin, w), nbins).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute weighted tabulations — weighted_tabulate","text":"","code":"weighted_tabulate(c(1, 1, 2)) #> [1] 2 1 weighted_tabulate(c(1, 1, 2), nbins = 3L) #> [1] 2 1 0 weighted_tabulate(c(1, 1, 2), w = c(0.5, 0.5, 1), nbins = 3L) #> [1] 1 1 0"},{"path":"https://ashesitr.github.io/reservr/news/index.html","id":"reservr-003","dir":"Changelog","previous_headings":"","what":"reservr 0.0.3","title":"reservr 0.0.3","text":"Fixed segfaults r-devel caused zero-length input C++ routines. Migrated keras3 keras support.","code":""},{"path":"https://ashesitr.github.io/reservr/news/index.html","id":"reservr-002","dir":"Changelog","previous_headings":"","what":"reservr 0.0.2","title":"reservr 0.0.2","text":"CRAN release: 2023-10-18 Fixed tensorflow log-density implementation dist_erlangmix() dist_exponential() work censored data. Multiple bug fixes related tensorflow training integration, input tensor shapes can unknown. Improved testing tensorflow integration.","code":""},{"path":"https://ashesitr.github.io/reservr/news/index.html","id":"reservr-001","dir":"Changelog","previous_headings":"","what":"reservr 0.0.1","title":"reservr 0.0.1","text":"CRAN release: 2022-12-09 Initial CRAN release","code":""}] +[{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"distributions","dir":"Articles","previous_headings":"","what":"Distributions","title":"Working with Distributions","text":"Distributions set classes available reservr specify distribution families random variables. Distribution inherits R6 Class Distribution provides functionality necessary working specific family. Distribution can defined calling one constructor functions, prefixed dist_ package. constructors accept parameters family arguments. 
arguments specified, corresponding parameter considered fixed sense need specified computing something distribution assumed fixed calling fit() distribution instance.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"sample","dir":"Articles","previous_headings":"Distributions","what":"Sample","title":"Working with Distributions","text":"example, unspecified normal distribution can created calling dist_normal() without arguments. means parameters mean sd considered placeholders. want , e.g., sample norm, must specify placeholders with_params argument:","code":"library(reservr) set.seed(1L) # Instantiate an unspecified normal distribution norm <- dist_normal() x <- norm$sample(n = 10L, with_params = list(mean = 3, sd = 1)) set.seed(1L) norm2 <- dist_normal(sd = 1) x2 <- norm2$sample(n = 10L, with_params = list(mean = 3)) # the same RVs are drawn because the distribution parameters and the seed were the same stopifnot(identical(x, x2))"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"density","dir":"Articles","previous_headings":"Distributions","what":"Density","title":"Working with Distributions","text":"density() function computes density distribution respect natural measure. Use is_discrete_at() check point discrete mass lebesgue density. diff_density() computes gradient density respect free parameter. 
Setting log = TRUE computes gradient log-density, .e., gradient log f(x, params) instead.","code":"norm$density(x, with_params = list(mean = 3, sd = 1)) #> [1] 0.3278626 0.3922715 0.2813724 0.1117603 0.3778620 0.2849269 0.3542572 #> [8] 0.3037652 0.3380030 0.3807663 dnorm(x, mean = 3, sd = 1) #> [1] 0.3278626 0.3922715 0.2813724 0.1117603 0.3778620 0.2849269 0.3542572 #> [8] 0.3037652 0.3380030 0.3807663 norm$density(x, log = TRUE, with_params = list(mean = 3, sd = 1)) # log-density #> [1] -1.1151607 -0.9358010 -1.2680761 -2.1913990 -0.9732262 -1.2555227 #> [7] -1.0377321 -1.1915002 -1.0847006 -0.9655696 norm$is_discrete_at(x, with_params = list(mean = 3, sd = 1)) #> [1] FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE # A discrete distribution with mass only at point = x[1]. dd <- dist_dirac(point = x[1]) dd$density(x) #> [1] 1 0 0 0 0 0 0 0 0 0 dd$is_discrete_at(x) #> [1] TRUE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE FALSE norm$diff_density(x, with_params = list(mean = 3, sd = 1)) #> $mean #> [1] -0.20539076 0.07203805 -0.23512285 0.17828905 0.12450847 -0.23377349 #> [7] 0.17267525 0.22427736 0.19461580 -0.11628160 #> #> $sd #> [1] -0.19919475 -0.37904224 -0.08489705 0.17266080 -0.33683550 -0.09312311 #> [7] -0.27009027 -0.13817569 -0.22594681 -0.34525522"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"probability","dir":"Articles","previous_headings":"Distributions","what":"Probability","title":"Working with Distributions","text":"probability(), c.d.f., survival function, logarithms can computed. discrete distributions, dist$probability(x, lower.tail = TRUE) returns \\(P(X \\le x)\\) dist$probability(x, lower.tail = FALSE) returns \\(P(X > x)\\). Gradients (log-)c.d.f. 
survival function respect parameters can computed using diff_probability().","code":"norm$probability(x, with_params = list(mean = 3, sd = 1)) #> [1] 0.2655087 0.5728534 0.2016819 0.9446753 0.6291140 0.2059746 0.6870228 #> [8] 0.7698414 0.7176185 0.3800352 pnorm(x, mean = 3, sd = 1) #> [1] 0.2655087 0.5728534 0.2016819 0.9446753 0.6291140 0.2059746 0.6870228 #> [8] 0.7698414 0.7176185 0.3800352 dd$probability(x) #> [1] 1 1 0 1 1 0 1 1 1 1 dd$probability(x, lower.tail = FALSE, log.p = TRUE) #> [1] -Inf -Inf 0 -Inf -Inf 0 -Inf -Inf -Inf -Inf norm$diff_probability(x, with_params = list(mean = 3, sd = 1)) #> $mean #> [1] -0.3278626 -0.3922715 -0.2813724 -0.1117603 -0.3778620 -0.2849269 #> [7] -0.3542572 -0.3037652 -0.3380030 -0.3807663 #> #> $sd #> [1] 0.20539076 -0.07203805 0.23512285 -0.17828905 -0.12450847 0.23377349 #> [7] -0.17267525 -0.22427736 -0.19461580 0.11628160"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"hazard","dir":"Articles","previous_headings":"Distributions","what":"Hazard","title":"Working with Distributions","text":"hazard rate defined \\(h(x, \\theta) = f(x, \\theta) / S(x, \\theta)\\), .e., ratio density survival function.","code":"norm$hazard(x, with_params = list(mean = 3, sd = 1)) #> [1] 0.4463805 0.9183533 0.3524565 2.0200785 1.0188091 0.3588385 1.1318948 #> [8] 1.3198083 1.1969728 0.6141740 norm$hazard(x, log = TRUE, with_params = list(mean = 3, sd = 1)) #> [1] -0.80658365 -0.08517306 -1.04282794 0.70313635 0.01863443 -1.02488292 #> [7] 0.12389301 0.27748652 0.17979571 -0.48747702"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"fitting","dir":"Articles","previous_headings":"Distributions","what":"Fitting","title":"Working with Distributions","text":"fit() generic defined Distributions perform maximum likelihood estimation. 
accepts weighted, censored truncated sample class trunc_obs, can automatically convert uncensored, untruncated observations without weight proper trunc_obs object.","code":"# Fit with mean, sd free fit1 <- fit(norm, x) # Fit with mean free fit2 <- fit(norm2, x) # Fit with sd free fit3 <- fit(dist_normal(mean = 3), x) # Fitted parameters fit1$params #> $mean #> [1] 3.132203 #> #> $sd #> [1] 0.7405289 fit2$params #> $mean #> [1] 3.132203 fit3$params #> $sd #> [1] 0.752237 # log-Likelihoods can be computed on AIC(fit1$logLik) #> [1] 26.37096 AIC(fit2$logLik) #> [1] 25.8626 AIC(fit3$logLik) #> [1] 24.68469 # Convergence checks fit1$opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\" fit2$opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\" fit3$opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\""},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"fitting-censored-data","dir":"Articles","previous_headings":"Distributions","what":"Fitting censored data","title":"Working with Distributions","text":"can also fit interval-censored data.","code":"params <- list(mean = 30, sd = 10) x <- norm$sample(100L, with_params = params) xl <- floor(x) xr <- ceiling(x) cens_fit <- fit(norm, trunc_obs(xmin = xl, xmax = xr)) print(cens_fit) #> $params #> $params$mean #> [1] 31.25 #> #> $params$sd #> [1] 9.112857 #> #> #> $opt #> $opt$par #> mean sd #> 31.250000 9.112857 #> #> $opt$value #> [1] 362.9126 #> #> $opt$iter #> [1] 5 #> #> $opt$convergence #> [1] 1 #> #> $opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\" #> #> #> $logLik #> 'log Lik.' 
-362.9126 (df=2)"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"fitting-truncated-data","dir":"Articles","previous_headings":"Distributions","what":"Fitting truncated data","title":"Working with Distributions","text":"possible fit randomly truncated samples, .e., samples truncation bound also random differs observed observation.","code":"params <- list(mean = 30, sd = 10) x <- norm$sample(100L, with_params = params) tl <- runif(length(x), min = 0, max = 20) tr <- runif(length(x), min = 0, max = 60) + tl # truncate_obs() also truncates observations. # if data is already truncated, use trunc_obs(x = ..., tmin = ..., tmax = ...) instead. trunc_fit <- fit(norm, truncate_obs(x, tl, tr)) print(trunc_fit) #> $params #> $params$mean #> [1] 26.72871 #> #> $params$sd #> [1] 8.242123 #> #> #> $opt #> $opt$par #> mean sd #> 26.728710 8.242123 #> #> $opt$value #> [1] 203.8095 #> #> $opt$iter #> [1] 9 #> #> $opt$convergence #> [1] 1 #> #> $opt$message #> [1] \"NLOPT_SUCCESS: Generic success return value.\" #> #> #> $logLik #> 'log Lik.' 
-203.8095 (df=2) attr(trunc_fit$logLik, \"nobs\") #> [1] 62"},{"path":"https://ashesitr.github.io/reservr/articles/distributions.html","id":"plotting","dir":"Articles","previous_headings":"Distributions","what":"Plotting","title":"Working with Distributions","text":"Visualising different distributions, parametrizations, e.g., fits, can done plot_distributions()","code":"# Plot fitted densities plot_distributions( true = norm, fit1 = norm, fit2 = norm2, fit3 = dist_normal(3), .x = seq(-2, 7, 0.01), with_params = list( true = list(mean = 3, sd = 1), fit1 = fit1$params, fit2 = fit2$params, fit3 = fit3$params ), plots = \"density\" ) # Plot fitted densities, c.d.f.s and hazard rates plot_distributions( true = norm, cens_fit = norm, trunc_fit = norm, .x = seq(0, 60, length.out = 101L), with_params = list( true = list(mean = 30, sd = 10), cens_fit = cens_fit$params, trunc_fit = trunc_fit$params ) ) # More complex distributions plot_distributions( bdegp = dist_bdegp(2, 3, 10, 3), .x = c(seq(0, 12, length.out = 121), 1.5 - 1e-6), with_params = list( bdegp = list( dists = list( list(), list(), list( dists = list( list( dist = list( shapes = as.list(1:3), scale = 2.0, probs = list(0.2, 0.5, 0.3) ) ), list( sigmau = 0.4, xi = 0.2 ) ), probs = list(0.7, 0.3) ) ), probs = list(0.15, 0.1, 0.75) ) ) )"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"introduction","dir":"Articles","previous_headings":"","what":"Introduction","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Statistical analyses typically concerned modelling estimating distribution measured variable interest \\(Y\\), called outcome, possibly conditional value one several endogenous variables \\(X\\), called predictors. absence endogenous variables, process usually called distribution fitting, presence endogenous variables called regression. 
Classical regression, via generalized linear models (GLMs), concerned influence endogenous variables mean outcome, .e., \\(\\mathsf{E}(Y|X) = f(X)\\), often links parameters conditional outcome distribution mean. gentle introduction generalized linear models can found Dobson Barnett (2018). implementation GLMs available stats R package, part R (R Core Team 2023). models also allow specification additional parameters conditional outcome distribution, Generalized Additive Models Location, Scale Shape (Stasinopoulos Rigby 2007). recently, deep distributional regression proposed, allows flexible specification individual outcome distribution parameters (Rügamer et al. 2023). Statistical methods (described implemented previously mentioned papers) often require complete data, full information observations \\((X, Y)\\) interest. paper, describe R-package allows distributional regression three common observation schemes provide complete data. First , data interval censoring applied outcome \\(Y\\) refers case lower upper bounds \\(Y\\) observed, instead actual value. Next, truncated data misses observations outcome \\(Y\\) falls certain lower upper truncation bound. consider case random truncation, truncation bounds also random variables may vary observation. Finally, consider combination two, randomly truncated interval censoring. three scenarios can combined single general scheme: instead observing real-valued target variable \\(Y\\) (\\(\\mu\\)-density \\(f_\\theta\\) c.d.f. \\(F_\\theta\\), \\(\\mu\\) sigma-finite measure \\(\\mathbb R\\) \\(\\theta\\) parameter vector parameter space \\(\\Theta\\)), observe vector \\((M, V, L, U)\\), satisfies \\(L \\le M \\le V \\le U\\) \\(L C_1) + \\mathbf{1}(Y > C_2), \\end{align*}\\] define new random variables \\((M, V) = f(Y,C_1,C_2)\\) \\[\\begin{align*} (M, V) & := \\begin{cases} (-\\infty, C_1), & D = 0, \\\\ (C_1, C_2), & D = 1, \\\\ (C_2, \\infty), & D = 2. 
\\end{cases} \\end{align*}\\] Note \\(D\\) can reconstructed \\((M, V)\\): \\(D=0\\) \\(M=-\\infty\\), \\(D=1\\) \\(-\\infty m) \\\\ & = F_\\theta((m, \\infty]) \\cdot \\mathsf{P}(C_2 = m). \\end{align*}\\] assume distribution censoring variable \\((C_1,C_2)\\) non-informative, .e., distribution depend \\(\\theta\\), likelihood observing \\((M, V) = (m, v)\\) equal \\(F_\\theta((m, v])\\), factor depend \\(\\theta\\). similar argumentation can used non-discrete case. Overall, noting \\(F_\\infty((-\\infty, \\infty]) = 1\\), motivated likelihood contribution \\(F_\\theta((m, v]) \\cdot \\mathbf{1}(m < v)\\) censored, untruncated observation (1.1). Next, consider uncensored, truncated observation \\((m, v, l, u)\\) \\(y = m = v\\); may hence identify observation \\((y, l, u)\\). may proceed assume \\((L, U)\\) independent \\(Y\\) satisfies \\(L \\le U\\), \\(L\\) possibly equal \\(-\\infty\\) \\(U\\) possibly equal \\(\\infty\\). , \\((L, U)\\) shall density \\(f_{(L, U)}\\) respect dominating sigma-finite measure \\(\\nu\\). Truncation means happen observe \\((Y, L, U)\\) \\(L < Y \\le U\\). consequence, observed value \\(M = V\\) can regarded drawn \\((\\mu \\otimes \\nu)\\)-density \\[\\begin{align} f_{(Y, L, U) | L < Y \\le U}(y, l, u) = \\frac{f_{(L, U)}(l, u) f_\\theta(y)}{\\mathsf{P}(L < Y \\le U)} \\mathbf{1}(l < y \\le u). \\tag{1.4} \\end{align}\\] Subsequently, write \\((Y^{(t)}, L^{(t)}, U^{(t)})\\) random vector following density, .e., \\[\\begin{align*} f_{(Y^{(t)}, L^{(t)}, U^{(t)})}(y, l, u) = f_{(Y, L, U) | L < Y \\le U}(y, l, u). 
\\end{align*}\\] Conditioning density \\((L^{(t)}, U^{(t)}) = (l, u)\\), arrive expression involve nuisance density \\(f_{(L,U)}\\): \\[\\begin{align*} f_{Y^{(t)} | L^{(t)} = l, U^{(t)} = u}(y) & = \\frac{f_{(Y^{(t)}, L^{(t)}, U^{(t)})}(y, l, u)}{f_{(L^{(t)}, U^{(t)})}(l, u)} \\\\ & = \\frac{f_{(Y, L, U) | L < Y \\le U}(y, l, u)}{\\int_{(l, u]} f_{(Y, L, U) | L < Y \\le U}(z, l, u) \\,\\mathrm{d}\\mu(z)} = \\frac{f_\\theta(y)}{\\int_{(l, u]} f_\\theta(z) \\,\\mathrm{d}\\mu(z)}. \\end{align*}\\] Overall, arrive (conditional) log-likelihood contribution \\(\\log f_\\theta(y) - \\log F_\\theta((l, u])\\) uncensored, truncated observation (1.1). Finally, truncation censoring can occur time, .e., \\(l \\le m < v \\le u\\) either \\(l \\ne -\\infty\\) \\(u \\ne \\infty\\). accordance previous two cases, make assumption \\(Y, (C_1, C_2)\\) \\((L, U)\\) mutually independent satisfy \\(C_1 < C_2\\) \\(L < U\\). Define \\[\\begin{align*} D = \\mathbf{1}(Y > C_1) + \\mathbf{1}(Y > C_2) \\end{align*}\\] \\[\\begin{align*} (M, V) := \\begin{cases} (L, \\min(U, C_1)), & D = 0, \\\\ (\\max(L, C_1), \\min(C_2, U)), & D = 1, \\\\ (\\max(L,C_2), U), & D = 2. \\end{cases} \\end{align*}\\] simplicity, assume random variables discrete. observation \\((m, v, l, u)\\), one following four cases met \\[\\begin{align*} l < m < v < u, \\quad l = m < v < u, \\quad l < m < v = u, \\quad l = m < v = u. \\end{align*}\\] case \\(l < m < v < u\\), \\[\\begin{align*} \\mathsf{P}(M = m, V = v | L = l, U = u, L < Y \\le U) & = \\frac{\\mathsf{P}(C_1 = m, C_2 = v, Y \\(m, v], L = l, U = u)}{\\mathsf{P}(L = l, U = u, l < Y \\le u)} \\\\ & = \\frac{\\mathsf{P}(C_1 = m, C_2 = v) F_\\theta((m, v])}{F_\\theta((l, u])} \\end{align*}\\] independence assumption. factor front depend \\(\\theta\\) irrelevant (conditional) likelihood contribution. 
Likewise, case \\(l = m < v < u\\), \\[\\begin{align*} \\mathsf{P}(M = l, V = v | L = l, U = u, L < Y \\le U) & = \\frac{\\mathsf{P}(M = l, V = v, L = l, U = u, l < Y \\le u)}{\\mathsf{P}(L = l, U = u, l < Y \\le u)}. \\end{align*}\\] definition \\((M,V)\\), event numerator disjoint union following two sets: \\[\\begin{align*} & \\{D = 0, C_1 = v, L = l, U = u, l < Y \\le u\\} = \\{C_1 = v, L = l, U = u, Y \\(l, v]\\} \\\\ & \\{D = 1, C_1 \\le l, C_2 = v, L = l, U = u, l < Y \\le u\\} = \\{C_1 \\le l, C_2 = v, L = l, U = u, Y \\(l, v]\\}. \\end{align*}\\] independence, obtain \\[\\begin{align*} \\mathsf{P}(M = l, V = v | L = l, U = u, L < Y \\le U) = \\{\\mathsf{P}(C_1 = v) + \\mathsf{P}(C_1 \\le l, C_2 = v)\\} \\frac{F_\\theta((l, v])}{F_\\theta((l, u])}. \\end{align*}\\] , factor front fraction independent \\(\\theta\\) irrelevant likelihood. two cases \\(l < m < v = u\\) \\(l = m < v = u\\) can treated similarly; cases, likelihood contribution equal \\(F_\\theta((m, v]) /F_\\theta((l, u])\\) times factor depend \\(\\theta\\).","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"related-packages","dir":"Articles","previous_headings":"1 Introduction","what":"Related packages","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"less general cases non-informative censoring without random truncation fixed truncation, .e., \\((L, U)\\) constant observations, well estimation distribution parameters absence censoring random truncation, number R packages can fit distributions, also supporting weights. Among MASS (Venables Ripley 2002), fitdistrplus (Delignette-Muller Dutang 2015), survival (Therneau 2023), flexsurv (Jackson 2016). Note fixed truncation operation can baked distribution family whose parameters estimated, allowing classical maximum likelihood estimation. Many packages also support classic regression expected values given predictors. 
Distributional regression packages, gamlss (Stasinopoulos Rigby 2007) deepregression (Rügamer et al. 2023) currently support interval censoring random truncation. See following table overview available features package. Another R6-based interface provided ROOPSD (Robin 2022). reservr builds upon R packages tensorflow (Allaire Tang 2022) keras (Chollet, Allaire, et al. 2017) interface machine learning library TensorFlow (Abadi et al. 2015) perform distributional regression. underlying infrastructure shared distributional regression package deepregression (Rügamer et al. 2023). latter also supports distributional regression, time writing requires complete samples support truncation censoring. remaining parts paper structured follows: Section 2 details core functionality corresponding R package reservr. split definition samples \\(\\mathfrak{}\\) (Section 2.1), definition distribution families (Section 2.2), mathematical definitions available distribution families (Section 2.3), estimation distribution parameters (Section 2.4) distributional regression using tensorflow (Section 2.5). conclusion given Section 3.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"pkg-overview","dir":"Articles","previous_headings":"","what":"Usage of reservr","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"package serves two main goals: fitting distributions randomly truncated non-informatively interval censored data performing (deep) distributional regression randomly truncated non-informatively interval censored data. Four main components integrated facilitate analysis goals Methods representing randomly truncated non-informatively interval censored sample \\(\\mathfrak{}\\). Methods specifying parametrized distribution family \\(\\mathcal{F} = \\{F_\\theta | \\theta \\\\Theta\\}\\) fitted. Methods estimating distribution parameters \\(\\theta\\) given sample \\(\\mathfrak{}\\). 
Methods regression distribution parameters given regression sample \\(\\mathfrak{}_{\\text{reg}}\\), parametrized family \\(\\mathcal{F}\\) general tensorflow network \\(\\mathcal{G} : \\mathfrak{X} \\\\Theta\\) processes \\(X\\) estimate conditional distribution \\(Y | X = x\\) \\(F_{g(x)}\\) \\(g \\\\mathcal G\\). components described one one following sections.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"trunc-obs","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Working with samples","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"sample \\(\\mathfrak{} = \\{(m, v, l, u, w)_i\\}\\) represented tibble (package tibble). core function create tibble trunc_obs(). tibble created trunc_obs() consists five columns: x: observed, exact value random variable, referred \\(Y\\) Section 1. Otherwise NA. xmin: Lower interval censoring bound (\\(M\\) Section 1) observation. observation censored, xmin equal x. xmax: Upper interval censoring bound (\\(V\\) Section 1) observation. observation censored, xmax equal x. tmin: Lower truncation bound (\\(L\\) Section 1). observations \\(\\mathtt{x} \\ge \\mathtt{tmin}\\) observed. Can \\(-\\infty\\) indicate lower truncation. tmax: Upper truncation bound (\\(U\\) Section 1). observations \\(\\mathtt{x} \\le \\mathtt{tmax}\\) observed. Can \\(\\infty\\) indicate upper truncation. w: weight associated observation. Defaults \\(1\\). Note , unlike Section 1, lower bounds intervals trunc_obs included, , allow \\(\\mathtt{x} \\ge \\mathtt{tmin}\\) rather \\(\\mathtt{x} > \\mathtt{tmin}\\), unknown variable interest called \\(\\mathtt{x}\\) instead \\(Y\\). continuous random variables, formulas equivalent half-open formulation. discrete random variables, \\(\\mathtt{xmin}\\) \\(\\mathtt{tmin}\\) may appropriately shifted, e.g., replacing \\(\\mathtt{xmin}\\) \\(\\mathtt{xmin}-0.5\\) integer valued variables. 
following code defines sample size 1 without truncation censoring, realized value \\(1.3\\). Simulating randomly truncated interval censored data standard normal distribution \\(80\\%\\) observations randomly interval censored random uniform truncation \\(L \\sim \\mathrm{Unif}[-2, 0]\\) \\(U \\sim \\mathrm{Unif}[0, 2]\\) can simulated follows Observations look like: total number observations smaller base population \\(1000\\) due truncation: total number censored observations roughly \\(0.8 \\cdot \\mathtt{nrow(obs)}\\). addition trunc_obs() constructor function, functions as_trunc_obs() coercion, truncate_obs() artificially changing truncation bounds, repdel_obs() computing randomly truncated reporting delay observations general insurance claims data containing accident date, reporting delay evaluation date information. latter takes inputs form \\((T_\\text{acc}, D, \\tau)\\) \\(T_{\\text{acc}} < \\tau\\) accident dates corresponding reporting delays \\(D \\ge 0\\) \\(\\tau\\) calendar date observation. returns sample \\((\\mathtt{xmin} = \\mathtt{xmax} = D, \\mathtt{tmin} = 0, \\mathtt{tmax} = \\tau - T_{\\text{acc}}, \\mathtt{w} = 1)\\) suitable estimating reporting delay distribution claim observed reported evaluation date, .e., \\(T_{\\text{acc}} + D \\le \\tau\\). 
analysis performed using reservr .","code":"trunc_obs(1.3) ## # A data frame: 1 × 6 ## x xmin xmax tmin tmax w ## ## 1 1.3 1.3 1.3 -Inf Inf 1 set.seed(123) N <- 1000L x <- rnorm(N) is_censored <- rbinom(N, size = 1L, prob = 0.8) == 1L c_lower <- runif(sum(is_censored), min = -2.0, max = 0.0) c_upper <- c_lower + runif(sum(is_censored), min = 0, max = 1.0) x_lower <- x x_upper <- x x_lower[is_censored] <- dplyr::case_when( x[is_censored] <= c_lower ~ -Inf, x[is_censored] <= c_upper ~ c_lower, TRUE ~ c_upper ) x_upper[is_censored] <- dplyr::case_when( x[is_censored] <= c_lower ~ c_lower, x[is_censored] <= c_upper ~ c_upper, TRUE ~ Inf ) t_lower <- runif(N, min = -2.0, max = 0.0) t_upper <- runif(N, min = 0.0, max = 2.0) is_observed <- t_lower <= x & x <= t_upper obs <- trunc_obs( xmin = pmax(x_lower, t_lower)[is_observed], xmax = pmin(x_upper, t_upper)[is_observed], tmin = t_lower[is_observed], tmax = t_upper[is_observed] ) obs[8L:12L, ] ## # A tibble: 5 × 6 ## x xmin xmax tmin tmax w ## ## 1 NA -0.479 1.15 -1.93 1.15 1 ## 2 NA -0.177 1.79 -0.210 1.79 1 ## 3 -0.556 -0.556 -0.556 -0.957 0.791 1 ## 4 NA -0.379 0.616 -0.379 0.616 1 ## 5 NA 0.0575 1.45 -0.437 1.45 1 nrow(obs) ## [1] 623 sum(is.na(obs$x)) ## [1] 496"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"distributions","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Definition of distribution families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Distribution families implemented using R6 class system (Chang 2021). 
inherit class Distribution feature common interface manage fixed free parameters underlying familiy, use basic distribution functions random number generation computation density, cumulative distribution, hazard quantile function, use additional functions supporting parameter estimation procedures computing support presence point mass, compile performance enhanced functions speed basic functions repeated evaluation, provide tensorflow-specific implementations support (deep) distributional regression. Distribution object represents distribution family \\(\\mathcal{F}\\) supported subset real line parameterized fixed finite-dimensional parameter space \\(\\Theta\\). family may singleton, case rather distribution distribution family. reservr provides set basic distribution families, optionally fixed parameters, well transformations distribution families take one underlying distribution families. time writing, :","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"parameters","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"Parameters","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Parameters distribution families can either fixed constant value, free. Free parameters (placeholders) estimated data whereas fixed parameters held constant. Distribution methods argument with_params provide values free parameters need fully specified parameters work. example, generating samples distribution possible fully parameterized using fixed parameters with_params argument Distribution$sample(). now defined dist normal distribution family standard deviation \\(1\\) free mean. Since parameters required normal distribution fixed, dist$sample() error provided mean parameter. with_params argument can used provide free parameters override fixed parameters, necessary. 
two observations drawn standard normal normal distribution mean zero standard deviation \\(2\\), respectively. Since chosen seed identical, second sample exactly double first sample. Whenever output length greater one, taking one sample, with_params can optionally contain individual parameters entry. three observations drawn \\(\\mathcal{N}(\\mu = 0, \\sigma = 0.5)\\), \\(\\mathcal{N}(\\mu = 1, \\sigma = 0.5)\\) \\(\\mathcal{N}(\\mu = 2, \\sigma = 0.5)\\), respectively. Distributions set fields methods related managing parameters: active binding default_params gets sets list parameters fixed values, NULL represents free parameter. Component families included Distribution objects. get_params() gets list parameters fixed values, traversing component distribution families. get_placeholders() gets list free parameters NULL values. active binding param_bounds gets sets domain regular family parameters Interval object. Setting bound via param_bounds active binding allows restricting natural parameter space family. get_param_bounds() returns bounds free parameters list Intervals, traversing component distribution families. get_param_constraint() returns NULL function evaluates constraints parameter set. function must return vector constraint values (need equal \\(0\\) valid parameters) list elements constraints jacobian. returning list, jacobian element contain jacobian constraint function. Used nloptr::slsqp(heq=) estimation. example mixture families require probs parameters sum \\(1\\) addition box constraint parameter \\([0, 1]\\). Note box constraints handled param_bounds need specified constraint function. get_components() returns list component families transformations mixtures. list empty basic families. 
example normal family fixed standard deviation \\(\\sigma = 1\\) mixture distribution family two components, one specified normal distribution family:","code":"dist <- dist_normal(sd = 1.0) dist$sample(1L) ## Error in (function (n, mean = 0, sd = 1) : invalid arguments set.seed(10L) dist$sample(1L, with_params = list(mean = 0.0)) ## [1] 0.01874617 set.seed(10L) dist$sample(1L, with_params = list(mean = 0.0, sd = 2.0)) ## [1] 0.03749234 set.seed(10L) dist$sample(3L, with_params = list(mean = 0.0:2.0, sd = 0.5)) ## [1] 0.009373085 0.907873729 1.314334725 dist <- dist_normal(sd = 1.0) mix <- dist_mixture(dists = list(dist_normal(), NULL)) dist$default_params ## $mean ## NULL ## ## $sd ## [1] 1 mix$default_params ## $dists ## $dists[[1]] ## A NormalDistribution with 2 dof ## ## $dists[[2]] ## NULL ## ## ## $probs ## $probs[[1]] ## NULL ## ## $probs[[2]] ## NULL str(dist$get_placeholders()) ## List of 1 ## $ mean: NULL str(mix$get_placeholders()) ## List of 2 ## $ dists:List of 2 ## ..$ :List of 2 ## .. ..$ mean: NULL ## .. ..$ sd : NULL ## ..$ : NULL ## $ probs:List of 2 ## ..$ : NULL ## ..$ : NULL str(dist$param_bounds) ## List of 2 ## $ mean:Classes 'Interval', 'R6' (-Inf, Inf) ## $ sd :Classes 'Interval', 'R6' (0, Inf) str(mix$param_bounds) ## List of 2 ## $ dists:List of 1 ## ..$ : NULL ## $ probs:List of 1 ## ..$ :Classes 'Interval', 'R6' [0, 1] str(dist$get_param_bounds()) ## List of 1 ## $ mean:Classes 'Interval', 'R6' (-Inf, Inf) str(mix$get_param_bounds()) ## List of 2 ## $ dists:List of 1 ## ..$ :List of 2 ## .. ..$ mean:Classes 'Interval', 'R6' (-Inf, Inf) ## .. 
..$ sd :Classes 'Interval', 'R6' (0, Inf) ## $ probs:List of 2 ## ..$ :Classes 'Interval', 'R6' [0, 1] ## ..$ :Classes 'Interval', 'R6' [0, 1] str(dist$get_param_constraints()) ## NULL str(mix$get_param_constraints()) ## function (params) dist$get_components() ## list() mix$get_components() ## [[1]] ## A NormalDistribution with 2 dof ## ## [[2]] ## NULL"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"basic-distribution-functions","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"Basic distribution functions","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"basic distribution functions (density, probability, hazard quantile function, well random number generation) provided distribution family. general, argument with_params can used specify missing parameters (placeholders) override fixed distribution parameters. provided parameters vectors length greater 1, must conform input dimension (e.g. length(x) density). case, parameters “vectorized” sense \\(\\)th output element computed using \\(\\)th entry parameter list. density(x, log = FALSE, with_params = list()) computes (log-)density. probability(q, lower.tail = TRUE, log.p = FALSE, with_params = list() computes (log-)cumulative distribution function (log-)survival function. hazard(x, log = FALSE. with_params = list()) computes (log-)hazard function. quantile(p, lower.tail = TRUE, log.p = FALSE, with_params = list()) computes upper lower quantiles. sample(n, with_params = list()) generates random sample size n. 
(with_params can contain length n vectors case).","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"additional-functions","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"Additional functions","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"addition basic functions, several supporting functions useful , e.g., estimation parameters. export_functions(name, with_params = list()) exports {d,p,q,r} functions adhering common R convention distribution functions. get_type() returns one \"continuous\", \"discrete\", \"mixed\" depending whether distribution family density respect Lebesgue measure, counting measure, sum Lebesgue measure one many point measures. is_continuous() is_discrete() testing particular type. has_capability(caps) gives information whether specific implementation provides features described. Possible capabilities \"sample\", \"density\", \"probability\", \"quantile\", \"diff_density\", \"diff_probability\", \"tf_logdensity\", \"tf_logprobability\". require_capability(caps) errors specified capabilities implemented family hand. is_discrete_at(x, with_params = list()) returns logical vector indicating whether distribution point mass x. is_in_support(x, with_params = list()) returns logical vector indicating whether distribution mass x.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"performance-enhancements","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"Performance enhancements","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"working larger data many calls distribution functions, performing fit, can beneficial just--time compile specialized functions avoid overhead dealing generic structure distributions parametrization. 
Distributions offer set “compiler” functions return simplified, faster, versions basic distribution functions, analytically compute gradients. functions necessarily implemented Distribution classes, automatically used , e.g., fit_dist() useful. input structure param_matrix can obtained flatten_params_matrix(dist$get_placeholders()) dist Distribution object question. compile_density() compiles fast function signature (x, param_matrix, log = FALSE) compute density fixed parameters hard-coded taking free parameters matrix defined layout instead nested list. compile_probability() compiles fast replacement probability signature (q, param_matrix, lower.tail = TRUE, log.p = FALSE). compile_probability_interval() compiles fast function signature (qmin, qmax, param_matrix, log.p = FALSE) computing \\(P(X \\[\\mathtt{qmin}, \\mathtt{qmax}])\\) logarithm efficiently. expression necessary computing truncation probabilities. compile_sample() compiles fast replacement sample signature (n, param_matrix). diff_density(x, log = FALSE, with_params = list()) computes (log-)gradients density function respect free distribution family parameters, useful maximum likelihood estimation. diff_probability(q, lower.tail = TRUE, log.p = FALSE, with_params = list()) computes (log-)gradients cumulative density function respect free distribution family parameters. useful conditional maximum likelihood estimation presence random truncation non-informative interval censoring.","code":"dist <- dist_normal() flatten_params_matrix(dist$get_placeholders()) ## mean sd ## [1,] NA NA denscmp <- dist$compile_density() if (requireNamespace(\"bench\", quietly = TRUE)) { bench::mark( dist$density(-2:2, with_params = list(mean = 0.0, sd = 1.0)), denscmp(-2:2, matrix(c(0.0, 1.0), nrow = 5L, ncol = 2L, byrow = TRUE)), dnorm(-2:2, mean = rep(0.0, 5L), sd = rep(1.0, 5L)) ) } ## # A tibble: 3 × 6 ## expression min median `itr/sec` mem_alloc `gc/sec` ## ## 1 dist$density(-2:2, with_params =… 26.01µs 28µs 34686. 
0B 17.4 ## 2 denscmp(-2:2, matrix(c(0, 1), nr… 4.12µs 4.71µs 204950. 0B 41.0 ## 3 dnorm(-2:2, mean = rep(0, 5L), s… 1.68µs 1.92µs 492249. 2.58KB 0"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"tensorflow-interface","dir":"Articles","previous_headings":"2 Usage of reservr > 2.2 Definition of distribution families","what":"tensorflow interface","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Use distribution families within tensorflow networks requires specialized implementations using tensorflow APIs instead regular R functions. tailored needs maximizing (conditional) likelihoods weighted, censored randomly truncated data. Details working tensorflow can found Section 2.5. tf_compile_params(input, name_prefix = \"\") creates keras layers take input layer transform valid parametrization distribution family. tf_is_discrete_at() returns tensorflow-ready version is_discrete_at(). tf_logdensity() returns tensorflow-ready version compile_density() implied log = TRUE. tf_logprobability() returns tensorflow-ready version pf compile_probability_interval() implied log.p = TRUE. tf_make_constants() creates list constant tensors fixed distribution family parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-definitions","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Special families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"distribution families available reservr tailored algorithms parameter estimation, commonly known. 
section contains mathematical definitions function families.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-mixture","dir":"Articles","previous_headings":"2 Usage of reservr > 2.3 Special families","what":"Mixture distribution families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"mixture distribution family defined fixed number \\(k\\) component families \\(\\{\\mathcal{F}_i\\}_{= 1}^k\\) via set distributions \\[\\begin{align*} \\mathop{\\mathrm{Mixture}}(\\mathcal{F}_1, \\ldots, \\mathcal{F}_k) & := \\Bigl\\{ F = \\sum_{= 1}^k p_i F_i \\Bigm| F_i \\\\mathcal{F}_i, p_i \\[0, 1], \\sum_{= 1}^k p_i = 1 \\Bigr\\}. \\end{align*}\\]","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-erlangmix","dir":"Articles","previous_headings":"2 Usage of reservr > 2.3 Special families","what":"Erlang mixture distribution families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Erlang mixture distribution family defined number components \\(k\\) mixture Erlang distributions (Gamma distributions integer shape parameter) common scale parameter. \\(\\Gamma_{\\alpha, \\theta}\\) denotes Gamma distribution shape \\(\\alpha\\) scale \\(\\theta\\), erlang mixture family \\(k\\) components can defined follows: \\[\\begin{align*} \\mathop{\\mathrm{ErlangMixture}}(k) := \\Bigl\\{ F = \\sum_{= 1}^k p_i \\Gamma_{\\alpha_i, \\theta} \\Bigm| \\alpha_i \\\\mathbb{N}, \\theta \\(0, \\infty), p_i \\[0, 1], \\sum_{= 1}^k p_i = 1 \\Bigr\\}. \\end{align*}\\] Note \\(k \\\\infty\\), Erlang mixtures dense space distributions \\((0, \\infty)\\) respect weak convergence (Lee Lin 2012), making useful modeling choice general positive continuous distributions. 
However, tail index Erlang mixture distributions always zero due exponential decay Gamma densities.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-blended","dir":"Articles","previous_headings":"2 Usage of reservr > 2.3 Special families","what":"Blended distribution families","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Blended distribution defined follows: Given two underlying distributions \\(P, Q\\) \\(\\mathbb{R}\\) cdfs \\(F(\\cdot)=P((-\\infty, \\cdot])\\) \\(G(\\cdot)=Q((-\\infty, \\cdot])\\), respectively, parameters \\(\\kappa \\\\mathbb{R}, \\varepsilon \\(0, \\infty), p_1, p_2 \\[0, 1], p_1 + p_2 = 1\\) \\(F(\\kappa) > 0\\) \\(G(\\kappa) < 1\\), define Blended Distribution \\(B = \\mathop{\\mathrm{Blended}}(P, Q; p, \\kappa, \\varepsilon)\\) \\(P\\) \\(Q\\) blending interval \\([\\kappa - \\varepsilon, \\kappa + \\varepsilon]\\) mixture probabilities \\(p\\) via cdf \\(F_B\\): \\[\\begin{align*} p_{\\kappa, \\varepsilon}(x) &= \\begin{cases} x & , x \\(-\\infty, \\kappa-\\varepsilon],\\\\ \\tfrac12 (x + \\kappa - \\varepsilon) + \\tfrac\\varepsilon\\pi \\cos\\Big( \\frac{\\pi (x - \\kappa)}{2 \\varepsilon} \\Big) &, x \\(\\kappa-\\varepsilon , \\kappa+\\varepsilon], \\\\ \\kappa &, x \\(\\kappa +\\varepsilon, \\infty), \\end{cases} \\\\ \\nonumber q_{\\kappa, \\varepsilon}(x) & = \\begin{cases} \\kappa & , x \\(-\\infty, \\kappa-\\varepsilon],\\\\ \\tfrac12 (x + \\kappa + \\varepsilon) - \\tfrac\\varepsilon\\pi \\cos\\Big( \\frac{\\pi (x - \\kappa)}{2 \\varepsilon} \\Big) &, x \\(\\kappa-\\varepsilon , \\kappa+\\varepsilon], \\\\ x &, x \\(\\kappa +\\varepsilon, \\infty), \\end{cases} \\\\ F_B(x) & = p_1 \\frac{F(p_{\\kappa, \\varepsilon}(x))}{F(\\kappa)} + p_2 \\frac{G(q_{\\kappa, \\varepsilon}(x)) - G(\\kappa)}{1 - G(\\kappa)}. 
\\end{align*}\\] following illustration shows components \\(\\mathrm{Blended}(\\mathcal{N}(\\mu = -1, \\sigma = 1), \\mathrm{Exp}(\\lambda = 1); p = (0.5, 0.5), \\kappa = 0, \\varepsilon = 1)\\) distribution. transformation original component distributions (\\(\\mathcal{N}\\) \\(\\mathrm{Exp}\\)) can illustrated first right- left-truncating \\(\\kappa = 0\\) respectively, applying blending transformations \\(p_{\\kappa, \\varepsilon}\\) \\(q_{\\kappa, \\varepsilon}\\). latter distributions can obtained reservr setting probability weights blended distribution \\(p = (1, 0)\\) \\(p = (0, 1)\\) respectively. Intermediate truncated distributions obtained via trunc_dist(), \\(\\kappa\\) upper lower bound respectively. show resulting density steps, final blended density obtained weighting blended component densities. definition blended distribution leads definition blended distribution family allowing \\(P, Q, \\kappa\\) \\(\\varepsilon\\) vary: Given two families \\(\\mathcal{F}, \\mathcal{G}\\) distributions \\(\\mathbb{R}\\), parameters \\(\\kappa \\\\mathbb{R}, \\varepsilon \\(0, \\infty)\\), define Blended Distribution family family Distributions \\[\\begin{align*} \\mathop{\\mathrm{Blended}}(\\mathcal{F}, \\mathcal{G}; \\kappa, \\varepsilon) & := \\{ \\mathop{\\mathrm{Blended}}(P, Q ; p, \\kappa, \\varepsilon) \\mid P \\\\mathcal{F}, Q \\\\mathcal{G}, p_1, p_2 \\[0, 1], p_1 + p_2 = 1 \\}. \\end{align*}\\] Blended distribution families can generalized number components \\(k\\) letting \\(\\kappa\\) \\(\\varepsilon\\) become vectors dimension \\(k - 1\\) \\(\\kappa_i + \\varepsilon_i \\le \\kappa_{+ 1} - \\varepsilon_{+ 1}\\) \\(= 1, \\ldots, k - 2\\). 
Compared piecewise distribution families obtained mixture truncated distribution families supports \\((-\\infty, \\kappa]\\) \\([\\kappa, \\infty)\\) commonly used extreme value modelling, blended distribution families exhibit continuous density within blending region \\((\\kappa - \\varepsilon, \\kappa + \\varepsilon)\\). reservr provides implementation via dist_blended(), limited support two component families.","code":"dist1 <- dist_normal(mean = -1.0, sd = 1.0) dist2 <- dist_exponential(rate = 1.0) distb <- dist_blended( dists = list(dist1, dist2), breaks = list(0.0), bandwidths = list(1.0), probs = list(0.5, 0.5) ) distt1 <- dist_trunc(dist1, min = -Inf, max = 0.0) distt2 <- dist_trunc(dist2, min = 0.0, max = Inf) distb1 <- distb$clone() distb1$default_params$probs <- list(1.0, 0.0) distb2 <- distb$clone() distb2$default_params$probs <- list(0.0, 1.0)"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"dist-bdegp","dir":"Articles","previous_headings":"2 Usage of reservr > 2.3 Special families","what":"The Blended Dirac Erlang Generalized Pareto distribution family","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Using construction Blended distribution family, can define Blended Dirac Erlang Generalized Pareto (BDEGP) family follows, see . 
Given parameters \\(n \\\\mathbb{N}, m \\\\mathbb{N}, \\kappa \\\\mathbb{R}\\) \\(\\varepsilon \\(0, \\infty)\\), define Blended Dirac Erlang Generalized Pareto family family distributions \\[\\begin{align*} \\mathop{\\mathrm{BDEGP}}(n, m, \\kappa, \\varepsilon) := & \\mathop{\\mathrm{Mixture}}( \\\\ & \\qquad \\{\\delta_0\\}, \\{\\delta_1\\}, \\ldots, \\{\\delta_{n-1}\\}, \\\\ & \\qquad \\mathop{\\mathrm{Blended}}( \\\\ & \\qquad\\qquad \\mathop{\\mathrm{ErlangMixture}}(m), \\\\ & \\qquad\\qquad \\{ \\mathop{\\mathrm{GPD}}(\\kappa, \\sigma, \\xi) \\mid \\sigma \\(0, \\infty), \\xi \\[0, 1)) \\}; \\\\ & \\qquad\\qquad \\kappa, \\varepsilon \\\\ & \\qquad) \\\\ &), \\end{align*}\\] \\(\\delta_k\\) dirac distribution \\(k\\) \\(\\mathrm{GPD}\\) generalized Pareto distribution. Note constraint tail index \\(\\xi \\[0, 1)\\), guaranteeing finite expectation. distribution family three features making useful modelling general heavy-tailed distributions \\((0, \\infty)\\): maximally flexible lower tail flexible family distributions body flexible tail index due generalized Pareto component","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"fit-dist","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Methods of estimating distribution parameters","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"section describes functions problem estimating parameter \\(\\theta \\\\Theta\\) given sample \\(\\mathfrak{}\\) parameterized family \\(\\mathcal{F} = \\{F_\\theta \\mid \\theta \\\\Theta\\}\\). Sometimes, conditional log-likelihood (1.2) can directly maximized, yielding estimate \\(\\theta\\). default behavior reservr specialized estimation routine provided family \\(\\mathcal{F}_\\theta\\) defined. 
Depending whether box constraints, nonlinear constraints constraints parameter space \\(\\Theta\\), different implementations nonlinear optimization algorithms nloptr (Johnson 2007), particular truncated Newton (Dembo Steihaug 1983) unconstrained families, L-BFGS (Liu Nocedal 1989) box-constrained families SLSQP (Kraft 1994) general constrained families employed. addition naive direct optimization approach, families lend specialized estimation algorithms usually show faster convergence due making use special structures parameter space \\(\\Theta\\). Estimating distribution parameters truncated observations handled using generic fit() method. delegates fit_dist(), also generic signature: dist: distribution family fit obs: trunc_obs object, vector observed values start: Starting parameters, list compatible dist$get_placeholders(). time writing specialized algorithms six types families: Blended distribution families Erlang mixture distribution families Generalized pareto distribution families free lower bound u (estimated minimum xmin sample) Mixture distribution families Translated distribution families fixed offset multiplier (transform sample via \\(\\tfrac{\\cdot-\\text{offset}}{\\text{multiplier}}\\) fit component distribution family transformed sample) Uniform distribution families free lower bound min upper bound max (estimated minimum xmin, min, maximum xmax, max, sample) present, start parameter obtained via fit_dist_start() generic. generic implements family specific method generating valid starting values placeholder parameters. notable implementation fit_dist_start.ErlangMixtureDistribution() Erlang mixture distribution families. shape parameters free, different initialization strategies can chosen using additional arguments fit_dist_start(): init = \"shapes\" paired shapes = c(...) 
manually specifies starting shape parameters \\(\\alpha\\) init = \"fan\" paired spread = d uses \\(\\alpha = (1, 1 + d, \\ldots, 1 + (k - 1) \\cdot d)\\) default \\(d = 1\\) resulting \\(\\alpha = (1, \\ldots, k)\\) init = \"kmeans\" uses 1-dimensional K-means based clustering sample observations cluster corresponds unique shape init = \"cmm\" uses centralized method moments procedure described Re-using dist <- dist_normal(sd = 1.0) generated sample obs, can fit free parameter mean: Using function plot_distributions() can also assess quality fit. , density labelled empirical corresponds kernel density estimate automatic bandwidth selection. follow example fitting \\(\\mathrm{ErlangMixture}(3)\\) distribution family using various initialization strategies. Note , \"kmeans\" \"cmm\" use random number generator internal K-means clustering. necessitates setting constant seed running fit_dist_start() fit() ensure chosen starting parameters calls. noted different initialization methods considerable impact outcome example due discrete nature Erlang mixture distribution shape parameters thus combinatorial difficulty picking optimal shapes \\(\\alpha\\). fit() result Erlang mixture distribution families contains element named \"params_hist\". can populated passing trace = TRUE fit() record parameters ECME steps ECME-based estimation algorithms . element \"iter\" contains number full ECME-Iterations performed.","code":"dist <- dist_normal(sd = 1.0) the_fit <- fit(dist, obs) str(the_fit) ## List of 3 ## $ params:List of 1 ## ..$ mean: num 0.0822 ## $ opt :List of 5 ## ..$ par : Named num 0.0822 ## .. 
..- attr(*, \"names\")= chr \"mean\" ## ..$ value : num 341 ## ..$ iter : int 7 ## ..$ convergence: int 1 ## ..$ message : chr \"NLOPT_SUCCESS: Generic success return value.\" ## $ logLik:Class 'logLik' : -341 (df=1) plot_distributions( true = dist, fitted = dist, empirical = dist_empirical(0.5 * (obs$xmin + obs$xmax)), .x = seq(-5, 5, length.out = 201), plots = \"density\", with_params = list( true = list(mean = 0.0, sd = 1.0), fitted = the_fit$params ) ) dist <- dist_erlangmix(list(NULL, NULL, NULL)) params <- list( shapes = list(1L, 4L, 12L), scale = 2.0, probs = list(0.5, 0.3, 0.2) ) set.seed(1234) x <- dist$sample(100L, with_params = params) set.seed(32) init_true <- fit_dist_start(dist, x, init = \"shapes\", shapes = as.numeric(params$shapes)) init_fan <- fit_dist_start(dist, x, init = \"fan\", spread = 3L) init_kmeans <- fit_dist_start(dist, x, init = \"kmeans\") init_cmm <- fit_dist_start(dist, x, init = \"cmm\") rbind( flatten_params(init_true), flatten_params(init_fan), flatten_params(init_kmeans), flatten_params(init_cmm) ) ## shapes[1] shapes[2] shapes[3] scale probs[1] probs[2] probs[3] ## [1,] 1 4 12 1.590800 0.43 0.33 0.24 ## [2,] 1 4 7 2.688103 0.55 0.32 0.13 ## [3,] 1 5 13 1.484960 0.43 0.36 0.21 ## [4,] 2 10 24 1.010531 0.56 0.27 0.17 set.seed(32) str(fit(dist, x, init = \"shapes\", shapes = as.numeric(params$shapes))) ## List of 4 ## $ params :List of 3 ## ..$ probs :List of 3 ## .. ..$ : num 0.43 ## .. ..$ : num 0.33 ## .. ..$ : num 0.24 ## ..$ shapes:List of 3 ## .. ..$ : num 1 ## .. ..$ : num 4 ## .. ..$ : num 13 ## ..$ scale : num 1.59 ## $ params_hist: list() ## $ iter : int 1 ## $ logLik :Class 'logLik' : -290 (df=6) fit(dist, x, init = \"fan\", spread = 3L)$logLik ## 'log Lik.' -292.0026 (df=6) fit(dist, x, init = \"kmeans\")$logLik ## 'log Lik.' -289.2834 (df=6) fit(dist, x, init = \"cmm\")$logLik ## 'log Lik.' 
-293.1273 (df=6)"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"tensorflow","dir":"Articles","previous_headings":"2 Usage of reservr","what":"Distributional regression using tensorflow integration","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"maximization problem (1.3) delegated tensorflow, supplies ample stochastic optimization algorithms. Functions reservr necessary create suitable output layer tensorflow maps onto \\(\\Theta\\) provide implementation (negative) log-likelihood (1.3) loss function. two tasks combined tf_compile_model(). function returns object class reservr_keras_model, can used estimation procedure. Given input layers inputs intermediate output layer intermediate_output well family distributions dist, function Compiles loss dist defined (1.3) \\(l(g) = -\\tfrac{1}{\\#(\\mathfrak{}_{\\mathrm{reg}})} \\ell(g|\\mathfrak{}_{\\mathrm{reg}})\\), optionally disabling censoring truncation efficiency. Creates list final output layers mapping intermediate_output onto parameter space \\(\\Theta\\) dist using Distribution$tf_compile_params(). step adds additional degrees freedom overall model, approach described Runs keras3::compile() underlying keras.src.models.model.Model. following example defines linear model homoskedasticity assumption fits using \\(100\\) iterations Adam optimization algorithm (Kingma Ba 2015). First, simulate data \\((Y,X)\\) model defined \\(X \\sim \\mathrm{Unif}(10,20)\\) \\(Y | X =x \\sim \\mathcal{N}(\\mu = 2x, \\sigma = 1)\\). Next, specify distribution family \\(\\mathcal{F} = \\{\\mathcal{N}(\\mu, \\sigma = 1) | \\mu\\\\mathbb R\\}\\), incorporating homoskedasticity assumption. Using keras, define empty neural network, just taking \\(x\\) input performing transformation. , tf_compile_model() adapts input layer free parameter space \\(\\Theta = \\mathbb{R}\\). 
introduces two parameters function family \\(\\mathcal{G}\\) implies functional relationship \\(\\mu = g(x) := \\theta_1 \\cdot x + \\theta_0\\). Since sample fully observed, disable censoring truncation, leading simplified loss \\[\\begin{align*} l(g) = -\\tfrac{1}{100} \\sum_{x, y} \\log f_{g(x)}(y), \\end{align*}\\] \\(f_\\mu(y)\\) density \\(\\mathcal{N}(\\mu=\\mu, \\sigma=1)\\) evaluated \\(y\\). fit can now performed, modifying parameters (weights) nnet -place. Note argument y fit accepts trunc_obs object. example, vector y silently converted untruncated, uncensored trunc_obs object. fit() returns keras_training_history underlying call fit() keras.src.models.model.Model. training history can plotted, displaying loss epoch (black circles), blue smoothing line. predict() method reservr_keras_model takes input tensors returns predicted distribution parameters list compatible dist$get_placeholders(). can thus extract parameter mean compare OLS fit dataset: Since reservr_keras_model includes underlying keras.src.models.model.Model, parameters can also extracted compared OLS coefficients now discuss complex example involving censoring, using right-censored ovarian dataset bundled survival package (R Core Team 2023). goal predict rate parameter exponential survival time distribution cancer patients given four features \\(X = (\\mathtt{age}, \\mathtt{resid.ds}, \\mathtt{rx}, \\mathtt{ecog.ps})\\) collected study. variables \\(\\mathtt{resid.ds}, \\mathtt{rx}\\) \\(\\mathtt{ecog.ps}\\) indicator variables coded \\(\\{1, 2\\}\\). \\(\\mathtt{age}\\) continuous variable values \\((38, 75)\\). Due different scale \\(\\mathtt{age}\\) variable, useful separate variables order perform normalization. Normalization using keras3::layer_normalization() transforms input variables zero mean unit variance. step necessary categorical features. Next, define input layers shapes, conforming input predictor list dat$x. 
age normalized concatenated features, stored flags, resulting 4-dimensional representation. add two hidden ReLU-layers \\(5\\) neurons network compile result, adapting 5-dimensional hidden output parameter space \\(\\Theta = (0, \\infty)\\) rate parameter exponential distribution. accomplished using dense layer \\(1\\) neuron \\(\\mathrm{softplus}\\) activation function. stability reasons, default weight initialization optimal. circumvent , estimate global exponential distribution fit observations initialize final layer weights global fit initial prediction network. Finally, can train network visualize predictions. plot expected lifetime \\((\\mathtt{age}, \\mathtt{rx})\\) shows network learned longer expected lifetimes lower \\(\\mathtt{age}\\) treatment group (\\(\\mathtt{rx}\\)) 2. global fit included dashed blue line. Individual predictions observations can also plotted subject level.","code":"set.seed(1431L) keras3::set_random_seed(1432L) dataset <- tibble::tibble( x = runif(100, min = 10, max = 20), y = 2 * x + rnorm(100) ) dist <- dist_normal(sd = 1.0) nnet_input <- keras3::keras_input(shape = 1L, name = \"x_input\") nnet_output <- nnet_input nnet <- tf_compile_model( inputs = list(nnet_input), intermediate_output = nnet_output, dist = dist, optimizer = keras3::optimizer_adam(learning_rate = 0.1), censoring = FALSE, truncation = FALSE ) nnet$dist nnet$model nnet_fit <- fit( nnet, x = dataset$x, y = dataset$y, epochs = 100L, batch_size = 100L, shuffle = FALSE, verbose = FALSE ) # Fix weird behavior of keras3 nnet_fit$params$epochs <- max(nnet_fit$params$epochs, length(nnet_fit$metrics$loss)) plot(nnet_fit) pred_params <- predict(nnet, data = list(keras3::as_tensor(dataset$x, keras3::config_floatx()))) lm_fit <- lm(y ~ x, data = dataset) dataset$y_pred <- pred_params$mean dataset$y_lm <- predict(lm_fit, newdata = dataset, type = \"response\") library(ggplot2) ggplot(dataset, aes(x = x, y = y)) + geom_point() + geom_line(aes(y = y_pred), color = \"blue\") + 
geom_line(aes(y = y_lm), linetype = 2L, color = \"green\") coef_nnet <- rev(as.numeric(nnet$model$get_weights())) coef_lm <- unname(coef(lm_fit)) str(coef_nnet) str(coef_lm) set.seed(1219L) tensorflow::set_random_seed(1219L) keras3::config_set_floatx(\"float32\") dist <- dist_exponential() ovarian <- survival::ovarian dat <- list( y = trunc_obs( xmin = ovarian$futime, xmax = ifelse(ovarian$fustat == 1, ovarian$futime, Inf) ), x = list( age = keras3::as_tensor(ovarian$age, keras3::config_floatx(), shape = nrow(ovarian)), flags = k_matrix(ovarian[, c(\"resid.ds\", \"rx\", \"ecog.ps\")] - 1.0) ) ) nnet_inputs <- list( keras3::keras_input(shape = 1L, name = \"age\"), keras3::keras_input(shape = 3L, name = \"flags\") ) hidden1 <- keras3::layer_concatenate( keras3::layer_normalization(nnet_inputs[[1L]]), nnet_inputs[[2L]] ) hidden2 <- keras3::layer_dense( hidden1, units = 5L, activation = keras3::activation_relu ) nnet_output <- keras3::layer_dense( hidden2, units = 5L, activation = keras3::activation_relu ) nnet <- tf_compile_model( inputs = nnet_inputs, intermediate_output = nnet_output, dist = dist, optimizer = keras3::optimizer_adam(learning_rate = 0.01), censoring = TRUE, truncation = FALSE ) nnet$model str(predict(nnet, dat$x)) global_fit <- fit(dist, dat$y) tf_initialise_model(nnet, params = global_fit$params, mode = \"zero\") str(predict(nnet, dat$x)) nnet_fit <- fit( nnet, x = dat$x, y = dat$y, epochs = 100L, batch_size = nrow(dat$y), shuffle = FALSE, verbose = FALSE ) nnet_fit$params$epochs <- max(nnet_fit$params$epochs, length(nnet_fit$metrics$loss)) plot(nnet_fit) ovarian$expected_lifetime <- 1.0 / predict(nnet, dat$x)$rate"},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"conclusion","dir":"Articles","previous_headings":"","what":"Conclusion","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"presented reservr, package supports distribution parameter estimation 
distributional regression using R. tasks supported samples without interval censoring without random truncation, general form truncation typical packages support. package includes facilities (1) description randomly truncated non-informatively interval censored samples, (2) definition distribution families consider, (3) global distribution parameter estimation ..d. assumption sample (4) distributional regression - employing tensorflow package flexibility speed.","code":""},{"path":"https://ashesitr.github.io/reservr/articles/jss_paper.html","id":"acknowledgements","dir":"Articles","previous_headings":"","what":"Acknowledgements","title":"Fitting Distributions and Neural Networks to Censored and Truncated Data: The R Package reservr","text":"Author like thank Axel Bücher proofreading valuable comments earlier version article.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/articles/tensorflow.html","id":"overview","dir":"Articles","previous_headings":"","what":"Overview","title":"TensorFlow Integration","text":"reservr capable fitting distributions censored truncated observations, directly allow modelling influence exogenous variables observed alongside primary outcome. integration TensorFlow comes . TensorFlow integration allows fit neural network simultaneously parameters distribution taking exogenous variables account. reservr accepts partial tensorflow networks yield single arbitrary-dimension rank 2 tensor (e.g. dense layer) output can connect suitable layers intermediate output complete network predicts parameters pre-specified distribution family. also dynamically compiles suitable conditional likelihood based loss, depending type problem (censoring, truncation), can optimized using keras3::fit implementation --box. 
means full flexibility respect callbacks, optimizers, mini-batching, etc.","code":"library(reservr) library(tensorflow) library(keras3) #> #> Attaching package: 'keras3' #> The following objects are masked from 'package:tensorflow': #> #> set_random_seed, shape library(tibble) library(ggplot2)"},{"path":"https://ashesitr.github.io/reservr/articles/tensorflow.html","id":"a-simple-linear-model","dir":"Articles","previous_headings":"Overview","what":"A simple linear model","title":"TensorFlow Integration","text":"following example show code necessary fit simple model assumptions OLS data. true relationship use \\(y = 2 x + \\epsilon\\) \\(\\epsilon \\sim \\mathcal{N}(0, 1)\\). use censoring truncation.","code":"if (reticulate::py_module_available(\"keras\")) { set.seed(1431L) tensorflow::set_random_seed(1432L) dataset <- tibble( x = runif(100, min = 10, max = 20), y = 2 * x + rnorm(100) ) print(qplot(x, y, data = dataset)) # Specify distributional assumption of OLS: dist <- dist_normal(sd = 1.0) # OLS assumption: homoskedasticity # Optional: Compute a global fit global_fit <- fit(dist, dataset$y) # Define a neural network nnet_input <- layer_input(shape = 1L, name = \"x_input\") # in practice, this would be deeper nnet_output <- nnet_input optimizer <- optimizer_adam(learning_rate = 0.1) nnet <- tf_compile_model( inputs = list(nnet_input), intermediate_output = nnet_output, dist = dist, optimizer = optimizer, censoring = FALSE, # Turn off unnecessary features for this problem truncation = FALSE ) nnet_fit <- fit(nnet, x = dataset$x, y = dataset$y, epochs = 100L, batch_size = 100L, shuffle = FALSE) # Fix weird behavior of keras3 nnet_fit$params$epochs <- max(nnet_fit$params$epochs, length(nnet_fit$metrics$loss)) print(plot(nnet_fit)) pred_params <- predict(nnet, data = list(as_tensor(dataset$x, config_floatx()))) lm_fit <- lm(y ~ x, data = dataset) dataset$y_pred <- pred_params$mean dataset$y_lm <- predict(lm_fit, newdata = dataset, type = \"response\") p <- 
ggplot(dataset, aes(x = x, y = y)) %+% geom_point() %+% geom_line(aes(y = y_pred)) %+% geom_line(aes(y = y_lm), linetype = 2L) print(p) coef_nnet <- rev(as.numeric(nnet$model$get_weights())) coef_lm <- coef(lm_fit) print(coef_nnet) print(coef_lm) } #> Warning: `qplot()` was deprecated in ggplot2 3.4.0. #> This warning is displayed once every 8 hours. #> Call `lifecycle::last_lifecycle_warnings()` to see where this warning was #> generated. #> Epoch 0/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 341ms/step - loss: 56.18431/1 ━━━━━━━━━━━━━━━━━━━━ 0s 344ms/step - loss: 56.1843 #> Epoch 1/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 46.63251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 46.6325 #> Epoch 2/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 39.75831/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 39.7583 #> Epoch 3/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 35.50141/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 35.5014 #> Epoch 4/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.62621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.6262 #> Epoch 5/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.64411/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.6441 #> Epoch 6/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 34.80991/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.8099 #> Epoch 7/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 36.28141/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 36.2814 #> Epoch 8/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 37.38771/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 37.3877 #> Epoch 9/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 20ms/step - loss: 37.80121/1 ━━━━━━━━━━━━━━━━━━━━ 0s 21ms/step - loss: 37.8012 #> Epoch 10/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 37.51511/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 37.5151 #> Epoch 11/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 36.71811/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 36.7181 #> Epoch 12/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 
11ms/step - loss: 35.67251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 35.6725 #> Epoch 13/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 34.63281/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.6328 #> Epoch 14/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.79741/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.7974 #> Epoch 15/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.28291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.2829 #> Epoch 16/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.11521/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.1152 #> Epoch 17/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.23721/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.2372 #> Epoch 18/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.53231/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.5323 #> Epoch 19/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.86181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.8618 #> Epoch 20/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.10381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.1038 #> Epoch 21/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 34.18291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.1829 #> Epoch 22/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 34.08131/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 34.0813 #> Epoch 23/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.83221/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.8322 #> Epoch 24/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.50171/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.5017 #> Epoch 25/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 33.16641/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 33.1664 #> Epoch 26/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.89201/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.8920 #> Epoch 27/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.71911/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.7191 #> Epoch 28/100 #> 1/1 
━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.65621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.6562 #> Epoch 29/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.68181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.6818 #> Epoch 30/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.75441/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.7544 #> Epoch 31/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.82651/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 32.8265 #> Epoch 32/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.85911/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.8591 #> Epoch 33/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.83111/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.8311 #> Epoch 34/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.74251/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.7425 #> Epoch 35/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.61121/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.6112 #> Epoch 36/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.46491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.4649 #> Epoch 37/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.33131/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.3313 #> Epoch 38/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.23041/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.2304 #> Epoch 39/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.16951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1695 #> Epoch 40/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.14301/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1430 #> Epoch 41/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.13571/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1357 #> Epoch 42/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.12951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1295 #> Epoch 43/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.10861/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.1086 #> 
Epoch 44/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 32.06491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 32.0649 #> Epoch 45/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 31.99891/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.9989 #> Epoch 46/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 31.91821/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.9182 #> Epoch 47/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.83421/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.8342 #> Epoch 48/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.75761/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.7576 #> Epoch 49/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.69491/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.6949 #> Epoch 50/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.64701/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.6470 #> Epoch 51/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - loss: 31.60981/1 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - loss: 31.6098 #> Epoch 52/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.57631/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.5763 #> Epoch 53/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.53921/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.5392 #> Epoch 54/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.49401/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.4940 #> Epoch 55/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 31.43951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.4395 #> Epoch 56/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.37821/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.3782 #> Epoch 57/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.31441/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.3144 #> Epoch 58/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 20ms/step - loss: 31.25271/1 ━━━━━━━━━━━━━━━━━━━━ 0s 21ms/step - loss: 31.2527 #> Epoch 59/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.19611/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - 
loss: 31.1961 #> Epoch 60/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.14541/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.1454 #> Epoch 61/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.09891/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.0989 #> Epoch 62/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.05381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.0538 #> Epoch 63/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.00711/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 31.0071 #> Epoch 64/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.95711/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.9571 #> Epoch 65/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.90341/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.9034 #> Epoch 66/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.84721/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.8472 #> Epoch 67/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.79051/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.7905 #> Epoch 68/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.73501/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.7350 #> Epoch 69/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.68181/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.6818 #> Epoch 70/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.63071/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.6307 #> Epoch 71/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.58091/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.5809 #> Epoch 72/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.53111/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.5311 #> Epoch 73/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.48021/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.4802 #> Epoch 74/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.42771/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.4277 #> Epoch 75/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.37391/1 
━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.3739 #> Epoch 76/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.31951/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.3195 #> Epoch 77/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.26531/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.2653 #> Epoch 78/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.21201/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.2120 #> Epoch 79/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.15961/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.1596 #> Epoch 80/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.10781/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.1078 #> Epoch 81/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.05621/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.0562 #> Epoch 82/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 30.00421/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 30.0042 #> Epoch 83/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.95161/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.9516 #> Epoch 84/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.89851/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.8985 #> Epoch 85/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.84501/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.8450 #> Epoch 86/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.79171/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.7917 #> Epoch 87/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.73871/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.7387 #> Epoch 88/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.68611/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.6861 #> Epoch 89/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.63381/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.6338 #> Epoch 90/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.58161/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.5816 #> Epoch 91/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - 
loss: 29.52921/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.5292 #> Epoch 92/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.47661/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.4766 #> Epoch 93/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.42371/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.4237 #> Epoch 94/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.37081/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.3708 #> Epoch 95/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.31801/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.3180 #> Epoch 96/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.26531/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.2653 #> Epoch 97/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.21291/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.2129 #> Epoch 98/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.16051/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.1605 #> Epoch 99/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.10831/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.1083 #> Epoch 100/100 #> 1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - loss: 29.05601/1 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - loss: 29.0560 #> [1] 4.854740 1.606937 #> (Intercept) x #> 0.5645856 1.9574191"},{"path":"https://ashesitr.github.io/reservr/authors.html","id":null,"dir":"","previous_headings":"","what":"Authors","title":"Authors and Citation","text":"Alexander Rosenstock. Author, maintainer, copyright holder.","code":""},{"path":"https://ashesitr.github.io/reservr/authors.html","id":"citation","dir":"","previous_headings":"","what":"Citation","title":"Authors and Citation","text":"Rosenstock (2024). reservr: Fit Distributions Neural Networks Censored Truncated Data. 
R package version 0.0.3, https://github.com/AshesITR/reservr, https://ashesitr.github.io/reservr/.","code":"@Manual{, title = {reservr: Fit Distributions and Neural Networks to Censored and Truncated Data}, author = {Alexander Rosenstock}, year = {2024}, note = {R package version 0.0.3, https://github.com/AshesITR/reservr}, url = {https://ashesitr.github.io/reservr/}, }"},{"path":"https://ashesitr.github.io/reservr/index.html","id":"reservr","dir":"","previous_headings":"","what":"Fit Distributions and Neural Networks to Censored and Truncated Data","title":"Fit Distributions and Neural Networks to Censored and Truncated Data","text":"goal reservr provide flexible interface specifying distributions fitting (randomly) truncated possibly interval-censored data. provides custom fitting algorithms fit distributions ..d. samples well dynnamic TensorFlow integration allow training neural networks arbitrary output distributions. latter can used include explanatory variables distributional fits. Reservr also provides tools relevant working core functionality actuarial setting, namely functions prob_report() truncate_claims(), make assumptions type random truncation applied data. Please refer vignettes distributions.Rmd tensorflow.Rmd detailed introductions.","code":""},{"path":"https://ashesitr.github.io/reservr/index.html","id":"installation","dir":"","previous_headings":"","what":"Installation","title":"Fit Distributions and Neural Networks to Censored and Truncated Data","text":"reservr yet CRAN. 
can install latest development version reservr via can install released version reservr CRAN : want use reservrs features, make sure also install tensorflow.","code":"devtools::install_github(\"AshesITR/reservr\") install.packages(\"reservr\")"},{"path":"https://ashesitr.github.io/reservr/index.html","id":"example","dir":"","previous_headings":"","what":"Example","title":"Fit Distributions and Neural Networks to Censored and Truncated Data","text":"basic example shows fit normal distribution randomly truncated censored data.","code":"library(reservr) set.seed(123) mu <- 0 sigma <- 1 N <- 1000 p_cens <- 0.8 x <- rnorm(N, mean = mu, sd = sigma) is_censored <- rbinom(N, size = 1L, prob = p_cens) == 1L x_lower <- x x_lower[is_censored] <- x[is_censored] - runif(sum(is_censored), min = 0, max = 0.5) x_upper <- x x_upper[is_censored] <- x[is_censored] + runif(sum(is_censored), min = 0, max = 0.5) t_lower <- runif(N, min = -2, max = 0) t_upper <- runif(N, min = 0, max = 2) is_observed <- t_lower <= x & x <= t_upper obs <- trunc_obs( xmin = pmax(x_lower, t_lower)[is_observed], xmax = pmin(x_upper, t_upper)[is_observed], tmin = t_lower[is_observed], tmax = t_upper[is_observed] ) # Summary of the simulation cat(sprintf( \"simulated samples: %d\\nobserved samples: %d\\ncensored samples: %d\\n\", N, nrow(obs), sum(is.na(obs$x)) )) # Define outcome distribution and perform fit to truncated and (partially) censored sample dist <- dist_normal() the_fit <- fit(dist, obs) # Visualize resulting parameters and show a kernel density estimate of the samples. # We replace interval-censored samples with their midpoint for the kernel density estimate. 
plot_distributions( true = dist, fitted = dist, empirical = dist_empirical(0.5 * (obs$xmin + obs$xmax)), .x = seq(-5, 5, length.out = 201), plots = \"density\", with_params = list( true = list(mean = mu, sd = sigma), fitted = the_fit$params ) )"},{"path":"https://ashesitr.github.io/reservr/index.html","id":"code-of-conduct","dir":"","previous_headings":"","what":"Code of Conduct","title":"Fit Distributions and Neural Networks to Censored and Truncated Data","text":"Please note reservr project released Contributor Code Conduct. contributing project, agree abide terms.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":null,"dir":"Reference","previous_headings":"","what":"Base class for Distributions — Distribution","title":"Base class for Distributions — Distribution","text":"Represents modifiable Distribution family","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"active-bindings","dir":"Reference","previous_headings":"","what":"Active bindings","title":"Base class for Distributions — Distribution","text":"default_params Get set (non-recursive) default parameters Distribution param_bounds Get set (non-recursive) parameter bounds (box constraints) Distribution","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"public-methods","dir":"Reference","previous_headings":"","what":"Public methods","title":"Base class for Distributions — Distribution","text":"Distribution$new() Distribution$sample() Distribution$density() Distribution$tf_logdensity() Distribution$probability() Distribution$tf_logprobability() Distribution$quantile() Distribution$hazard() Distribution$diff_density() Distribution$diff_probability() Distribution$is_in_support() Distribution$is_discrete_at() Distribution$tf_is_discrete_at() Distribution$has_capability() Distribution$get_type() Distribution$get_components() Distribution$is_discrete() 
Distribution$is_continuous() Distribution$require_capability() Distribution$get_dof() Distribution$get_placeholders() Distribution$get_params() Distribution$tf_make_constants() Distribution$tf_compile_params() Distribution$get_param_bounds() Distribution$get_param_constraints() Distribution$export_functions() Distribution$clone()","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$new(type, caps, params, name, default_params)"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"type Type distribution. string constant default implementation. Distributions non-constant type must override get_type() function. caps Character vector capabilities fuel default implementations has_capability() require_capability(). Distributions dynamic capabilities must override has_capability() function. params Initial parameter bounds structure, backing param_bounds active binding (usually list intervals). name Name Distribution class. CamelCase end \"Distribution\". 
default_params Initial fixed parameters backing default_params active binding (usually list numeric / NULLs).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Construct Distribution instance Used internally dist_* functions.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-1","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$sample(n, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-1","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"n number samples draw. with_params Distribution parameters use. parameter value can also numeric vector length n. case -th sample use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-1","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Sample Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"length n vector ..d. 
random samples Distribution specified parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential(rate = 2.0)$sample(10)"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-2","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$density(x, log = FALSE, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-2","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points evaluate density . log Flag. TRUE, return log-density instead. with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th density point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-2","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Density Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-1","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"numeric vector (log-)densities","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-1","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$density(c(1.0, 2.0), with_params = list(rate = 2.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-3","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_logdensity()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-3","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Compile TensorFlow function log-density evaluation","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-2","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"tf_function taking arguments x args returning log-density Distribution evaluated x parameters args.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-4","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$probability( q, lower.tail = TRUE, log.p = FALSE, 
with_params = list() )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-3","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"q Vector points evaluate probability function . lower.tail TRUE, return P(X <= q). Otherwise return P(X > q). log.p TRUE, probabilities returned log(p). with_params Distribution parameters use. parameter value can also numeric vector length length(q). case, -th probability point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-4","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Cumulative probability Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-3","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"numeric vector (log-)probabilities","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-2","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$probability( c(1.0, 2.0), with_params = list(rate = 2.0) )"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-5","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_logprobability()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-5","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Compile TensorFlow function log-probability 
evaluation","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-4","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"tf_function taking arguments qmin, qmax args returning log-probability Distribution evaluated closed interval [qmin, qmax] parameters args.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-6","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$quantile( p, lower.tail = TRUE, log.p = FALSE, with_params = list() )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-4","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"p Vector probabilities. lower.tail TRUE, return P(X <= q). Otherwise return P(X > q). log.p TRUE, probabilities returned log(p). with_params Distribution parameters use. parameter value can also numeric vector length length(p). 
case, -th quantile use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-6","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Quantile function Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-5","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"numeric vector quantiles","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-3","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$quantile(c(0.1, 0.5), with_params = list(rate = 2.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-7","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$hazard(x, log = FALSE, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-5","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points. log Flag. TRUE, return log-hazard instead. with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th hazard point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-7","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Hazard function Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-6","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"numeric vector (log-)hazards","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-4","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential(rate = 2.0)$hazard(c(1.0, 2.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-8","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$diff_density(x, log = FALSE, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-6","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points. log Flag. TRUE, return gradient log-density instead. with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th density point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-8","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Gradients density Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-7","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list structure containing (log-)density gradients free parameters Distribution evaluated x.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-5","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$diff_density( c(1.0, 2.0), with_params = list(rate = 2.0) )"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-9","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$diff_probability( q, lower.tail = TRUE, log.p = FALSE, with_params = list() )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-7","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"q Vector points evaluate probability function . lower.tail TRUE, return P(X <= q). Otherwise return P(X > q). log.p TRUE, probabilities returned log(p). with_params Distribution parameters use. parameter value can also numeric vector length length(q). 
case, -th probability point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-9","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Gradients cumulative probability Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-8","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list structure containing cumulative (log-)probability gradients free parameters Distribution evaluated q.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-6","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$diff_probability( c(1.0, 2.0), with_params = list(rate = 2.0) )"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-10","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$is_in_support(x, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-8","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-10","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Determine value support Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-9","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"logical vector length x indicating whether x part support distribution given parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-7","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential(rate = 1.0)$is_in_support(c(-1.0, 0.0, 1.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-11","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$is_discrete_at(x, with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-9","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"x Vector points with_params Distribution parameters use. parameter value can also numeric vector length length(x). 
case, -th point use -th parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-11","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Determine value positive probability","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-10","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"logical vector length x indicating whether positive probability mass x given Distribution parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-8","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_dirac(point = 0.0)$is_discrete_at(c(0.0, 1.0))"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-12","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_is_discrete_at()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-12","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Compile TensorFlow function discrete support checking","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-11","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"tf_function taking arguments x args returning whether Distribution point mass x given parameters args.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-13","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — 
Distribution","text":"","code":"Distribution$has_capability(caps)"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-10","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"caps Character vector capabilities","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-13","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Check capability present","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-12","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"logical vector length caps.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-9","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$has_capability(\"density\")"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-14","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_type()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-14","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get type Distribution. 
Type can one discrete, continuous mixed.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-13","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"string representing type Distribution.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-10","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$get_type() dist_dirac()$get_type() dist_mixture(list(dist_dirac(), dist_exponential()))$get_type() dist_mixture(list(dist_dirac(), dist_binomial()))$get_type()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-15","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_components()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-15","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get component Distributions transformed Distribution.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-14","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"possibly empty list Distributions","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-11","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_trunc(dist_exponential())$get_components() dist_dirac()$get_components() dist_mixture(list(dist_exponential(), 
dist_gamma()))$get_components()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-16","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$is_discrete()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-16","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Check Distribution discrete, .e. density respect counting measure.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-15","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"TRUE Distribution discrete, FALSE otherwise. Note mixed distributions discrete can point masses.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-12","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$is_discrete() dist_dirac()$is_discrete()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-17","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$is_continuous()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-17","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Check Distribution continuous, .e. density respect Lebesgue measure.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-16","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"TRUE Distribution continuous, FALSE otherwise. 
Note mixed distributions continuous.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-13","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$is_continuous() dist_dirac()$is_continuous()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-18","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$require_capability( caps, fun_name = paste0(sys.call(-1)[[1]], \"()\") )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-11","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"caps Character vector Capabilities require fun_name Friendly text use generating error message case failure.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-18","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Ensure Distribution required capabilities. 
throw error capability missing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-17","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"Invisibly TRUE.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-14","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$require_capability(\"diff_density\")"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-19","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_dof()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-19","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get number degrees freedom Distribution family. parameters without fixed default considered free.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-18","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"integer representing degrees freedom suitable e.g. 
AIC calculations.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-15","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$get_dof() dist_exponential(rate = 1.0)$get_dof()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-20","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_placeholders()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-20","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get Placeholders Distribution family. Returns list free parameters family. values NULL. Distribution Distributions parameters, placeholders computed recursively.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-19","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"named list containing combination (named unnamed) lists NULLs.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-16","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_exponential()$get_placeholders() dist_mixture(list(dist_dirac(), dist_exponential()))$get_placeholders()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-21","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_params(with_params = 
list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-12","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"with_params Optional parameter overrides structure dist$get_params(). Given Parameter values expected length 1.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-21","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get full list parameters, possibly including placeholders.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-20","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list representing (recursive) parameter structure Distribution values specified parameters NULL free parameters missing Distributions parameters with_params.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-17","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_mixture(list(dist_dirac(), dist_exponential()))$get_params( with_params = list(probs = list(0.5, 0.5)) )"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-22","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_make_constants(with_params = list())"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-13","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"with_params Optional parameter overrides structure dist$tf_make_constants(). 
Given Parameter values expected length 1.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-22","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get list constant TensorFlow parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-21","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list representing (recursive) constant parameters Distribution values specified parameters. constant TensorFlow Tensor dtype floatx.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-23","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$tf_compile_params(input, name_prefix = \"\")"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-14","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"input keras layer bind outputs name_prefix Prefix use layer names","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-23","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Compile distribution parameters tensorflow outputs","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-22","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list two elements outputs flat list keras output layers, one parameter. 
output_inflater function taking keras output layers transforming list structure suitable passing loss function returned tf_compile_model()","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-24","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_param_bounds()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-24","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get Interval bounds Distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-23","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"list representing free (recursive) parameter structure Distribution Interval objects values representing bounds respective free parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-18","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_mixture( list(dist_dirac(), dist_exponential()), probs = list(0.5, 0.5) )$get_param_bounds() dist_mixture( list(dist_dirac(), dist_exponential()) )$get_param_bounds() dist_genpareto()$get_param_bounds() dist_genpareto1()$get_param_bounds()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-25","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$get_param_constraints()"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-25","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Get 
additional (non-linear) equality constraints Distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-24","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"NULL box constraints specified dist$get_param_bounds() sufficient, function taking full Distribution parameters returning either numeric vector (must 0 valid parameter combinations) list elements constraints: numeric vector constraints jacobian: Jacobi matrix constraints respect parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-19","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"dist_mixture( list(dist_dirac(), dist_exponential()) )$get_param_constraints()"},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-26","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$export_functions( name, envir = parent.frame(), with_params = list() )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-15","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"name common suffix exported functions envir Environment export functions with_params Optional list parameters use default values exported functions","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"details-26","dir":"Reference","previous_headings":"","what":"Details","title":"Base class for Distributions — Distribution","text":"Export sampling, density, probability quantile functions plain R functions Creates new functions envir named {r,d,p,q} implement dist$sample, dist$density, dist$probability dist$quantile plain 
functions default arguments specified with_params fixed parameters. resulting functions signatures taking parameters separate arguments.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"returns-25","dir":"Reference","previous_headings":"","what":"Returns","title":"Base class for Distributions — Distribution","text":"Invisibly NULL.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"examples-20","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"tmp_env <- new.env(parent = globalenv()) dist_exponential()$export_functions( name = \"exp\", envir = tmp_env, with_params = list(rate = 2.0) ) evalq( fitdistrplus::fitdist(rexp(100), \"exp\"), envir = tmp_env )"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"method-clone-","dir":"Reference","previous_headings":"","what":"Method clone()","title":"Base class for Distributions — Distribution","text":"objects class cloneable method.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"usage-27","dir":"Reference","previous_headings":"","what":"Usage","title":"Base class for Distributions — Distribution","text":"","code":"Distribution$clone(deep = FALSE)"},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"arguments-16","dir":"Reference","previous_headings":"","what":"Arguments","title":"Base class for Distributions — Distribution","text":"deep Whether make deep clone.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Distribution.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Base class for Distributions — Distribution","text":"","code":"# Example for param_bounds: # Create an Exponential Distribution with rate constrained to (0, 2) # instead of (0, Inf) my_exp <- dist_exponential() my_exp$param_bounds$rate <- 
interval(c(0, 2)) my_exp$get_param_bounds() #> $rate #> (0, 2) #> fit_dist(my_exp, rexp(100, rate = 3), start = list(rate = 1))$params$rate #> [1] 2 ## ------------------------------------------------ ## Method `Distribution$sample` ## ------------------------------------------------ dist_exponential(rate = 2.0)$sample(10) #> [1] 0.13286615 0.01112249 0.07288815 1.51862540 0.08488557 0.42304793 #> [7] 0.10249917 0.08983756 0.25915838 0.33607531 ## ------------------------------------------------ ## Method `Distribution$density` ## ------------------------------------------------ dist_exponential()$density(c(1.0, 2.0), with_params = list(rate = 2.0)) #> [1] 0.27067057 0.03663128 ## ------------------------------------------------ ## Method `Distribution$probability` ## ------------------------------------------------ dist_exponential()$probability( c(1.0, 2.0), with_params = list(rate = 2.0) ) #> [1] 0.8646647 0.9816844 ## ------------------------------------------------ ## Method `Distribution$quantile` ## ------------------------------------------------ dist_exponential()$quantile(c(0.1, 0.5), with_params = list(rate = 2.0)) #> [1] 0.05268026 0.34657359 ## ------------------------------------------------ ## Method `Distribution$hazard` ## ------------------------------------------------ dist_exponential(rate = 2.0)$hazard(c(1.0, 2.0)) #> [1] 2 2 ## ------------------------------------------------ ## Method `Distribution$diff_density` ## ------------------------------------------------ dist_exponential()$diff_density( c(1.0, 2.0), with_params = list(rate = 2.0) ) #> $rate #> [1] -0.13533528 -0.05494692 #> ## ------------------------------------------------ ## Method `Distribution$diff_probability` ## ------------------------------------------------ dist_exponential()$diff_probability( c(1.0, 2.0), with_params = list(rate = 2.0) ) #> $rate #> [1] 0.13533528 0.03663128 #> ## ------------------------------------------------ ## Method `Distribution$is_in_support` ## 
------------------------------------------------ dist_exponential(rate = 1.0)$is_in_support(c(-1.0, 0.0, 1.0)) #> [1] FALSE FALSE TRUE ## ------------------------------------------------ ## Method `Distribution$is_discrete_at` ## ------------------------------------------------ dist_dirac(point = 0.0)$is_discrete_at(c(0.0, 1.0)) #> [1] TRUE FALSE ## ------------------------------------------------ ## Method `Distribution$has_capability` ## ------------------------------------------------ dist_exponential()$has_capability(\"density\") #> [1] TRUE ## ------------------------------------------------ ## Method `Distribution$get_type` ## ------------------------------------------------ dist_exponential()$get_type() #> [1] \"continuous\" dist_dirac()$get_type() #> [1] \"discrete\" dist_mixture(list(dist_dirac(), dist_exponential()))$get_type() #> [1] \"mixed\" dist_mixture(list(dist_dirac(), dist_binomial()))$get_type() #> [1] \"discrete\" ## ------------------------------------------------ ## Method `Distribution$get_components` ## ------------------------------------------------ dist_trunc(dist_exponential())$get_components() #> [[1]] #> An ExponentialDistribution with 1 dof #> dist_dirac()$get_components() #> list() dist_mixture(list(dist_exponential(), dist_gamma()))$get_components() #> [[1]] #> An ExponentialDistribution with 1 dof #> #> [[2]] #> A GammaDistribution with 2 dof #> ## ------------------------------------------------ ## Method `Distribution$is_discrete` ## ------------------------------------------------ dist_exponential()$is_discrete() #> [1] FALSE dist_dirac()$is_discrete() #> [1] TRUE ## ------------------------------------------------ ## Method `Distribution$is_continuous` ## ------------------------------------------------ dist_exponential()$is_continuous() #> [1] TRUE dist_dirac()$is_continuous() #> [1] FALSE ## ------------------------------------------------ ## Method `Distribution$require_capability` ## 
------------------------------------------------ dist_exponential()$require_capability(\"diff_density\") ## ------------------------------------------------ ## Method `Distribution$get_dof` ## ------------------------------------------------ dist_exponential()$get_dof() #> [1] 1 dist_exponential(rate = 1.0)$get_dof() #> [1] 0 ## ------------------------------------------------ ## Method `Distribution$get_placeholders` ## ------------------------------------------------ dist_exponential()$get_placeholders() #> $rate #> NULL #> dist_mixture(list(dist_dirac(), dist_exponential()))$get_placeholders() #> $dists #> $dists[[1]] #> $dists[[1]]$point #> NULL #> #> #> $dists[[2]] #> $dists[[2]]$rate #> NULL #> #> #> #> $probs #> $probs[[1]] #> NULL #> #> $probs[[2]] #> NULL #> #> ## ------------------------------------------------ ## Method `Distribution$get_params` ## ------------------------------------------------ dist_mixture(list(dist_dirac(), dist_exponential()))$get_params( with_params = list(probs = list(0.5, 0.5)) ) #> $dists #> $dists[[1]] #> $dists[[1]]$point #> NULL #> #> #> $dists[[2]] #> $dists[[2]]$rate #> NULL #> #> #> #> $probs #> $probs[[1]] #> [1] 0.5 #> #> $probs[[2]] #> [1] 0.5 #> #> ## ------------------------------------------------ ## Method `Distribution$get_param_bounds` ## ------------------------------------------------ dist_mixture( list(dist_dirac(), dist_exponential()), probs = list(0.5, 0.5) )$get_param_bounds() #> $dists #> $dists[[1]] #> $dists[[1]]$point #> (-Inf, Inf) #> #> #> $dists[[2]] #> $dists[[2]]$rate #> (0, Inf) #> #> #> #> $probs #> list() #> dist_mixture( list(dist_dirac(), dist_exponential()) )$get_param_bounds() #> $dists #> $dists[[1]] #> $dists[[1]]$point #> (-Inf, Inf) #> #> #> $dists[[2]] #> $dists[[2]]$rate #> (0, Inf) #> #> #> #> $probs #> $probs[[1]] #> [0, 1] #> #> $probs[[2]] #> [0, 1] #> #> dist_genpareto()$get_param_bounds() #> $u #> (-Inf, Inf) #> #> $sigmau #> (0, Inf) #> #> $xi #> (-Inf, Inf) #> 
dist_genpareto1()$get_param_bounds() #> $u #> (-Inf, Inf) #> #> $sigmau #> (0, Inf) #> #> $xi #> [0, 1] #> ## ------------------------------------------------ ## Method `Distribution$get_param_constraints` ## ------------------------------------------------ dist_mixture( list(dist_dirac(), dist_exponential()) )$get_param_constraints() #> function (params) #> { #> prob_mat <- do.call(cbind, params$probs) #> nms <- names(flatten_params(params)) #> jac_full <- matrix(0, nrow = nrow(prob_mat), ncol = length(nms)) #> jac_full[, grepl(\"^probs\", nms)] <- 1 #> list(constraints = rowSums(prob_mat) - 1, jacobian = jac_full) #> } #> ## ------------------------------------------------ ## Method `Distribution$export_functions` ## ------------------------------------------------ tmp_env <- new.env(parent = globalenv()) dist_exponential()$export_functions( name = \"exp\", envir = tmp_env, with_params = list(rate = 2.0) ) #> Exported `dexp()`. #> Exported `rexp()`. #> Exported `pexp()`. #> Exported `qexp()`. evalq( fitdistrplus::fitdist(rexp(100), \"exp\"), envir = tmp_env ) #> Fitting of the distribution ' exp ' by maximum likelihood #> Parameters: #> estimate Std. Error #> rate 2.131976 0.2131975"},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":null,"dir":"Reference","previous_headings":"","what":"The Generalized Pareto Distribution (GPD) — GenPareto","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"functions provide information generalized Pareto distribution threshold u. 
dgpd gives density, pgpd gives distribution function, qgpd gives quantile function rgpd generates random deviates.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"","code":"rgpd(n = 1L, u = 0, sigmau = 1, xi = 0) dgpd(x, u = 0, sigmau = 1, xi = 0, log = FALSE) pgpd(q, u = 0, sigmau = 1, xi = 0, lower.tail = TRUE, log.p = FALSE) qgpd(p, u = 0, sigmau = 1, xi = 0, lower.tail = TRUE, log.p = FALSE)"},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"n integer number observations. u threshold parameter (minimum value). sigmau scale parameter (must positive). xi shape parameter x, q vector quantiles. log, log.p logical; TRUE, probabilities/densities p given log(p). lower.tail logical; TRUE (default), probabilities \\(P(X \\le x)\\), otherwise \\(P(X > x)\\). p vector probabilities.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"rgpd generates random deviates. dgpd gives density. pgpd gives distribution function. qgpd gives quantile function.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"u, sigmau xi specified, assume default values 0, 1 0 respectively. generalized Pareto distribution density $$f(x) = 1 / \\sigma_u (1 + \\xi z)^(- 1 / \\xi - 1)$$ \\(z = (x - u) / \\sigma_u\\) \\(f(x) = exp(-z)\\) \\(\\xi\\) 0. 
support \\(x \\ge u\\) \\(\\xi \\ge 0\\) \\(u \\le x \\le u - \\sigma_u / \\xi\\) \\(\\xi < 0\\). Expected value exists \\(\\xi < 1\\) equal $$E(X) = u + \\sigma_u / (1 - \\xi)$$ k-th moments exist general \\(k\\xi < 1\\).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"references","dir":"Reference","previous_headings":"","what":"References","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"https://en.wikipedia.org/wiki/Generalized_Pareto_distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/GenPareto.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"The Generalized Pareto Distribution (GPD) — GenPareto","text":"","code":"x <- rgpd(1000, u = 1, sigmau = 0.5, xi = 0.1) xx <- seq(-1, 10, 0.01) hist(x, breaks = 100, freq = FALSE, xlim = c(-1, 10)) lines(xx, dgpd(xx, u = 1, sigmau = 0.5, xi = 0.1)) plot(xx, dgpd(xx, u = 1, sigmau = 1, xi = 0), type = \"l\") lines(xx, dgpd(xx, u = 0.5, sigmau = 1, xi = -0.3), col = \"blue\", lwd = 2) lines(xx, dgpd(xx, u = 1.5, sigmau = 1, xi = 0.3), col = \"red\", lwd = 2) plot(xx, dgpd(xx, u = 1, sigmau = 1, xi = 0), type = \"l\") lines(xx, dgpd(xx, u = 1, sigmau = 0.5, xi = 0), col = \"blue\", lwd = 2) lines(xx, dgpd(xx, u = 1, sigmau = 2, xi = 0), col = \"red\", lwd = 2)"},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":null,"dir":"Reference","previous_headings":"","what":"The Pareto Distribution — Pareto","title":"The Pareto Distribution — Pareto","text":"functions provide information Pareto distribution. 
dpareto gives density, ppareto gives distribution function, qpareto gives quantile function rpareto generates random deviates.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"The Pareto Distribution — Pareto","text":"","code":"rpareto(n = 1L, shape = 0, scale = 1) dpareto(x, shape = 1, scale = 1, log = FALSE) ppareto(q, shape = 1, scale = 1, lower.tail = TRUE, log.p = FALSE) qpareto(p, shape = 1, scale = 1, lower.tail = TRUE, log.p = FALSE)"},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"The Pareto Distribution — Pareto","text":"n integer number observations. shape shape parameter (must positive). scale scale parameter (must positive). x, q vector quantiles. log, log.p logical; TRUE, probabilities/densities p given log(p). lower.tail logical; TRUE (default), probabilities \\(P(X \\le x)\\), otherwise \\(P(X > x)\\). p vector probabilities.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"The Pareto Distribution — Pareto","text":"rpareto generates random deviates. dpareto gives density. ppareto gives distribution function. qpareto gives quantile function.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"The Pareto Distribution — Pareto","text":"shape scale specified, assume default values 1. Pareto distribution scale \\(\\theta\\) shape \\(\\xi\\) density $$f(x) = \\xi \\theta^\\xi / (x + \\theta)^(\\xi + 1)$$ support \\(x \\ge 0\\). 
Expected value exists \\(\\xi > 1\\) equal $$E(X) = \\theta / (\\xi - 1)$$ k-th moments exist general \\(k < \\xi\\).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"references","dir":"Reference","previous_headings":"","what":"References","title":"The Pareto Distribution — Pareto","text":"https://en.wikipedia.org/wiki/Pareto_distribution - named Lomax therein.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/Pareto.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"The Pareto Distribution — Pareto","text":"","code":"x <- rpareto(1000, shape = 10, scale = 5) xx <- seq(-1, 10, 0.01) hist(x, breaks = 100, freq = FALSE, xlim = c(-1, 10)) lines(xx, dpareto(xx, shape = 10, scale = 5)) plot(xx, dpareto(xx, shape = 10, scale = 5), type = \"l\") lines(xx, dpareto(xx, shape = 3, scale = 5), col = \"red\", lwd = 2) plot(xx, dpareto(xx, shape = 10, scale = 10), type = \"l\") lines(xx, dpareto(xx, shape = 10, scale = 5), col = \"blue\", lwd = 2) lines(xx, dpareto(xx, shape = 10, scale = 20), col = \"red\", lwd = 2)"},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":null,"dir":"Reference","previous_headings":"","what":"Convert TensorFlow tensors to distribution parameters recursively — as_params","title":"Convert TensorFlow tensors to distribution parameters recursively — as_params","text":"Convert TensorFlow tensors distribution parameters recursively","code":""},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convert TensorFlow tensors to distribution parameters recursively — as_params","text":"","code":"as_params(x)"},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convert TensorFlow tensors to distribution parameters recursively — 
as_params","text":"x possibly nested list structure tensorflow.tensors","code":""},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Convert TensorFlow tensors to distribution parameters recursively — as_params","text":"nested list vectors suitable distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/as_params.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Convert TensorFlow tensors to distribution parameters recursively — as_params","text":"","code":"if (interactive()) { tf_params <- list( probs = k_matrix(t(c(0.5, 0.3, 0.2))), shapes = k_matrix(t(c(1L, 2L, 3L)), dtype = \"int32\"), scale = keras3::as_tensor(1.0, keras3::config_floatx()) ) params <- as_params(tf_params) dist <- dist_erlangmix(vector(\"list\", 3L)) dist$sample(10L, with_params = params) }"},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":null,"dir":"Reference","previous_headings":"","what":"Transition functions for blended distributions — blended_transition","title":"Transition functions for blended distributions — blended_transition","text":"Transition functions blended distributions","code":""},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Transition functions for blended distributions — blended_transition","text":"","code":"blended_transition(x, u, eps, .gradient = FALSE, .extend_na = FALSE) blended_transition_inv(x, u, eps, .component)"},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Transition functions for blended distributions — blended_transition","text":"x Points evaluate u Sorted vector blending thresholds, rowwise sorted matrix blending 
thresholds eps Corresponding vector matrix blending bandwidths. Must positive dimensions u, scalar. rowwise blending regions (u - eps, u + eps) may overlap. .gradient Also evaluate gradient respect x? .extend_na Extend -range transitions last -range value (.e. corresponding u) NA? .component Component index (length(u) + 1) invert.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Transition functions for blended distributions — blended_transition","text":"blended_transition returns matrix length(x) rows length(u) + 1 columns containing transformed values blending components. .gradient TRUE, attribute \"gradient\" attached dimensions, containing derivative respective transition component respect x. blended_transition_inv returns vector length(x) values containing inverse transformed values .componentth blending component.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/blended_transition.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Transition functions for blended distributions — blended_transition","text":"","code":"library(ggplot2) xx <- seq(from = 0, to = 20, length.out = 101) blend_mat <- blended_transition(xx, u = 10, eps = 3, .gradient = TRUE) ggplot( data.frame( x = rep(xx, 2L), fun = rep(c(\"p\", \"q\"), each = length(xx)), y = as.numeric(blend_mat), relevant = c(xx <= 13, xx >= 7) ), aes(x = x, y = y, color = fun, linetype = relevant) ) %+% geom_line() %+% theme_bw() %+% theme( legend.position = \"bottom\", legend.box = \"horizontal\" ) %+% guides(color = guide_legend(direction = \"horizontal\", title = \"\"), linetype = guide_none()) %+% scale_linetype_manual(values = c(\"TRUE\" = 1, \"FALSE\" = 3)) ggplot( data.frame( x = rep(xx, 2L), fun = rep(c(\"p'\", \"q'\"), each = length(xx)), y = as.numeric(attr(blend_mat, \"gradient\")), relevant = c(xx <= 13, xx >= 7) ), aes(x = 
x, y = y, color = fun, linetype = relevant) ) %+% geom_line() %+% theme_bw() %+% theme( legend.position = \"bottom\", legend.box = \"horizontal\" ) %+% guides(color = guide_legend(direction = \"horizontal\", title = \"\"), linetype = guide_none()) %+% scale_linetype_manual(values = c(\"TRUE\" = 1, \"FALSE\" = 3))"},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":null,"dir":"Reference","previous_headings":"","what":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"Provides keras callback similar keras3::callback_reduce_lr_on_plateau() also restores weights best seen far whenever learning rate reduction occurs, slightly restrictive improvement detection.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"","code":"callback_adaptive_lr( monitor = \"val_loss\", factor = 0.1, patience = 10L, verbose = 0L, mode = c(\"auto\", \"min\", \"max\"), delta_abs = 1e-04, delta_rel = 0, cooldown = 0L, min_lr = 0, restore_weights = TRUE )"},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"monitor quantity monitored. factor factor learning rate reduced. new_lr = old_lr * factor. patience number epochs significant improvement learning rate reduced. verbose integer. Set 1 receive update messages. mode Optimisation mode. \"auto\" detects mode name monitor. \"min\" monitors decreasing metrics. \"max\" monitors increasing metrics. 
delta_abs Minimum absolute metric improvement per epoch. learning rate reduced average improvement less delta_abs per epoch patience epochs. delta_rel Minimum relative metric improvement per epoch. learning rate reduced average improvement less |metric| * delta_rel per epoch patience epochs. cooldown number epochs wait resuming normal operation learning rate reduced. minimum number epochs two learning rate reductions patience + cooldown. min_lr lower bound learning rate. learning rate reduction lower learning rate min_lr, clipped min_lr instead reductions performed. restore_weights Bool. TRUE, best weights restored learning rate reduction. useful metric oscillates.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"KerasCallback suitable passing keras3::fit().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"Note keras3::callback_reduce_lr_on_plateau() automatically logs learning rate metric 'lr', currently impossible R. 
Thus, want also log learning rate, add keras3::callback_reduce_lr_on_plateau() high min_lr effectively disable callback still monitor learning rate.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_adaptive_lr.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Keras Callback for adaptive learning rate with weight restoration — callback_adaptive_lr","text":"","code":"dist <- dist_exponential() group <- sample(c(0, 1), size = 100, replace = TRUE) x <- dist$sample(100, with_params = list(rate = group + 1)) global_fit <- fit(dist, x) if (interactive()) { library(keras3) l_in <- layer_input(shape = 1L) mod <- tf_compile_model( inputs = list(l_in), intermediate_output = l_in, dist = dist, optimizer = optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_initialise_model(mod, global_fit$params) fit_history <- fit( mod, x = as_tensor(group, config_floatx()), y = as_trunc_obs(x), epochs = 20L, callbacks = list( callback_adaptive_lr(\"loss\", factor = 0.5, patience = 2L, verbose = 1L, min_lr = 1.0e-4), callback_reduce_lr_on_plateau(\"loss\", min_lr = 1.0) # to track lr ) ) plot(fit_history) predicted_means <- predict(mod, data = as_tensor(c(0, 1), config_floatx())) }"},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":null,"dir":"Reference","previous_headings":"","what":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"Provides keras callback monitor individual components censored truncated likelihood. 
Useful debugging TensorFlow implementations Distributions.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"","code":"callback_debug_dist_gradients( object, data, obs, keep_grads = FALSE, stop_on_na = TRUE, verbose = TRUE )"},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"object reservr_keras_model created tf_compile_model(). data Input data model. obs Observations associated data. keep_grads Log actual gradients? (memory hungry!) stop_on_na Stop likelihood component NaN gradients? verbose Print message training halted? Message contain information likelihood components NaN gradients.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"KerasCallback suitable passing keras3::fit().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/callback_debug_dist_gradients.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Callback to monitor likelihood gradient components — callback_debug_dist_gradients","text":"","code":"dist <- dist_exponential() group <- sample(c(0, 1), size = 100, replace = TRUE) x <- dist$sample(100, with_params = list(rate = group + 1)) global_fit <- fit(dist, x) if (interactive()) { library(keras3) l_in <- layer_input(shape = 1L) mod <- tf_compile_model( inputs = list(l_in), intermediate_output = l_in, dist = dist, optimizer = 
optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_initialise_model(mod, global_fit$params) gradient_tracker <- callback_debug_dist_gradients( mod, as_tensor(group, config_floatx()), x, keep_grads = TRUE ) fit_history <- fit( mod, x = as_tensor(group, config_floatx()), y = x, epochs = 20L, callbacks = list( callback_adaptive_lr(\"loss\", factor = 0.5, patience = 2L, verbose = 1L, min_lr = 1.0e-4), gradient_tracker, callback_reduce_lr_on_plateau(\"loss\", min_lr = 1.0) # to track lr ) ) gradient_tracker$gradient_logs[[20]]$dens plot(fit_history) predicted_means <- predict(mod, data = as_tensor(c(0, 1), config_floatx())) }"},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":null,"dir":"Reference","previous_headings":"","what":"Construct a BDEGP-Family — dist_bdegp","title":"Construct a BDEGP-Family — dist_bdegp","text":"Constructs BDEGP-Family distribution fixed number components blending interval.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Construct a BDEGP-Family — dist_bdegp","text":"","code":"dist_bdegp(n, m, u, epsilon)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Construct a BDEGP-Family — dist_bdegp","text":"n Number dirac components, starting point mass 0. m Number erlang components, translated n - 0.5. u Blending cut-, must positive real. epsilon Blending radius, must positive real less u. blending interval u - epsilon < x < u + epsilon.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Construct a BDEGP-Family — dist_bdegp","text":"MixtureDistribution n DiracDistributions 0 .. 
n - 1 BlendedDistribution object child Distributions TranslatedDistribution offset n - 0.5 ErlangMixtureDistribution m shapes GeneralizedParetoDistribution shape parameter restricted [0, 1] location parameter fixed u break u bandwidth epsilon.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_bdegp.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Construct a BDEGP-Family — dist_bdegp","text":"","code":"dist <- dist_bdegp(n = 1, m = 2, u = 10, epsilon = 3) params <- list( dists = list( list(), list( dists = list( list( dist = list( shapes = list(1L, 2L), scale = 1.0, probs = list(0.7, 0.3) ) ), list( sigmau = 1.0, xi = 0.1 ) ), probs = list(0.1, 0.9) ) ), probs = list(0.95, 0.05) ) x <- dist$sample(100, with_params = params) plot_distributions( theoretical = dist, empirical = dist_empirical(x), .x = seq(0, 20, length.out = 101), with_params = list(theoretical = params) ) #> Warning: Removed 33 rows containing missing values or values outside the scale range #> (`geom_line()`)."},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":null,"dir":"Reference","previous_headings":"","what":"Beta Distribution — dist_beta","title":"Beta Distribution — dist_beta","text":"See stats::Beta","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Beta Distribution — dist_beta","text":"","code":"dist_beta(shape1 = NULL, shape2 = NULL, ncp = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Beta Distribution — dist_beta","text":"shape1 First scalar shape parameter, NULL placeholder. shape2 Second scalar shape parameter, NULL placeholder. 
ncp Scalar non-centrality parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Beta Distribution — dist_beta","text":"BetaDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Beta Distribution — dist_beta","text":"parameters can overridden with_params = list(shape = ..., scale = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_beta.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Beta Distribution — dist_beta","text":"","code":"d_beta <- dist_beta(shape1 = 2, shape2 = 2, ncp = 0) x <- d_beta$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_beta, estimated = d_beta, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"beta\")$estimate ) ), .x = seq(0, 2, length.out = 100) ) #> Warning: Removed 141 rows containing missing values or values outside the scale range #> (`geom_line()`)."},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":null,"dir":"Reference","previous_headings":"","what":"Binomial Distribution — dist_binomial","title":"Binomial Distribution — dist_binomial","text":"See stats::Binomial","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Binomial Distribution — dist_binomial","text":"","code":"dist_binomial(size = NULL, prob = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Binomial Distribution — dist_binomial","text":"size Number trials parameter (integer), NULL 
placeholder. prob Success probability parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Binomial Distribution — dist_binomial","text":"BinomialDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Binomial Distribution — dist_binomial","text":"parameters can overridden with_params = list(size = ..., prob = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_binomial.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Binomial Distribution — dist_binomial","text":"","code":"d_binom <- dist_binomial(size = 10, prob = 0.5) x <- d_binom$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_binom, estimated = d_binom, with_params = list( estimated = list( size = max(x), prob = mean(x) / max(x) ) ), .x = 0:max(x) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":null,"dir":"Reference","previous_headings":"","what":"Blended distribution — dist_blended","title":"Blended distribution — dist_blended","text":"Blended distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Blended distribution — dist_blended","text":"","code":"dist_blended(dists, probs = NULL, breaks = NULL, bandwidths = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Blended distribution — dist_blended","text":"dists list k >= 2 component Distributions. probs k Mixture weight parameters breaks k - 1 Centers blending zones. 
dists[] blend dists[+ 1] around breaks[]. bandwidths k - 1 Radii blending zones. -th blending zone begin breaks[] - bandwidths[] end breaks[] + bandwidths[]. bandwidth 0 corresponds hard cut-, .e. jump discontinuity density blended Distribution.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Blended distribution — dist_blended","text":"BlendedDistribution object.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_blended.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Blended distribution — dist_blended","text":"","code":"bd <- dist_blended( list( dist_normal(mean = 0.0, sd = 1.0), dist_genpareto(u = 3.0, sigmau = 1.0, xi = 3.0) ), breaks = list(3.0), bandwidths = list(0.5), probs = list(0.9, 0.1) ) plot_distributions( bd, .x = seq(-3, 10, length.out = 100), plots = c(\"d\", \"p\") )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":null,"dir":"Reference","previous_headings":"","what":"Dirac (degenerate point) Distribution — dist_dirac","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"degenerate distribution mass single point.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"","code":"dist_dirac(point = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"point point probability mass 1.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Dirac (degenerate point) Distribution — 
dist_dirac","text":"DiracDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"parameter can overridden with_params = list(point = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_dirac.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Dirac (degenerate point) Distribution — dist_dirac","text":"","code":"d_dirac <- dist_dirac(1.5) d_dirac$sample(2L) #> [1] 1.5 1.5 d_dirac$sample(2L, list(point = 42.0)) #> [1] 42 42"},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":null,"dir":"Reference","previous_headings":"","what":"Discrete Distribution — dist_discrete","title":"Discrete Distribution — dist_discrete","text":"full-flexibility discrete distribution values 1 size.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Discrete Distribution — dist_discrete","text":"","code":"dist_discrete(size = NULL, probs = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Discrete Distribution — dist_discrete","text":"size Number classes parameter (integer). Required probs NULL. 
probs Vector probabilties parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Discrete Distribution — dist_discrete","text":"DiscreteDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Discrete Distribution — dist_discrete","text":"Parameters can overridden with_params = list(probs = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_discrete.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Discrete Distribution — dist_discrete","text":"","code":"d_discrete <- dist_discrete(probs = list(0.5, 0.25, 0.15, 0.1)) x <- d_discrete$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_discrete, estimated = d_discrete, with_params = list( estimated = list( size = max(x), probs = as.list(unname(table(x)) / 100) ) ), .x = 0:max(x) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":null,"dir":"Reference","previous_headings":"","what":"Empirical distribution — dist_empirical","title":"Empirical distribution — dist_empirical","text":"Creates empirical distribution object sample. Assumes iid. samples. 
with_params used distribution estimation relevant indicators happens construction.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Empirical distribution — dist_empirical","text":"","code":"dist_empirical(sample, positive = FALSE, bw = \"nrd0\")"},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Empirical distribution — dist_empirical","text":"sample Sample build empirical distribution positive underlying distribution known positive? effect density estimation procedure. positive = FALSE uses kernel density estimate produced density(), positive = TRUE uses log-kernel density estimate produced logKDE::logdensity_fft(). latter can improve density estimation near zero. bw Bandwidth parameter density estimation. Passed density estimation function selected positive.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Empirical distribution — dist_empirical","text":"EmpiricalDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Empirical distribution — dist_empirical","text":"sample() samples iid. sample. approach similar bootstrapping. density() evaluates kernel density estimate, approximating zero outside known support. estimate either obtained using stats::density logKDE::logdensity_fft, depending positive. probability() evaluates empirical cumulative density function obtained stats::ecdf. 
quantile() evaluates empirical quantiles using stats::quantile hazard() estimates hazard rate using density estimate empirical cumulative density function: h(t) = df(t) / (1 - cdf(t)).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_empirical.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Empirical distribution — dist_empirical","text":"","code":"x <- rexp(20, rate = 1) dx <- dist_empirical(sample = x, positive = TRUE) y <- rnorm(20) dy <- dist_empirical(sample = y) plot_distributions( exponential = dx, normal = dy, .x = seq(-3, 3, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":null,"dir":"Reference","previous_headings":"","what":"Erlang Mixture distribution — dist_erlangmix","title":"Erlang Mixture distribution — dist_erlangmix","text":"Erlang Mixture distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Erlang Mixture distribution — dist_erlangmix","text":"","code":"dist_erlangmix(shapes, scale = NULL, probs = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Erlang Mixture distribution — dist_erlangmix","text":"shapes Shape parameters, trunc_erlangmix fit, NULL placeholder. scale Common scale parameter, NULL placeholder. 
probs Mixing probabilities, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Erlang Mixture distribution — dist_erlangmix","text":"ErlangMixtureDistribution object.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_erlangmix.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Erlang Mixture distribution — dist_erlangmix","text":"","code":"params <- list(scale = 1.0, probs = list(0.5, 0.3, 0.2), shapes = list(1L, 2L, 3L)) dist <- dist_erlangmix(vector(\"list\", 3L)) x <- dist$sample(20, with_params = params) d_emp <- dist_empirical(x, positive = TRUE) plot_distributions( empirical = d_emp, theoretical = dist, with_params = list( theoretical = params ), .x = seq(1e-4, 5, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":null,"dir":"Reference","previous_headings":"","what":"Exponential distribution — dist_exponential","title":"Exponential distribution — dist_exponential","text":"See stats::Exponential.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Exponential distribution — dist_exponential","text":"","code":"dist_exponential(rate = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Exponential distribution — dist_exponential","text":"rate Scalar rate parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Exponential distribution — dist_exponential","text":"ExponentialDistribution 
object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Exponential distribution — dist_exponential","text":"parameter can overridden with_params = list(rate = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_exponential.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Exponential distribution — dist_exponential","text":"","code":"rate <- 1 d_exp <- dist_exponential() x <- d_exp$sample(20, with_params = list(rate = rate)) d_emp <- dist_empirical(x, positive = TRUE) plot_distributions( empirical = d_emp, theoretical = d_exp, estimated = d_exp, with_params = list( theoretical = list(rate = rate), estimated = list(rate = 1 / mean(x)) ), .x = seq(1e-4, 5, length.out = 100) ) #> Warning: Removed 27 rows containing missing values or values outside the scale range #> (`geom_line()`)."},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":null,"dir":"Reference","previous_headings":"","what":"Gamma distribution — dist_gamma","title":"Gamma distribution — dist_gamma","text":"See stats::GammaDist.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Gamma distribution — dist_gamma","text":"","code":"dist_gamma(shape = NULL, rate = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Gamma distribution — dist_gamma","text":"shape Scalar shape parameter, NULL placeholder. 
rate Scalar rate parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Gamma distribution — dist_gamma","text":"GammaDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Gamma distribution — dist_gamma","text":"parameters can overridden with_params = list(shape = ..., rate = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_gamma.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Gamma distribution — dist_gamma","text":"","code":"alpha <- 2 beta <- 2 d_gamma <- dist_gamma(shape = alpha, rate = beta) x <- d_gamma$sample(100) d_emp <- dist_empirical(x, positive = TRUE) plot_distributions( empirical = d_emp, theoretical = d_gamma, estimated = d_gamma, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"gamma\")$estimate ) ), .x = seq(1e-3, max(x), length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":null,"dir":"Reference","previous_headings":"","what":"Generalized Pareto Distribution — dist_genpareto","title":"Generalized Pareto Distribution — dist_genpareto","text":"See evmix::gpd","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Generalized Pareto Distribution — dist_genpareto","text":"","code":"dist_genpareto(u = NULL, sigmau = NULL, xi = NULL) dist_genpareto1(u = NULL, sigmau = NULL, xi = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Generalized Pareto Distribution — dist_genpareto","text":"u Scalar 
location parameter, NULL placeholder. sigmau Scalar scale parameter, NULL placeholder. xi Scalar shape parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Generalized Pareto Distribution — dist_genpareto","text":"GeneralizedParetoDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Generalized Pareto Distribution — dist_genpareto","text":"parameters can overridden with_params = list(u = ..., sigmau = ..., xi = ...). dist_genpareto1 equivalent dist_genpareto enforces bound constraints xi [0, 1]. ensures unboundedness finite expected value unless xi == 1.0.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_genpareto.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Generalized Pareto Distribution — dist_genpareto","text":"","code":"d_genpareto <- dist_genpareto(u = 0, sigmau = 1, xi = 1) x <- d_genpareto$sample(100) d_emp <- dist_empirical(x) d_genpareto$export_functions(\"gpd\") # so fitdistrplus finds it #> Exported `dgpd()`. #> Exported `rgpd()`. #> Exported `pgpd()`. #> Exported `qgpd()`. 
plot_distributions( empirical = d_emp, theoretical = d_genpareto, estimated = d_genpareto, with_params = list( estimated = fit(dist_genpareto(), x)$params ), .x = seq(0, 5, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":null,"dir":"Reference","previous_headings":"","what":"Log Normal distribution — dist_lognormal","title":"Log Normal distribution — dist_lognormal","text":"See stats::Lognormal.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Log Normal distribution — dist_lognormal","text":"","code":"dist_lognormal(meanlog = NULL, sdlog = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Log Normal distribution — dist_lognormal","text":"meanlog Scalar mean parameter log scale, NULL placeholder. sdlog Scalar standard deviation parameter log scale, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Log Normal distribution — dist_lognormal","text":"LognormalDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Log Normal distribution — dist_lognormal","text":"parameters can overridden with_params = list(meanlog = ..., sdlog = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_lognormal.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Log Normal distribution — dist_lognormal","text":"","code":"mu <- 0 sigma <- 1 d_lnorm <- dist_lognormal(meanlog = mu, sdlog = sigma) x <- d_lnorm$sample(20) d_emp <- dist_empirical(x, positive = TRUE) 
plot_distributions( empirical = d_emp, theoretical = d_lnorm, estimated = d_lnorm, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"lnorm\")$estimate ) ), .x = seq(1e-3, 5, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":null,"dir":"Reference","previous_headings":"","what":"Mixture distribution — dist_mixture","title":"Mixture distribution — dist_mixture","text":"Parameters mixing components can overridden with_params = list(dists = list(..., ..., ...)). #' Mixing probabilites can overridden with_params = list(probs = list(..., ..., ...)). number components overridden.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Mixture distribution — dist_mixture","text":"","code":"dist_mixture(dists = list(), probs = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Mixture distribution — dist_mixture","text":"dists list mixing distributions. May contain placeholders duplicates. probs list mixing probabilities length dists. normalized sum one NULL can used placeholder within probs. 
reduce number required parameters, probs least partly specified (probs = list(NULL, NULL, ..., 1) k - 1 NULLs k number mixing components).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Mixture distribution — dist_mixture","text":"MixtureDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Mixture distribution — dist_mixture","text":"support quantile() capability!","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_mixture.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Mixture distribution — dist_mixture","text":"","code":"# A complicated way to define a uniform distribution on \\[0, 2\\] dist_mixture( dists = list( dist_uniform(min = 0, max = 1), dist_uniform(min = 1, max = 2) ), probs = list(0.5, 0.5) ) #> A Mixture with 0 dof"},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":null,"dir":"Reference","previous_headings":"","what":"Negative binomial Distribution — dist_negbinomial","title":"Negative binomial Distribution — dist_negbinomial","text":"See stats::NegBinomial","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Negative binomial Distribution — dist_negbinomial","text":"","code":"dist_negbinomial(size = NULL, mu = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Negative binomial Distribution — dist_negbinomial","text":"size Number successful trials parameter, NULL placeholder. Non-integer values > 0 allowed. 
mu Mean parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Negative binomial Distribution — dist_negbinomial","text":"NegativeBinomialDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Negative binomial Distribution — dist_negbinomial","text":"parameters can overridden with_params = list(size = ..., prob = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_negbinomial.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Negative binomial Distribution — dist_negbinomial","text":"","code":"d_nbinom <- dist_negbinomial(size = 3.5, mu = 8.75) x <- d_nbinom$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_nbinom, estimated = d_nbinom, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"nbinom\")$estimate ) ), .x = 0:max(x) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":null,"dir":"Reference","previous_headings":"","what":"Normal distribution — dist_normal","title":"Normal distribution — dist_normal","text":"See stats::Normal.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Normal distribution — dist_normal","text":"","code":"dist_normal(mean = NULL, sd = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Normal distribution — dist_normal","text":"mean Scalar mean parameter, NULL placeholder. 
sd Scalar standard deviation parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Normal distribution — dist_normal","text":"NormalDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Normal distribution — dist_normal","text":"parameters can overridden with_params = list(mean = ..., sd = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_normal.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Normal distribution — dist_normal","text":"","code":"mu <- 0 sigma <- 1 d_norm <- dist_normal(mean = mu, sd = sigma) x <- d_norm$sample(20) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_norm, estimated = d_norm, with_params = list( estimated = list(mean = mean(x), sd = sd(x)) ), .x = seq(-3, 3, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":null,"dir":"Reference","previous_headings":"","what":"Pareto Distribution — dist_pareto","title":"Pareto Distribution — dist_pareto","text":"See Pareto","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Pareto Distribution — dist_pareto","text":"","code":"dist_pareto(shape = NULL, scale = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Pareto Distribution — dist_pareto","text":"shape Scalar shape parameter, NULL placeholder. 
scale Scalar scale parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Pareto Distribution — dist_pareto","text":"ParetoDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Pareto Distribution — dist_pareto","text":"parameters can overridden with_params = list(shape = ..., scale = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_pareto.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Pareto Distribution — dist_pareto","text":"","code":"d_pareto <- dist_pareto(shape = 3, scale = 1) x <- d_pareto$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_pareto, estimated = d_pareto, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"pareto\")$estimate ) ), .x = seq(0, 2, length.out = 100) ) #> Warning: The dpareto function should return a vector of with NaN values when input has inconsistent values and not raise an error"},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":null,"dir":"Reference","previous_headings":"","what":"Poisson Distribution — dist_poisson","title":"Poisson Distribution — dist_poisson","text":"See stats::Poisson","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Poisson Distribution — dist_poisson","text":"","code":"dist_poisson(lambda = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Poisson Distribution — dist_poisson","text":"lambda Scalar rate parameter, NULL 
placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Poisson Distribution — dist_poisson","text":"PoissonDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Poisson Distribution — dist_poisson","text":"parameter can overridden with_params = list(lambda = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_poisson.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Poisson Distribution — dist_poisson","text":"","code":"d_pois <- dist_poisson(lambda = 5.0) x <- d_pois$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_pois, estimated = d_pois, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"pois\")$estimate ) ), .x = 0:max(x) )"},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":null,"dir":"Reference","previous_headings":"","what":"Tranlsated distribution — dist_translate","title":"Tranlsated distribution — dist_translate","text":"Tranlsated distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Tranlsated distribution — dist_translate","text":"","code":"dist_translate(dist = NULL, offset = NULL, multiplier = 1)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Tranlsated distribution — dist_translate","text":"dist underlying distribution, NULL placeholder. offset Offset added observation, NULL placeholder. 
multiplier Factor multiply observation , NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Tranlsated distribution — dist_translate","text":"TranslatedDistribution object.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_translate.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Tranlsated distribution — dist_translate","text":"","code":"d_norm <- dist_normal(mean = 0, sd = 1) d_tnorm <- dist_translate(dist = d_norm, offset = 1) plot_distributions(d_norm, d_tnorm, .x = seq(-2, 3, length.out = 100))"},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":null,"dir":"Reference","previous_headings":"","what":"Truncated distribution — dist_trunc","title":"Truncated distribution — dist_trunc","text":"Truncated distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Truncated distribution — dist_trunc","text":"","code":"dist_trunc(dist = NULL, min = NULL, max = NULL, offset = 0, max_retry = 100)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Truncated distribution — dist_trunc","text":"dist underlying distribution, NULL placeholder. min Minimum value truncate (exclusive), NULL placeholder. max Maxmimum value truncate (inclusive), NULL placeholder. offset Offset added observation truncation, NULL placeholder. Truncation dist occur (min, max]. offset added deterministically. 
max_retry Maximum number resample attempts trying sample rejection.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Truncated distribution — dist_trunc","text":"TruncatedDistribution object.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_trunc.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Truncated distribution — dist_trunc","text":"","code":"d_norm <- dist_normal(mean = 0, sd = 1) d_tnorm <- dist_trunc(dist = d_norm, min = -2, max = 2, offset = 1) plot_distributions(d_norm, d_tnorm, .x = seq(-2, 3, length.out = 100))"},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":null,"dir":"Reference","previous_headings":"","what":"Uniform distribution — dist_uniform","title":"Uniform distribution — dist_uniform","text":"See stats::Uniform","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Uniform distribution — dist_uniform","text":"","code":"dist_uniform(min = NULL, max = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Uniform distribution — dist_uniform","text":"min Lower limit, NULL placeholder. 
max Upper limit, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Uniform distribution — dist_uniform","text":"UniformDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Uniform distribution — dist_uniform","text":"parameters can overridden with_params = list(min = ..., max = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_uniform.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Uniform distribution — dist_uniform","text":"","code":"d_unif <- dist_uniform(min = 0, max = 1) x <- d_unif$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_unif, estimated = d_unif, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"unif\")$estimate ) ), .x = seq(0, 1, length.out = 100) ) #> Warning: Removed 2 rows containing missing values or values outside the scale range #> (`geom_line()`)."},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":null,"dir":"Reference","previous_headings":"","what":"Weibull Distribution — dist_weibull","title":"Weibull Distribution — dist_weibull","text":"See stats::Weibull","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Weibull Distribution — dist_weibull","text":"","code":"dist_weibull(shape = NULL, scale = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Weibull Distribution — dist_weibull","text":"shape Scalar shape parameter, NULL placeholder. 
scale Scalar scale parameter, NULL placeholder.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Weibull Distribution — dist_weibull","text":"WeibullDistribution object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Weibull Distribution — dist_weibull","text":"parameters can overridden with_params = list(shape = ..., scale = ...).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/dist_weibull.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Weibull Distribution — dist_weibull","text":"","code":"d_weibull <- dist_weibull(shape = 3, scale = 1) x <- d_weibull$sample(100) d_emp <- dist_empirical(x) plot_distributions( empirical = d_emp, theoretical = d_weibull, estimated = d_weibull, with_params = list( estimated = inflate_params( fitdistrplus::fitdist(x, distr = \"weibull\")$estimate ) ), .x = seq(0, 2, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit a neural network based distribution model to data — fit.reservr_keras_model","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"function delegates work keras3::fit.keras.src.models.model.Model() performs additional consistency checks make sure tf_compile_model() called appropriate options support fitting observations y well automatically converting y n x 6 matrix needed compiled loss function.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit a neural network based distribution model to data — 
fit.reservr_keras_model","text":"","code":"# S3 method for reservr_keras_model fit( object, x, y, batch_size = NULL, epochs = 10, verbose = getOption(\"keras.fit_verbose\", default = 1), callbacks = NULL, view_metrics = getOption(\"keras.view_metrics\", default = \"auto\"), validation_split = 0, validation_data = NULL, shuffle = TRUE, class_weight = NULL, sample_weight = NULL, initial_epoch = 0, steps_per_epoch = NULL, validation_steps = NULL, ... )"},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"object compiled reservr_keras_model obtained tf_compile_model(). x list input tensors (predictors) y trunc_obs tibble observed outcomes, something convertible via as_trunc_obs(). batch_size Integer NULL. Number samples per gradient update. unspecified, batch_size default 32. specify batch_size data form TF Datasets generators, (since generate batches). epochs Integer. Number epochs train model. epoch iteration entire x y data provided (unless steps_per_epoch flag set something NULL). Note conjunction initial_epoch, epochs understood \"final epoch\". model trained number iterations given epochs, merely epoch index epochs reached. verbose \"auto\", 0, 1, 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. \"auto\" becomes 1 cases, 2 knitr render running distributed training server. Note progress bar particularly useful logged file, verbose=2 recommended running interactively (e.g., production environment). Defaults \"auto\". callbacks List Callback() instances. List callbacks apply training. See callback_*. view_metrics View realtime plot training metrics (epoch). default (\"auto\") display plot running within RStudio, metrics specified model compile(), epochs > 1 verbose > 0. 
Set global options(keras.view_metrics = ) option establish different default. validation_split Float 0 1. Fraction training data used validation data. model set apart fraction training data, train , evaluate loss model metrics data end epoch. validation data selected last samples x y data provided, shuffling. argument supported x TF Dataset generator. validation_data validation_split provided, validation_data override validation_split. validation_data Data evaluate loss model metrics end epoch. model trained data. Thus, note fact validation loss data provided using validation_split validation_data affected regularization layers like noise dropout. validation_data override validation_split. : tuple (x_val, y_val) arrays tensors. tuple (x_val, y_val, val_sample_weights) arrays. generator returning (inputs, targets) (inputs, targets, sample_weights). shuffle Boolean, whether shuffle training data epoch. argument ignored x generator TF Dataset. class_weight Optional named list mapping class indices (integers, 0-based) weight (float) value, used weighting loss function (training ). can useful tell model \"pay attention\" samples -represented class. class_weight specified targets rank 2 greater, either y must one-hot encoded, explicit final dimension 1 must included sparse class labels. sample_weight Optional array weights training samples, used weighting loss function (training ). can either pass flat (1D) array/vector length input samples (1:1 mapping weights samples), case temporal data, can pass 2D array (matrix) shape (samples, sequence_length), apply different weight every timestep every sample. argument supported x TF Dataset generator, instead provide sample_weights third element x. Note sample weighting apply metrics specified via metrics argument compile(). apply sample weighting metrics, can specify via weighted_metrics compile() instead. initial_epoch Integer. Epoch start training (useful resuming previous training run). steps_per_epoch Integer NULL. 
Total number steps (batches samples) declaring one epoch finished starting next epoch. training input tensors backend-native tensors, default NULL equal number samples dataset divided batch size, 1 determined. x TF Dataset, steps_per_epoch NULL, epoch run input dataset exhausted. passing infinitely repeating dataset, must specify steps_per_epoch argument. steps_per_epoch = -1 training run indefinitely infinitely repeating dataset. validation_steps relevant validation_data provided. Total number steps (batches samples) draw stopping performing validation end every epoch. validation_steps NULL, validation run validation_data dataset exhausted. case infinitely repeated dataset, run infinite loop. validation_steps specified part dataset consumed, evaluation start beginning dataset epoch. ensures validation samples used every time. ... Unused. old arguments supplied, error message raised informing fix issue.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"history object contains information collected training. 
model object updated -place side-effect.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"Additionally, default batch_size min(nrow(y), 10000) instead keras default 32 latter bad choice fitting distributions since involved loss much less stable typical losses used machine learning, leading divergence small batch sizes.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit.reservr_keras_model.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit a neural network based distribution model to data — fit.reservr_keras_model","text":"","code":"dist <- dist_exponential() params <- list(rate = 1.0) N <- 100L rand_input <- runif(N) x <- dist$sample(N, with_params = params) if (interactive()) { tf_in <- keras3::layer_input(1L) mod <- tf_compile_model( inputs = list(tf_in), intermediate_output = tf_in, dist = dist, optimizer = keras3::optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_fit <- fit( object = mod, x = k_matrix(rand_input), y = x, epochs = 10L, callbacks = list( callback_debug_dist_gradients(mod, k_matrix(rand_input), x, keep_grads = TRUE) ) ) }"},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"Fit Blended mixture using ECME-Algorithm","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"","code":"fit_blended( dist, obs, start, min_iter = 0L, max_iter = 100L, skip_first_e = 
FALSE, tolerance = 1e-05, trace = FALSE, ... )"},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"dist BlendedDistribution. assumed, breaks bandwidths placeholder weights estimated. obs Set observations produced trunc_obs() convertible via as_trunc_obs(). start Initial values placeholder parameters. missing, starting values obtained fit_dist_start(). min_iter Minimum number EM-Iterations max_iter Maximum number EM-Iterations (weight updates) skip_first_e Skip first E-Step (update Probability weights)? can help initial values cause mixture component vanish first E-Step starting values can improved. tolerance Numerical tolerance. trace Include tracing information output? TRUE, additional tracing information added result list. ... Passed fit_dist_start() start missing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"list elements params fitted parameters structure init. 
params_hist (trace TRUE) history parameters (e- m- step) iter number outer EM-iterations logLik final log-likelihood","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit_blended.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit a Blended mixture using an ECME-Algorithm — fit_blended","text":"","code":"dist <- dist_blended( list( dist_exponential(), dist_genpareto() ) ) params <- list( probs = list(0.9, 0.1), dists = list( list(rate = 2.0), list(u = 1.5, xi = 0.2, sigmau = 1.0) ), breaks = list(1.5), bandwidths = list(0.3) ) x <- dist$sample(100L, with_params = params) dist$default_params$breaks <- params$breaks dist$default_params$bandwidths <- params$bandwidths if (interactive()) { fit_blended(dist, x) }"},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit a general distribution to observations — fit_dist","title":"Fit a general distribution to observations — fit_dist","text":"default implementation performs maximum likelihood estimation placeholder parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit a general distribution to observations — fit_dist","text":"","code":"fit_dist(dist, obs, start, ...) fit_dist_direct(dist, obs, start, ..., .start_with_default = FALSE) # S3 method for Distribution fit(object, obs, start, ...)"},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit a general distribution to observations — fit_dist","text":"dist Distribution object. obs Set observations produced trunc_obs() convertible via as_trunc_obs(). start Initial values placeholder parameters. missing, starting values obtained fit_dist_start(). ... 
Distribution-specific arguments fitting procedure .start_with_default directly optimising likelihood, use optimised algorithm finding better starting values? object parameter dist","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit a general distribution to observations — fit_dist","text":"list least elements params fitted parameters structure init. logLik final log-likelihood Additional information may provided depending dist.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Fit a general distribution to observations — fit_dist","text":"Erlang mixture distributions Mixture distributions, EM-Algorithm instead used improve stability. fit() fit_dist() chose optimisation method optimized specific distribution given. fit_dist_direct() can used force direct maximisation likelihood.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit a general distribution to observations — fit_dist","text":"","code":"x <- rexp(100) lambda_hat <- 1 / mean(x) lambda_hat2 <- fit_dist(dist_exponential(), x)$params$rate identical(lambda_hat, lambda_hat2) #> [1] TRUE dist <- dist_mixture(list(dist_normal(), dist_translate(dist_exponential(), offset = 6))) params <- list( dists = list(list(mean = 5, sd = 1), list(dist = list(rate = 1))), probs = list(0.95, 0.05) ) set.seed(2000) u <- runif(100, 10, 20) x <- dist$sample(100, with_params = params) obs <- trunc_obs(x = x[x <= u], tmin = -Inf, tmax = u[x <= u]) default_fit <- fit_dist(dist, obs) direct_fit <- fit_dist_direct(dist, obs) # NB: direct optimisation steps with pre-run take a few seconds # \\donttest{ direct_fit_init <- fit_dist_direct(dist, obs, start = default_fit$params) 
direct_fit_auto_init <- fit_dist_direct(dist, obs, .start_with_default = TRUE) stopifnot(direct_fit_init$logLik == direct_fit_auto_init$logLik) c(default_fit$logLik, direct_fit$logLik, direct_fit_init$logLik) #> [1] -153.0052 -153.0052 -153.0052 # }"},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":null,"dir":"Reference","previous_headings":"","what":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"Find starting values distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"","code":"# S3 method for MixtureDistribution fit_dist_start(dist, obs, dists_start = NULL, ...) fit_dist_start(dist, obs, ...)"},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"dist Distribution object. obs Observations fit . dists_start List initial parameters component distributions. left empty, initialisation automatically performed using fit_dist_start() observations support respective component. ... 
Additional arguments initialisation procedure","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"list initial parameters suitable passing fit_dist().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_dist_start.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Find starting values for distribution parameters — fit_dist_start.MixtureDistribution","text":"","code":"fit_dist_start(dist_exponential(), rexp(100)) #> $rate #> [1] 1.258531 #>"},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"Fit Erlang mixture using ECME-Algorithm","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"","code":"fit_erlang_mixture( dist, obs, start, min_iter = 0L, max_iter = 100L, skip_first_e = FALSE, tolerance = 1e-05, trace = FALSE, parallel = FALSE, ... )"},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"dist ErlangMixtureDistribution. assumed, probs scale estimated. obs Set observations produced trunc_obs() convertible via as_trunc_obs(). start Initial values placeholder parameters. missing, starting values obtained fit_dist_start(). 
min_iter Minimum number EM-Iterations max_iter Maximum number EM-Iterations (weight updates) skip_first_e Skip first E-Step (update Probability weights)? can help initial values cause mixture component vanish first E-Step starting values can improved. tolerance Numerical tolerance. trace Include tracing information output? TRUE, additional tracing information added result list. parallel Enable experimental parallel evaluation expected log-likelihood? ... Passed fit_dist_start() start missing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"list elements params fitted parameters structure init. params_hist (trace TRUE) history parameters (e- m- step). Otherwise empty list. iter number outer EM-iterations logLik final log-likelihood","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit_erlang_mixture.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit an Erlang mixture using an ECME-Algorithm — fit_erlang_mixture","text":"","code":"dist <- dist_erlangmix(list(NULL, NULL, NULL)) params <- list( shapes = list(1L, 4L, 12L), scale = 2.0, probs = list(0.5, 0.3, 0.2) ) x <- dist$sample(100L, with_params = params) fit_erlang_mixture(dist, x, init = \"kmeans\") #> $params #> $params$probs #> $params$probs[[1]] #> [1] 0.31 #> #> $params$probs[[2]] #> [1] 0.43 #> #> $params$probs[[3]] #> [1] 0.26 #> #> #> $params$shapes #> $params$shapes[[1]] #> [1] 1 #> #> $params$shapes[[2]] #> [1] 4 #> #> $params$shapes[[3]] #> [1] 13 #> #> #> $params$scale #> [1] 1.686607 #> #> #> $params_hist #> list() #> #> $iter #> [1] 1 #> #> $logLik #> 'log Lik.' 
-310.162 (df=6) #>"},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":null,"dir":"Reference","previous_headings":"","what":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"Fit generic mixture using ECME-Algorithm","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"","code":"fit_mixture( dist, obs, start, min_iter = 0L, max_iter = 100L, skip_first_e = FALSE, tolerance = 1e-05, trace = FALSE, ... )"},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"dist MixtureDistribution specifying structure mixture. Free parameters optimised. dominating measure likelihoods must constant, example dist_dirac() may point parameter free. obs Set observations produced trunc_obs() convertible via as_trunc_obs(). start Initial values placeholder parameters. missing, starting values obtained fit_dist_start(). min_iter Minimum number EM-Iterations max_iter Maximum number EM-Iterations (weight updates) skip_first_e Skip first E-Step (update Probability weights)? can help initial values cause mixture component vanish first E-Step starting values can improved. tolerance Numerical tolerance. trace Include tracing information output? TRUE, additional tracing information added result list. ... Passed fit_dist_start() start missing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"list elements params fitted parameters structure init. 
params_hist (trace TRUE) history parameters (e- m- step) iter number outer EM-iterations logLik final log-likelihood","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/fit_mixture.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Fit a generic mixture using an ECME-Algorithm — fit_mixture","text":"","code":"dist <- dist_mixture( list( dist_dirac(0.0), dist_exponential() ) ) params <- list( probs = list(0.1, 0.9), dists = list( list(), list(rate = 1.0) ) ) x <- dist$sample(100L, with_params = params) fit_mixture(dist, x) #> $params #> $params$dists #> $params$dists[[1]] #> list() #> #> $params$dists[[2]] #> $params$dists[[2]]$rate #> [1] 0.8578941 #> #> #> #> $params$probs #> $params$probs[[1]] #> [1] 0.11 #> #> $params$probs[[2]] #> [1] 0.89 #> #> #> #> $iter #> [1] 1 #> #> $logLik #> 'log Lik.' -137.293 (df=2) #>"},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":null,"dir":"Reference","previous_headings":"","what":"Flatten / Inflate parameter lists / vectors — flatten_params","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"Flatten / Inflate parameter lists / vectors","code":""},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"","code":"flatten_params(params) flatten_params_matrix(params) flatten_bounds(bounds) inflate_params(flat_params)"},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"params named list parameters flattened. form passed with_params argument distribution functions. 
bounds List parameter bounds returned dist$get_param_bounds() flat_params named numeric vector parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"flatten_params returns 'flattened' vector parameters. intended adapter multi-dimensional optimisation functions distribution objects. flatten_params_matrix returns 'flattened' matrix parameters. intended adapter multi-dimensional optimisation functions distribution objects. column corresponds one input element. flatten_bounds returns named list vectors names lower upper. Containing upper lower bounds parameter. inflate_params returns 'inflated' list parameters. can passed with_params argument distribution functions.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/flatten_params.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Flatten / Inflate parameter lists / vectors — flatten_params","text":"","code":"library(ggplot2) mm <- dist_mixture(list( dist_exponential(NULL), dist_lognormal(0.5, NULL) ), list(NULL, 1)) ph <- mm$get_placeholders() ph_flat <- flatten_params(ph) ph_reinflated <- inflate_params(ph_flat) ph_flat[] <- c(1, 1, 6) ph_sample <- inflate_params(ph_flat) x <- mm$sample( 100, with_params = ph_sample ) emp_cdf <- ecdf(x) ggplot(data.frame(t = seq(from = min(x), to = max(x), length.out = 100))) %+% geom_point(aes(x = t, y = emp_cdf(t))) %+% geom_line(aes(x = t, y = mm$probability(t, with_params = ph_sample)), linetype = 2)"},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":null,"dir":"Reference","previous_headings":"","what":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"Integrates fun bounds [ lower, upper ] vectorized 
lower upper. Vectorized list structures parameters can also passed.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"","code":"integrate_gk( fun, lower, upper, params = list(), .tolerance = .Machine$double.eps^0.25, .max_iter = 100L )"},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"fun function integrate. Must vectorized take one two arguments, first points evaluate second (optionally) parameters apply. must return numeric vector length first input. Currently, infinite bounds supported. lower, upper Integration bounds. Must length. params Parameters pass second argument fun. actual parameters must length number integrals compute. Can possibly nested list structures containing numeric vectors. Alternatively, can matrix number rows number integrals compute. .tolerance Absolute element-wise tolerance. .max_iter Maximum number iterations. number integration intervals length(lower) * .max_iter. 
Therefor maximum number function evaluations per integration interval 15 * .max_iter.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"vector integrals -th entry containing approximation integral fun(t, pick_params_at(params, )) dt interval lower[] upper[]","code":""},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"integration error estimated Gauss-Kronrod quadrature absolute difference 7-point quadrature 15-point quadrature. Integrals converge bisected midpoint. params object recursively subsetted numeric vectors length number observations.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/integrate_gk.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Adaptive Gauss-Kronrod Quadrature for multiple limits — integrate_gk","text":"","code":"# Argument recycling and parallel integration of two intervals integrate_gk(sin, 0, c(pi, 2 * pi)) #> [1] 2.000000e+00 -3.141135e-16 dist <- dist_exponential() integrate_gk( function(x, p) dist$density(x, with_params = p), lower = 0, upper = 1:10, params = list(rate = 1 / 1:10) ) #> [1] 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 #> [8] 0.6321206 0.6321206 0.6321206 dist$probability(1:10, with_params = list(rate = 1 / 1:10)) #> [1] 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 0.6321206 #> [8] 0.6321206 0.6321206 0.6321206"},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":null,"dir":"Reference","previous_headings":"","what":"Convex union and intersection of intervals — interval-operations","title":"Convex union and 
intersection of intervals — interval-operations","text":"Convex union intersection intervals","code":""},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Convex union and intersection of intervals — interval-operations","text":"","code":"interval_union(..., intervals = list()) interval_intersection(..., intervals = list())"},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Convex union and intersection of intervals — interval-operations","text":"... appened intervals present. intervals list Intervals.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Convex union and intersection of intervals — interval-operations","text":"interval_union returns convex union intervals intervals. smallest interval completely containing intervals. interval_intersection returns set intersection intervals intervals. 
empty set represented open interval (0, 0).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/interval-operations.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Convex union and intersection of intervals — interval-operations","text":"","code":"interval_union( interval(c(0, 1), closed = TRUE), interval(c(1, 2)) ) #> [0, 2) interval_union( interval(c(0, 5)), interval(c(1, 4), closed = TRUE) ) #> (0, 5) # Convex union is not equal to set union: interval_union( interval(c(0, 1)), interval(c(2, 3)) ) #> (0, 3) # The empty union is {} interval_union() #> {} interval_intersection( interval(c(0, 1)), interval(c(0.5, 2)) ) #> (0.5, 1) interval_intersection( interval(c(0, Inf)), interval(c(-Inf, 0)) ) #> {} interval_intersection( interval(c(0, Inf), include_lowest = TRUE), interval(c(-Inf, 0), include_highest = TRUE) ) #> {0} interval_intersection( interval(c(0, 5)), interval(c(1, 6), closed = TRUE) ) #> [1, 5) # The empty intersection is (-Inf, Inf) interval_intersection() #> (-Inf, Inf)"},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":null,"dir":"Reference","previous_headings":"","what":"Intervals — interval","title":"Intervals — interval","text":"Intervals","code":""},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Intervals — interval","text":"","code":"interval( range = c(-Inf, Inf), ..., include_lowest = closed, include_highest = closed, closed = FALSE, integer = FALSE, read_only = FALSE ) is.Interval(x)"},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Intervals — interval","text":"range interval boundaries sorted two-element numeric vector. ... First argument used endpoint range length 1. Additional arguments, range length 2, cause warning ignored. 
include_lowest lower boundary part interval? include_highest upper boundary part interval? closed interval closed? integer interval integers? read_only Make interval object read-? x object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Intervals — interval","text":"interval returns Interval. .Interval returns TRUE x Interval, FALSE otherwise.","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/interval.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Intervals — interval","text":"","code":"# The real line interval() #> (-Inf, Inf) # Closed unit interval interval(c(0, 1), closed = TRUE) #> [0, 1] # Alternative form interval(0, 1, closed = TRUE) #> [0, 1] # Non-negative real line interval(c(0, Inf), include_lowest = TRUE) #> [0, Inf)"},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":null,"dir":"Reference","previous_headings":"","what":"Test if object is a Distribution — is.Distribution","title":"Test if object is a Distribution — is.Distribution","text":"Test object Distribution","code":""},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Test if object is a Distribution — is.Distribution","text":"","code":"is.Distribution(object)"},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Test if object is a Distribution — is.Distribution","text":"object R object.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Test if object is a Distribution — is.Distribution","text":"TRUE object Distribution, FALSE 
otherwise.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/is.Distribution.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Test if object is a Distribution — is.Distribution","text":"","code":"is.Distribution(dist_dirac()) #> [1] TRUE"},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":null,"dir":"Reference","previous_headings":"","what":"Cast to a TensorFlow matrix — k_matrix","title":"Cast to a TensorFlow matrix — k_matrix","text":"Cast TensorFlow matrix","code":""},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Cast to a TensorFlow matrix — k_matrix","text":"","code":"k_matrix(x, dtype = NULL)"},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Cast to a TensorFlow matrix — k_matrix","text":"x Numeric object converted matrix Tensor. dtype Type elements resulting tensor. Defaults keras3::config_floatx().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Cast to a TensorFlow matrix — k_matrix","text":"two-dimensional tf.Tensor values x. 
shape (nrow(x), ncol(x)) x first converted R matrix via .matrix().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/k_matrix.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Cast to a TensorFlow matrix — k_matrix","text":"","code":"if (interactive()) { k_matrix(diag(1:3)) k_matrix(diag(1:3), dtype = \"int32\") # Vectors are converted to columns: k_matrix(1:3) }"},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":null,"dir":"Reference","previous_headings":"","what":"Plot several distributions — plot_distributions","title":"Plot several distributions — plot_distributions","text":"Plot several distributions","code":""},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Plot several distributions — plot_distributions","text":"","code":"plot_distributions( ..., distributions = list(), .x, plots = c(\"density\", \"probability\", \"hazard\"), with_params = list(), as_list = FALSE )"},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Plot several distributions — plot_distributions","text":"... distribution objects (must named) distributions Named list distribution objects. concatenated .... .x Numeric vector points evaluate . plots Plots created. May abbreviated. plots stacked order given top bottom. with_params list distribution parameters given distribution using with_params. named, names matched distribution names. Otherwise, allocated positionally, index 1 corresponding first element distributions, elements distributions followed arguments ... order. 
as_list return list ggplots instead patchwork?","code":""},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Plot several distributions — plot_distributions","text":"stacked patchwork requested ggplots","code":""},{"path":"https://ashesitr.github.io/reservr/reference/plot_distributions.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Plot several distributions — plot_distributions","text":"","code":"rate <- 1 x <- rexp(20, rate) d_emp <- dist_empirical(x, positive = TRUE) d_exp <- dist_exponential() plot_distributions( empirical = d_emp, theoretical = d_exp, estimated = d_exp, with_params = list( theoretical = list(rate = rate), estimated = list(rate = 1 / mean(x)) ), .x = seq(1e-4, 5, length.out = 100) )"},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":null,"dir":"Reference","previous_headings":"","what":"Predict individual distribution parameters — predict.reservr_keras_model","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"Predict individual distribution parameters","code":""},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"","code":"# S3 method for reservr_keras_model predict(object, data, as_matrix = FALSE, ...)"},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"object compiled trained reservr_keras_model. data Input data compatible model. as_matrix Return parameter matrix instead list structure? ... 
ignored","code":""},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"parameter list suitable with_params argument distribution family used model. Contains one set parameters per row data.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/predict.reservr_keras_model.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Predict individual distribution parameters — predict.reservr_keras_model","text":"","code":"if (interactive()) { dist <- dist_exponential() params <- list(rate = 1.0) N <- 100L rand_input <- runif(N) x <- dist$sample(N, with_params = params) tf_in <- keras3::layer_input(1L) mod <- tf_compile_model( inputs = list(tf_in), intermediate_output = tf_in, dist = dist, optimizer = keras3::optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_fit <- fit( object = mod, x = k_matrix(rand_input), y = x, epochs = 10L, callbacks = list( callback_debug_dist_gradients(mod, k_matrix(rand_input), x) ) ) tf_preds <- predict(mod, data = k_matrix(rand_input)) }"},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":null,"dir":"Reference","previous_headings":"","what":"Determine probability of reporting under a Poisson arrival Process — prob_report","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"Determines probability claims occuring Poisson process arrival intensity expo reporting delay distribution dist time t_min t_max reported tau_min tau_max.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"","code":"prob_report( dist, intervals, 
expo = NULL, with_params = list(), .tolerance = .Machine$double.eps^0.5, .max_iter = 100L, .try_compile = TRUE )"},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"dist reporting delay Distribution, compiled interval probability function. intervals data frame columns xmin, xmax, tmin, tmax. Claims occur within [xmin, xmax] reported within [tmin, tmax]. expo Poisson intensity. given, must vectorised function yields intensity claim arrival process specified time. expo = NULL equivalent constant intensity function. expo relevant multiplicative constant. with_params Parameters dist use. Can parameter set different values interval. dist compiled interval probability function, with_params can matrix instead. .tolerance Absolute element-wise tolerance. .max_iter Maximum number iterations. number integration intervals length(lower) * .max_iter. Therefor maximum number function evaluations per integration interval 15 * .max_iter. 
.try_compile Try compiling distributions probability function speed integration?","code":""},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"vector reporting probabilities, one entry per row intervals.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"reporting probability given P(x + d [tmin, tmax] | x [xmin, xmax]) = E(P(x + d [tmin, tmax] | x) | x [xmin, xmax]) / P(x [xmin, xmax]) = int_[xmin, xmax] expo(x) P(x + d [tmin, tmax]) dx = int_[xmin, xmax] expo(x) P(d [tmin - x, tmax - x]) dx / int_[xmin, xmax] expo(x) dx prob_report uses integrate_gk() compute two integrals.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/prob_report.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Determine probability of reporting under a Poisson arrival Process — prob_report","text":"","code":"dist <- dist_exponential() ints <- data.frame( xmin = 0, xmax = 1, tmin = seq_len(10) - 1.0, tmax = seq_len(10) ) params <- list(rate = rep(c(1, 0.5), each = 5)) prob_report(dist, ints, with_params = params) #> [1] 0.367879441 0.399576401 0.146995943 0.054076785 0.019893738 0.041904709 #> [7] 0.025416491 0.015415881 0.009350204 0.005671186"},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":null,"dir":"Reference","previous_headings":"","what":"Quantiles of Distributions — quantile.Distribution","title":"Quantiles of Distributions — quantile.Distribution","text":"Produces quantiles corresponding given probabilities configurable distribution 
parameters.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Quantiles of Distributions — quantile.Distribution","text":"","code":"# S3 method for Distribution quantile(x, probs = seq(0, 1, 0.25), with_params = list(), ..., .start = 0)"},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Quantiles of Distributions — quantile.Distribution","text":"x Distribution. probs Quantiles compute. with_params Optional list distribution parameters. Note x$has_capability(\"quantile\") false, with_params assumed contain one set parameters. ... ignored .start Starting value quantiles computed numerically. Must within support x.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Quantiles of Distributions — quantile.Distribution","text":"quantiles x corresponding probs parameters with_params.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Quantiles of Distributions — quantile.Distribution","text":"x$has_capability(\"quantile\") true, returns x$quantile(probs, with_params = with_params). case, with_params may contain separate sets parameters quantile determined. Otherwise, numerical estimation quantiles done using density probability function. method assumes with_params cantain one set parameters. strategy uses two steps: Find smallest largest quantiles probs using newton method starting .start. 
Find remaining quantiles bisection using stats::uniroot().","code":""},{"path":"https://ashesitr.github.io/reservr/reference/quantile.Distribution.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Quantiles of Distributions — quantile.Distribution","text":"","code":"# With quantiles available dist <- dist_normal(sd = 1) qqs <- quantile(dist, probs = rep(0.5, 3), with_params = list(mean = 1:3)) stopifnot(all.equal(qqs, 1:3)) # Without quantiles available dist <- dist_erlangmix(shapes = list(1, 2, 3), scale = 1.0) my_probs <- c(0, 0.01, 0.25, 0.5, 0.75, 1) qqs <- quantile( dist, probs = my_probs, with_params = list(probs = list(0.5, 0.3, 0.2)), .start = 2 ) all.equal(dist$probability(qqs, with_params = list(probs = list(0.5, 0.3, 0.2))), my_probs) #> [1] \"Mean relative difference: 2.890015e-06\" # Careful: Numerical estimation of extreme quantiles can result in out-of-bounds values. # The correct 0-quantile would be 0 in this case, but it was estimated < 0. qqs[1L] #> [1] -1.138089"},{"path":"https://ashesitr.github.io/reservr/reference/reexports.html","id":null,"dir":"Reference","previous_headings":"","what":"Objects exported from other packages — reexports","title":"Objects exported from other packages — reexports","text":"objects imported packages. Follow links see documentation. 
generics fit","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":null,"dir":"Reference","previous_headings":"","what":"Soft-Max function — softmax","title":"Soft-Max function — softmax","text":"Softmax vector x defined ","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Soft-Max function — softmax","text":"","code":"softmax(x) dsoftmax(x)"},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Soft-Max function — softmax","text":"x numeric vector matrix","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Soft-Max function — softmax","text":"softmax returns softmax x; rowwise x matrix. dsoftmax returns Jacobi-matrix softmax(x) x. x must vector.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Soft-Max function — softmax","text":"\\(s_i = \\exp(x_i) / \\sum_k \\exp(x_k)\\) satisfies sum(s) == 1.0 can used smoothly enforce sum constraint.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/softmax.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Soft-Max function — softmax","text":"","code":"softmax(c(5, 5)) #> [1] 0.5 0.5 softmax(diag(nrow = 5, ncol = 6)) #> [,1] [,2] [,3] [,4] [,5] [,6] #> [1,] 0.3521874 0.1295625 0.1295625 0.1295625 0.1295625 0.1295625 #> [2,] 0.1295625 0.3521874 0.1295625 0.1295625 0.1295625 0.1295625 #> [3,] 0.1295625 0.1295625 0.3521874 0.1295625 0.1295625 0.1295625 #> [4,] 0.1295625 0.1295625 0.1295625 0.3521874 0.1295625 0.1295625 #> [5,] 0.1295625 0.1295625 0.1295625 0.1295625 0.3521874 
0.1295625"},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":null,"dir":"Reference","previous_headings":"","what":"Compile a Keras model for truncated data under dist — tf_compile_model","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"Compile Keras model truncated data dist","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"","code":"tf_compile_model( inputs, intermediate_output, dist, optimizer, censoring = TRUE, truncation = TRUE, metrics = NULL, weighted_metrics = NULL )"},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"inputs List keras input layers intermediate_output Intermediate model layer used input distribution parameters dist Distribution use compiling loss parameter outputs optimizer String (name optimizer) optimizer instance. See optimizer_* family. censoring flag, whether compiled model support censored observations. Set FALSE higher efficiency. fit(...) error resulting model used fit censored observations. truncation flag, whether compiled model support truncated observations. Set FALSE higher efficiency. fit(...) warn resuting model used fit truncated observations. metrics List metrics evaluated model training testing. can : string (name built-function), function, optionally \"name\" attribute Metric() instance. See metric_* family functions. Typically use metrics = c('accuracy'). function callable signature result = fn(y_true, y_pred). specify different metrics different outputs multi-output model, also pass named list, metrics = list(= 'accuracy', b = c('accuracy', 'mse')). 
can also pass list specify metric list metrics output, metrics = list(c('accuracy'), c('accuracy', 'mse')) metrics = list('accuracy', c('accuracy', 'mse')). pass strings 'accuracy' 'acc', convert one metric_binary_accuracy(), metric_categorical_accuracy(), metric_sparse_categorical_accuracy() based shapes targets model output. similar conversion done strings \"crossentropy\" \"ce\" well. metrics passed evaluated without sample weighting; like sample weighting apply, can specify metrics via weighted_metrics argument instead. providing anonymous R function, can customize printed name training assigning attr(, \"name\") <- \"my_custom_metric_name\", calling custom_metric(\"my_custom_metric_name\", ) weighted_metrics List metrics evaluated weighted sample_weight class_weight training testing.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"reservr_keras_model can used train truncated censored observations dist based input data inputs.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_compile_model.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compile a Keras model for truncated data under dist — tf_compile_model","text":"","code":"dist <- dist_exponential() params <- list(rate = 1.0) N <- 100L rand_input <- runif(N) x <- dist$sample(N, with_params = params) if (interactive()) { tf_in <- keras3::layer_input(1L) mod <- tf_compile_model( inputs = list(tf_in), intermediate_output = tf_in, dist = dist, optimizer = keras3::optimizer_adam(), censoring = FALSE, truncation = FALSE ) }"},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":null,"dir":"Reference","previous_headings":"","what":"Initialise model weights to a global parameter fit — tf_initialise_model","title":"Initialise 
model weights to a global parameter fit — tf_initialise_model","text":"Initialises compiled reservr_keras_model weights predictions equal , close , distribution parameters given params.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Initialise model weights to a global parameter fit — tf_initialise_model","text":"","code":"tf_initialise_model( model, params, mode = c(\"scale\", \"perturb\", \"zero\", \"none\") )"},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Initialise model weights to a global parameter fit — tf_initialise_model","text":"model reservr_compiled_model obtained tf_compile_model(). params list distribution parameters compatible model. mode initialisation mode scale Initialise biases according params kernels uniform [-0.1, 0.1] * bias scale. perturb Initialise biases according params leave kernels . zero Initialise biases according params set kernel zero. 
none modify weights.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Initialise model weights to a global parameter fit — tf_initialise_model","text":"Invisibly model changed weights","code":""},{"path":"https://ashesitr.github.io/reservr/reference/tf_initialise_model.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Initialise model weights to a global parameter fit — tf_initialise_model","text":"","code":"dist <- dist_exponential() group <- sample(c(0, 1), size = 100, replace = TRUE) x <- dist$sample(100, with_params = list(rate = group + 1)) global_fit <- fit(dist, x) if (interactive()) { library(keras3) l_in <- layer_input(shape = 1L) mod <- tf_compile_model( inputs = list(l_in), intermediate_output = l_in, dist = dist, optimizer = optimizer_adam(), censoring = FALSE, truncation = FALSE ) tf_initialise_model(mod, global_fit$params) fit_history <- fit( mod, x = group, y = x, epochs = 200L ) predicted_means <- predict(mod, data = as_tensor(c(0, 1), config_floatx())) }"},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":null,"dir":"Reference","previous_headings":"","what":"Define a set of truncated observations — trunc_obs","title":"Define a set of truncated observations — trunc_obs","text":"x missing, xmin xmax must specified.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Define a set of truncated observations — trunc_obs","text":"","code":"trunc_obs(x, xmin = x, xmax = x, tmin = -Inf, tmax = Inf, w = 1) as_trunc_obs(.data) truncate_obs(.data, tmin_new = -Inf, tmax_new = Inf, .partial = FALSE) repdel_obs(.data, accident, delay, time, .truncate = 
FALSE)"},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Define a set of truncated observations — trunc_obs","text":"x Observations xmin, xmax Censoring bounds. xmin != xmax, x must NA. tmin, tmax Truncation bounds. May vary per observation. w Case weights .data data frame numeric vector. tmin_new New truncation minimum tmax_new New truncation maximum .partial Enable partial truncation censored observations? potentially create inconsistent data actual observation lies outside truncation bounds censoring interval overlaps. accident accident time (unquoted, evaluated .data) delay reporting delay (unquoted, evaluated .data) time evaluation time (unquoted, evaluated .data) .truncate claims reported time silently discarded? claims reported time .truncate FALSE, error raised.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Define a set of truncated observations — trunc_obs","text":"trunc_obs: trunc_obs tibble columns x, xmin, xmax, tmin tmax describing possibly interval-censored observations truncation as_trunc_obs returns trunc_obs tibble. truncate_obs returns trunc_obs tibble possibly fewer observations .data updated truncation bounds. repdel_obs returns trunc_obs tibble corresponding reporting delay observations claim. .truncate FALSE, result guaranteed number rows .data.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"details","dir":"Reference","previous_headings":"","what":"Details","title":"Define a set of truncated observations — trunc_obs","text":"Uncensored observations must satisfy tmin <= xmin = x = xmax <= tmax. 
Censored observations must satisfy tmin <= xmin < xmax <= tmax x = NA.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/trunc_obs.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Define a set of truncated observations — trunc_obs","text":"","code":"N <- 100 x <- rexp(N, 0.5) # Random, observation dependent truncation intervals tmin <- runif(N, 0, 1) tmax <- tmin + runif(N, 1, 2) oob <- x < tmin | x > tmax x <- x[!oob] tmin <- tmin[!oob] tmax <- tmax[!oob] # Number of observations after truncation N <- length(x) # Randomly interval censor 30% of observations cens <- rbinom(N, 1, 0.3) == 1L xmin <- x xmax <- x xmin[cens] <- pmax(tmin[cens], floor(x[cens])) xmax[cens] <- pmin(tmax[cens], ceiling(x[cens])) x[cens] <- NA trunc_obs(x, xmin, xmax, tmin, tmax) #> # A tibble: 44 × 6 #> x xmin xmax tmin tmax w #> #> 1 NA 0.832 1 0.832 2.08 1 #> 2 NA 1 2 0.464 2.24 1 #> 3 1.46 1.46 1.46 0.450 2.36 1 #> 4 0.665 0.665 0.665 0.487 1.80 1 #> 5 0.979 0.979 0.979 0.0436 1.11 1 #> 6 1.03 1.03 1.03 0.560 2.19 1 #> 7 0.657 0.657 0.657 0.185 1.98 1 #> 8 NA 1 2 0.612 2.36 1 #> 9 0.526 0.526 0.526 0.240 2.03 1 #> 10 1.60 1.60 1.60 0.668 2.38 1 #> # ℹ 34 more rows as_trunc_obs(c(1, 2, 3)) #> # A tibble: 3 × 6 #> x xmin xmax tmin tmax w #> #> 1 1 1 1 -Inf Inf 1 #> 2 2 2 2 -Inf Inf 1 #> 3 3 3 3 -Inf Inf 1 as_trunc_obs(data.frame(x = 1:3, tmin = 0, tmax = 10)) #> # A tibble: 3 × 6 #> x xmin xmax tmin tmax w #> #> 1 1 1 1 0 10 1 #> 2 2 2 2 0 10 1 #> 3 3 3 3 0 10 1 as_trunc_obs(data.frame(x = c(1, NA), xmin = c(1, 2), xmax = c(1, 3))) #> # A tibble: 2 × 6 #> x xmin xmax tmin tmax w #> #> 1 1 1 1 -Inf Inf 1 #> 2 NA 2 3 -Inf Inf 1 truncate_obs(1:10, tmin_new = 2.0, tmax_new = 8.0) #> # A tibble: 7 × 6 #> x xmin xmax tmin tmax w #> #> 1 2 2 2 2 8 1 #> 2 3 3 3 2 8 1 #> 3 4 4 4 2 8 1 #> 4 5 5 5 2 8 1 #> 5 6 6 6 2 8 1 #> 6 7 7 7 2 8 1 #> 7 8 8 8 2 8 
1"},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":null,"dir":"Reference","previous_headings":"","what":"Truncate claims data subject to reporting delay — truncate_claims","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"Truncate claims data subject reporting delay","code":""},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"","code":"truncate_claims(data, accident, delay, time, .report_col = \"report\")"},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"data Full claims data including IBNR accident Accident times. May unquoted column name data. delay Reporting delays. May unquoted column name data. time Observation time (scalar number one per claim). Claims accident + delay > time truncated. Set time = Inf compute reporting times perform truncation. .report_col NULL column name store reporting time report = accident + delay.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"Truncated data. reporting time stored colnumn named .report_col unless .report_col NULL. 
.report_col NULL time contains Infs, warning issued since data returned unchanged work done.","code":""},{"path":"https://ashesitr.github.io/reservr/reference/truncate_claims.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Truncate claims data subject to reporting delay — truncate_claims","text":"","code":"claims_full <- data.frame( acc = runif(100), repdel = rexp(100) ) tau <- 2.0 truncate_claims(claims_full, acc, repdel, tau) #> acc repdel report #> 1 1.341272e-01 0.028235584 0.16236281 #> 2 7.952195e-02 0.008358716 0.08788066 #> 3 3.443970e-02 0.140290387 0.17473009 #> 4 7.098971e-01 1.152279972 1.86217711 #> 5 2.960615e-01 1.574453000 1.87051447 #> 6 7.289446e-01 0.088894959 0.81783959 #> 7 1.101838e-01 1.216726908 1.32691067 #> 8 5.654015e-01 1.387287179 1.95268864 #> 9 7.189693e-01 0.069208830 0.78817813 #> 10 8.909014e-01 0.152164621 1.04306601 #> 11 4.515028e-01 0.206191922 0.65769476 #> 13 1.224872e-01 0.514714528 0.63720168 #> 14 1.555099e-05 0.319640385 0.31965594 #> 15 6.111351e-01 0.932906239 1.54404130 #> 17 3.713955e-02 0.471633688 0.50877323 #> 18 7.302117e-01 0.470371407 1.20058307 #> 19 4.125354e-01 0.825975930 1.23851137 #> 21 5.602826e-01 0.078439292 0.63872193 #> 23 5.899033e-01 0.356895571 0.94679888 #> 24 5.889085e-01 0.886226820 1.47513534 #> 25 1.670153e-01 0.177537690 0.34455295 #> 26 6.828304e-01 0.163108702 0.84593913 #> 27 3.082466e-01 0.319356196 0.62760279 #> 28 7.793431e-01 0.952858891 1.73220199 #> 29 3.284492e-01 1.587426414 1.91587561 #> 30 4.171246e-02 1.250061904 1.29177437 #> 31 1.383127e-01 1.177008377 1.31532110 #> 32 2.560620e-01 0.055133459 0.31119549 #> 33 4.429471e-01 0.267354952 0.71030207 #> 34 2.050347e-01 0.464885217 0.66991992 #> 35 7.792969e-01 0.157671728 0.93696860 #> 37 7.766178e-01 1.075450784 1.85206859 #> 39 1.776113e-01 1.112906751 1.29051807 #> 42 5.313195e-01 0.137196474 0.66851598 #> 43 4.677886e-01 0.664616256 1.13240482 #> 44 1.206373e-01 0.297930363 
0.41856766 #> 45 6.348620e-01 1.141797280 1.77665926 #> 46 8.912517e-01 0.680814184 1.57206585 #> 47 8.781296e-01 0.540906635 1.41903623 #> 48 2.148099e-02 0.518280302 0.53976129 #> 50 5.886034e-01 0.705613201 1.29421661 #> 51 1.850858e-01 0.508072750 0.69315858 #> 52 5.010168e-01 0.352654210 0.85367102 #> 54 9.485277e-01 0.332760828 1.28128852 #> 55 1.860526e-01 0.110473518 0.29652609 #> 56 9.358294e-01 0.568551742 1.50438114 #> 57 7.175151e-01 0.091612742 0.80912781 #> 58 3.182253e-01 0.436845455 0.75507071 #> 59 5.823165e-01 0.114667189 0.69698367 #> 61 6.969247e-01 0.476378988 1.17330367 #> 62 7.630700e-01 0.010118335 0.77318837 #> 63 1.971220e-01 1.478337746 1.67545979 #> 64 1.982628e-01 1.151255731 1.34951853 #> 65 3.902595e-01 1.343615915 1.73387538 #> 66 6.074760e-01 0.774707003 1.38218298 #> 67 5.993708e-01 0.646028800 1.24539964 #> 68 7.847457e-01 0.986775468 1.77152118 #> 69 5.605707e-01 0.550733615 1.11130433 #> 70 4.787836e-01 0.947700852 1.42648449 #> 71 9.189584e-01 0.396267166 1.31522557 #> 72 1.237870e-02 0.390503512 0.40288222 #> 74 5.759545e-01 0.819434726 1.39538924 #> 75 3.667410e-01 0.043780853 0.41052181 #> 76 1.393693e-01 0.232285622 0.37165496 #> 78 8.949839e-01 0.206483914 1.10146782 #> 79 2.749953e-01 0.453207363 0.72820271 #> 80 2.195253e-01 0.777957749 0.99748306 #> 81 7.483612e-02 0.264012017 0.33884814 #> 82 2.389014e-01 0.022741144 0.26164256 #> 84 2.821164e-01 0.205027999 0.48714435 #> 86 2.685969e-01 0.517190858 0.78578778 #> 87 3.688022e-01 0.114156616 0.48295881 #> 90 1.491891e-01 1.768341100 1.91753016 #> 91 2.765223e-01 0.306067371 0.58258966 #> 92 7.019701e-01 0.056862872 0.75883296 #> 94 6.576286e-01 1.223552487 1.88118109 #> 95 4.837228e-01 0.368050174 0.85177300 #> 97 6.680541e-01 0.381928012 1.04998212 #> 98 1.240903e-01 0.434685851 0.55877612 #> 99 4.515315e-01 0.622061988 1.07359347 #> 100 3.454657e-01 0.470663017 
0.81612874"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute weighted moments — weighted_moments","title":"Compute weighted moments — weighted_moments","text":"Compute weighted moments","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute weighted moments — weighted_moments","text":"","code":"weighted_moments(x, w, n = 2L, center = TRUE)"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute weighted moments — weighted_moments","text":"x Observations w Case weights (optional) n Number moments calculate center Calculate centralized moments (default) noncentralized moments, .e. E((X - E(X))^k) E(X^k).","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute weighted moments — weighted_moments","text":"vector length n kth entry kth weighted moment x weights w. center TRUE moments centralized, .e. E((X - E(X))^k). first moment never centralized. moments scaled 1 / sum(w), de-biased. e.g. 
second central weighted moment weighted_moment(x, w)[2L] equal var(rep(x, w)) * (sum(w) - 1) / sum(w) integer w","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/weighted_moments.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute weighted moments — weighted_moments","text":"","code":"weighted_moments(rexp(100)) #> [1] 0.9877458 1.2957378 weighted_moments(c(1, 2, 3), c(1, 2, 3)) #> [1] 2.3333333 0.5555556 c(mean(rep(1:3, 1:3)), var(rep(1:3, 1:3)) * 5 / 6) #> [1] 2.3333333 0.5555556"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute weighted quantiles — weighted_quantile","title":"Compute weighted quantiles — weighted_quantile","text":"Compute weighted quantiles","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute weighted quantiles — weighted_quantile","text":"","code":"weighted_quantile(x, w, probs) weighted_median(x, w)"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute weighted quantiles — weighted_quantile","text":"x Observations w Case weights (optional) probs Quantiles calculate","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute weighted quantiles — weighted_quantile","text":"vector length probs corresponding weighted quantiles x weight w. integer weights, equivalent quantile(rep(x, w), probs) weighted median x weights w. 
integer weights, equivalent median(rep(x, w))","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/weighted_quantile.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute weighted quantiles — weighted_quantile","text":"","code":"weighted_median(1:6) #> [1] 3.5 weighted_median(1:3, c(1, 4, 9)) #> [1] 3 weighted_median(1:3, c(9, 4, 1)) #> [1] 1 weighted_quantile(1:3, c(1, 4, 9), seq(0.0, 1.0, by = 0.25)) #> [1] 1 2 3 3 3 quantile(rep(1:3, c(1, 4, 9)), seq(0.0, 1.0, by = 0.25)) #> 0% 25% 50% 75% 100% #> 1 2 3 3 3"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":null,"dir":"Reference","previous_headings":"","what":"Compute weighted tabulations — weighted_tabulate","title":"Compute weighted tabulations — weighted_tabulate","text":"Computes sum w grouped bin. w missing result equivalent tabulate(bin, nbins)","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":"ref-usage","dir":"Reference","previous_headings":"","what":"Usage","title":"Compute weighted tabulations — weighted_tabulate","text":"","code":"weighted_tabulate(bin, w, nbins = max(1L, bin, na.rm = TRUE))"},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":"arguments","dir":"Reference","previous_headings":"","what":"Arguments","title":"Compute weighted tabulations — weighted_tabulate","text":"bin integer vector values 1L nbins w Weights per entry bin. nbins Number bins","code":""},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":"value","dir":"Reference","previous_headings":"","what":"Value","title":"Compute weighted tabulations — weighted_tabulate","text":"vector length nbins ith result equal sum(w[bin == ]) sum(bin == ) w missing. 
integer weights, equivalent tabulate(rep(bin, w), nbins).","code":""},{"path":[]},{"path":"https://ashesitr.github.io/reservr/reference/weighted_tabulate.html","id":"ref-examples","dir":"Reference","previous_headings":"","what":"Examples","title":"Compute weighted tabulations — weighted_tabulate","text":"","code":"weighted_tabulate(c(1, 1, 2)) #> [1] 2 1 weighted_tabulate(c(1, 1, 2), nbins = 3L) #> [1] 2 1 0 weighted_tabulate(c(1, 1, 2), w = c(0.5, 0.5, 1), nbins = 3L) #> [1] 1 1 0"},{"path":"https://ashesitr.github.io/reservr/news/index.html","id":"reservr-003","dir":"Changelog","previous_headings":"","what":"reservr 0.0.3","title":"reservr 0.0.3","text":"Fixed segfaults r-devel caused zero-length input C++ routines. Migrated keras3 keras support.","code":""},{"path":"https://ashesitr.github.io/reservr/news/index.html","id":"reservr-002","dir":"Changelog","previous_headings":"","what":"reservr 0.0.2","title":"reservr 0.0.2","text":"CRAN release: 2023-10-18 Fixed tensorflow log-density implementation dist_erlangmix() dist_exponential() work censored data. Multiple bug fixes related tensorflow training integration, input tensor shapes can unknown. Improved testing tensorflow integration.","code":""},{"path":"https://ashesitr.github.io/reservr/news/index.html","id":"reservr-001","dir":"Changelog","previous_headings":"","what":"reservr 0.0.1","title":"reservr 0.0.1","text":"CRAN release: 2022-12-09 Initial CRAN release","code":""}]