From 7fc353e9e0fd6de68f90abc10b650c451354a604 Mon Sep 17 00:00:00 2001 From: matthieugomez Date: Sun, 1 Dec 2019 15:07:19 -0500 Subject: [PATCH] use MarkovProcess type --- Project.toml | 9 +- src/InfinitesimalGenerators.jl | 215 +++++++++++++++++++++------------ src/feynman_kac.jl | 101 +++++++++------- src/operators.jl | 52 ++------ src/special.jl | 26 ++++ test/runtests.jl | 115 +++++++++--------- 6 files changed, 302 insertions(+), 216 deletions(-) create mode 100644 src/special.jl diff --git a/Project.toml b/Project.toml index d5d97e4..f00db2a 100644 --- a/Project.toml +++ b/Project.toml @@ -7,11 +7,18 @@ LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" Arpack = "7d9fca2a-8960-54d3-9f78-7d1dccf2cb97" KrylovKit = "0b1a1467-8014-51b9-945f-bf0ae24f4b77" Roots = "f2b01f46-fcfa-551c-844a-d8ac1e96c665" - +Distributions = "31c24e10-a181-5473-b8eb-7969acd0382f" +DiffEqDiffTools = "01453d9d-ee7c-5054-8395-0335cb756afa" +FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" [compat] julia = "1" +Arpack = "0" KrylovKit = "0.4" +Roots = "0.8" +Distributions = "0.21" +DiffEqDiffTools = "1" +FillArrays = "0" [extras] Expokit = "a1e7a1ef-7a5d-5822-a38c-be74e1bb89f4" diff --git a/src/InfinitesimalGenerators.jl b/src/InfinitesimalGenerators.jl index edaeacf..855602d 100644 --- a/src/InfinitesimalGenerators.jl +++ b/src/InfinitesimalGenerators.jl @@ -1,8 +1,8 @@ module InfinitesimalGenerators -using LinearAlgebra, Arpack, KrylovKit, Roots +using LinearAlgebra, Arpack, KrylovKit, Roots, Distributions, DiffEqDiffTools, FillArrays include("operators.jl") -include("feynman_kac.jl") + #======================================================================================== @@ -11,109 +11,130 @@ dx = μx dt + σx dZ_t ========================================================================================# +mutable struct MarkovProcess + x::AbstractVector{<:Real} + μx::AbstractVector{<:Real} + σx::AbstractVector{<:Real} + 𝔸::Tridiagonal + Δ::Tuple{<:AbstractVector, <:AbstractVector, <:AbstractVector, <:AbstractVector} +end + +function MarkovProcess(x::AbstractVector{<:Real}, μx::AbstractVector{<:Real}, σx::AbstractVector{<:Real}) + length(x) == length(μx) || error("Vector for grid, drift, and volatility should have the same size") + length(μx) == length(σx) || error("Vector for grid, drift, and volatility should have the same size") + n = length(x) + 𝔸 = Tridiagonal(zeros(n-1), zeros(n), zeros(n-1)) + Δ = make_Δ(x) + MarkovProcess(x, μx, σx, 𝔸, Δ) +end + +Base.length(x::MarkovProcess) = length(x.x) + +function make_Δ(x) + n = length(x) + Δxm = zero(x) + Δxm[1] = x[2] - x[1] + for i in 2:n + Δxm[i] = x[i] - x[i-1] + end + Δxp = zero(x) + for i in 1:(n-1) + Δxp[i] = x[i+1] - x[i] + end + Δxp[end] = x[n] - x[n-1] + Δx = (Δxm .+ Δxp) / 2 + return x, 1 ./ Δx, 1 ./ Δxm, 1 ./ Δxp +end + +# it's important to take 1e-6 to have the right tail index of multiplicative functional (see tests) +function OrnsteinUhlenbeck(; xbar = 0.0, κ = 0.1, σ = 1.0, p = 1e-10, length = 100, + xmin = quantile(Normal(xbar, σ / sqrt(2 * κ)), p), xmax = quantile(Normal(xbar, σ / sqrt(2 * κ)), 1 - p)) + x = range(xmin, stop = xmax, length = length) + μx = κ .* (xbar .- x) + σx = σ .* Ones(Base.length(x)) + MarkovProcess(x, μx, σx) +end + +function CoxIngersollRoss(; xbar = 0.1, κ = 0.1, σ = 1.0, p = 1e-10, length = 100, α = 2 * κ * xbar / σ^2, β = σ^2 / (2 * κ), xmin = quantile(Gamma(α, β), p), xmax = quantile(Gamma(α, β), 1 - p)) + x = range(xmin, stop = xmax, length = length) + μx = κ .* (xbar .- x) + σx = σ .* sqrt.(x) + MarkovProcess(x, 
μx, σx)
+end
+
 # Compute generator 𝔸f = E[df(x)]
-function generator(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number})
-    operator(x, zeros(length(x)), μx, 0.5 * σx.^2)
+function generator!(x::MarkovProcess)
+    operator!(x.𝔸, x.Δ, Zeros(length(x.x)), x.μx, 0.5 * x.σx.^2)
+end
+
+function generator(x::MarkovProcess)
+    deepcopy(generator!(x))
 end
 
 # Stationary Distribution of x
-function stationary_distribution(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number})
-    g, η, _ = principal_eigenvalue(generator(x, μx, σx); which = :SM, eigenvector = :left)
-    abs(η) >= 1e-5 && @warn "Principal Eigenvalue does not seem to be zero"
+function stationary_distribution(x::MarkovProcess)
+    g, η, _ = principal_eigenvalue(generator!(x); which = :SM, eigenvector = :left)
+    abs(η) <= 1e-5 || @warn "Principal Eigenvalue does not seem to be zero"
     return g
 end
 
 # Stationary Distribution of x with death rate δ and reinjection ψ
-function stationary_distribution(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number}, δ::Number, ψ::AbstractVector{<:Number})
-    clean_eigenvector_left((δ * I - adjoint(generator(x, μx, σx))) \ (δ * ψ))
+function stationary_distribution(x::MarkovProcess, δ::Number, ψ::AbstractVector{<:Number})
+    clean_eigenvector_left((δ * I - generator!(x)') \ (δ * ψ))
 end
 
-# Compute u(x_t, t) = E[∫t^T e^{-∫ts V(x_τ, τ)dτ}f(x_s, s)ds + e^{-∫tT V(x_τ, τ)dτ}ψ(x_T)|x_t = x]
-function feynman_kac_backward(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number}; kwargs...)
-    feynman_kac_backward(generator(x, μx, σx); kwargs...)
+function stationary_distribution(x::AbstractVector, μx::AbstractVector, σx::AbstractVector, args...)
+    stationary_distribution(MarkovProcess(x, μx, σx), args...)
 end
 
-# Compute u(x, t)= E[∫0^t e^{-∫0^s V(x_τ)dτ}f(x_s)ds + e^{-∫0^tV(x_τ)dτ} ψ(x_t)|x_0 = x]
-function feynman_kac_forward(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number}; kwargs...)
-    feynman_kac_forward(generator(x, μx, σx); kwargs...)
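
# A minimal usage sketch of the MarkovProcess interface above (illustrative only;
# parameter values are arbitrary):
using InfinitesimalGenerators
X = OrnsteinUhlenbeck(; xbar = 0.0, κ = 0.1, σ = 0.02, length = 1000)
g = stationary_distribution(X)            # stationary density on the grid X.x
𝔸 = generator(X)                          # matrix such that 𝔸 * f discretizes E[df(x)]
gδ = stationary_distribution(X, 0.01, g)  # death rate 0.01 with reinjection density g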
+function ∂(x::MarkovProcess, f::AbstractVector) + operator!(x.𝔸, x.Δ, Zeros(length(x.x)), Ones(length(x.x)), Zeros(length(x.x))) * f end #======================================================================================== -For a Markov Process x: -dx = μx dt + σx dZt -and a multiplicative functional M: -dM/M = μM dt + σM dZt +For a multiplicative functional M: +dM/M = μM(x) dt + σM(x) dZt ========================================================================================# - - -############################################################################## -## -## MultiiplicativeFunctionals -## -############################################################################## -struct MultiplicativeFunctional - 𝔸::Tridiagonal - Δ::Tuple{<:AbstractVector, <:AbstractVector, <:AbstractVector, <:AbstractVector} - μx::AbstractVector{<:Number} - σx::AbstractVector{<:Number} +mutable struct MultiplicativeFunctional + x::MarkovProcess μM::AbstractVector{<:Number} σM::AbstractVector{<:Number} - δ::Number ρ::Number + δ::Number end -function MultiplicativeFunctional(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number}, μM::AbstractVector{<:Number}, σM::AbstractVector{<:Number}; δ::Number = 0.0, ρ::Number = 0.0) - n = length(x) - 𝔸 = Tridiagonal(zeros(n-1), zeros(n), zeros(n-1)) - Δ = make_Δ(x) - MultiplicativeFunctional(𝔸, Δ, μx, σx, μM, σM, δ, ρ) +function MultiplicativeFunctional(x::MarkovProcess, μM::AbstractVector{<:Number}, σM::AbstractVector{<:Number}; ρ::Number = 0.0, δ::Number = 0.0) + length(x.x) == length(μM) || error("Vector for grid and μM should have the same size") + length(x.x) == length(σM) || error("Vector for grid and σM should have the same size") + MultiplicativeFunctional(x, μM, σM, ρ, δ) end -# ξ -> 𝔸(ξ) -function generator(M::MultiplicativeFunctional) - operator!(M.𝔸, M.Δ, M.μM .- M.δ, M.μx .+ M.σM .* M.ρ .* M.σx, 0.5 * M.σx.^2) +# Generator for long run CGF +function generator!(M::MultiplicativeFunctional, ξ = 1.0) + operator!(M.x.𝔸, M.x.Δ, ξ .* M.μM .+ 0.5 * ξ * (ξ - 1) .* M.σM.^2 .- M.δ, M.x.μx .+ ξ .* M.σM .* M.ρ .* M.x.σx, 0.5 * M.x.σx.^2) end - -# Compute Hansen Scheinkmann decomposition M_t= e^{ηt}f(x_t)\hat{M}_t -function hansen_scheinkman(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number}, μM::AbstractVector{<:Number}, σM::AbstractVector{<:Number}; δ::Number = 0.0, ρ::Number = 0.0, eigenvector = :right) - hansen_scheinkman(MultiplicativeFunctional(x, μx, σx, μM, σM; δ = δ, ρ = ρ), eigenvector = eigenvector) -end -function hansen_scheinkman(M::MultiplicativeFunctional; eigenvector = :right) - principal_eigenvalue(generator(M); which = :LR, eigenvector = eigenvector) -end - -# Compute E[M_t ψ(x_t)|x_0 = x] -function feynman_kac_forward(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number}, μM::AbstractVector{<:Number}, σM::AbstractVector{<:Number}; δ::Number = 0.0, ρ::Number = 0.0, kwargs...) - feynman_kac_forward(MultiplicativeFunctional(x, μx, σx, μM, σM; δ = δ, ρ = ρ); kwargs...) +function generator(M::MultiplicativeFunctional, ξ = 1.0) + deepcopy(generator!(M, ξ)) end -function feynman_kac_forward(M::MultiplicativeFunctional; kwargs...) - feynman_kac_forward(generator(M); kwargs...) 
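
# Note on generator!(M, ξ) above: with dx = μx dt + σx dZ_x and dM/M = μM dt + σM dZ_M,
# where corr(dZ_x, dZ_M) = ρ and death rate δ, the discretized operator acts on a test
# function f as
#   𝔸_ξ f = (ξ μM + ½ ξ (ξ - 1) σM² - δ) f + (μx + ξ ρ σM σx) ∂f + ½ σx² ∂²f,
# whose principal eigenvalue is the long-run CGF ξ -> lim log(E[M_t^ξ]) / t used below.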
+# ξ -> lim log(E[M^\xi]) / t +function cgf_longrun(M::MultiplicativeFunctional; which = :LR, eigenvector = :right, r0 = Ones(length(M.x))) + ξ -> principal_eigenvalue(generator!(M, ξ), which = which, eigenvector = eigenvector, r0 = r0) end -############################################################################## -## -## CGF -## -############################################################################## - -function generator(M::MultiplicativeFunctional, ξ) - operator!(M.𝔸, M.Δ, ξ .* M.μM .+ 0.5 * ξ * (ξ - 1) .* M.σM.^2 .- M.δ, M.μx .+ ξ .* M.σM .* M.ρ .* M.σx, 0.5 * M.σx.^2) +function cgf_longrun(x::AbstractVector, μx::AbstractVector, σx::AbstractVector, μM::AbstractVector, σM::AbstractVector; ρ = 0.0, δ = 0.0, kwargs...) + cgf_longrun(MultiplicativeFunctional(MarkovProcess(x, μx, σx), μM, σM, ρ, δ); kwargs...) end -# ξ -> \lim log(E[M^\xi]) / t -function cgf_longrun(M::MultiplicativeFunctional, ξ; eigenvector = :right) - principal_eigenvalue(generator(M, ξ), which = :LR, eigenvector = eigenvector) +# Compute Hansen Scheinkmann decomposition M_t= e^{ηt}f(x_t)\hat{M}_t +function hansen_scheinkman(M::MultiplicativeFunctional; which = :LR, eigenvector = :right) + cgf_longrun(M, eigenvector = eigenvector)(1.0) end - -# Compute first derivative of ξ -> lim(log(E[M_t^ξ|x_0 = x])/t) -function ∂cgf_longrun(M::MultiplicativeFunctional, ξ::Number) - g, η, f = principal_eigenvalue(generator(M, ξ); which = :LR, eigenvector = :both) - ∂𝔸 = operator(x, μM .+ (η - 1/2) .* σM.^2, σM .* ρ .* σx, zeros(length(x))) - return (g' * ∂𝔸 * f) / (g' * f) +function hansen_scheinkman(x::AbstractVector, μx::AbstractVector, σx::AbstractVector, μM::AbstractVector, σM::AbstractVector; ρ = 0.0, δ = 0.0, kwargs...) + hansen_scheinkman(MultiplicativeFunctional(MarkovProcess(x, μx, σx), μM, σM, ρ, δ); kwargs...) end ############################################################################## @@ -137,11 +158,47 @@ end # dM/M = μM(x) dt + σM(x) dZt # dx = μx dt + σx dZt # with death rate δ -function tail_index(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number}, μM::AbstractVector{<:Number}, σM::AbstractVector{<:Number}; δ::Number = 0.0, ρ::Number = 0.0) - M = MultiplicativeFunctional(x, μx, σx, μM, σM; δ = δ, ρ = ρ) - find_zero(ξ -> cgf_longrun(M, ξ)[2], (1e-3, 40.0)) +function tail_index(M::MultiplicativeFunctional; which = :SM, xatol = 1e-2, kwargs...) + out = 0.0 + r0 = ones(length(M.x)) + if which == :SM + try + # SM is so much faster. So try if it works. + f = ξ -> begin + out = cgf_longrun(M; which = :SM, r0 = r0)(ξ) + eltype(out[3]) <: Float64 && copyto!(r0, out[3]) + return out[2] + end + D = ξ -> DiffEqDiffTools.finite_difference_derivative(f, ξ) + out = find_zero((f, D), 1.0, Roots.Newton(); xatol = xatol, kwargs...) + abs(cgf_longrun(M; which = :LR, r0 = r0)(out)[2]) <= 1e-2 || throw("there is an error") + catch + which = :LR + end + end + if which == :LR + f = ξ -> begin + out = cgf_longrun(M; which = :LR, r0 = r0)(ξ) + eltype(out[3]) <: Float64 && copyto!(r0, out[3]) + return out[2] + end + D = ξ -> DiffEqDiffTools.finite_difference_derivative(f, ξ) + out = find_zero((f, D), 1.0, Roots.Newton(); xatol = xatol, kwargs...) + end + return out end +function tail_index(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number}, μM::AbstractVector{<:Number}, σM::AbstractVector{<:Number}; ρ::Number = 0.0, δ::Number = 0.0, kwargs...) + tail_index(MultiplicativeFunctional(MarkovProcess(x, μx, σx), μM, σM, ρ, δ); kwargs...) 
+end + +############################################################################## +## +## Feynman Kac +## +############################################################################## + +include("feynman_kac.jl") ############################################################################## ## @@ -150,14 +207,14 @@ end ############################################################################## export +MarkovProcess, +OrnsteinUhlenbeck, +CoxIngersollRoss, +MultiplicativeFunctional, generator, -principal_eigenvalue, -feynman_kac_backward, -feynman_kac_forward, stationary_distribution, -MultiplicativeFunctional, +feynman_kac, hansen_scheinkman, cgf_longrun, -∂cgf_longrun, tail_index end \ No newline at end of file diff --git a/src/feynman_kac.jl b/src/feynman_kac.jl index 2835147..c0b260e 100644 --- a/src/feynman_kac.jl +++ b/src/feynman_kac.jl @@ -1,62 +1,79 @@ - -#======================================================================================== +""" +With direction = :backward Solve the PDE backward in time u(x, T) = ψ(x) 0 = u_t + 𝔸u_t - V(x, t)u + f(x, t) -using an implicit finite difference scheme, that is -u_T = ψ -u_t = (I - 𝔸dt) \ (u_{t+1} + f dt) -========================================================================================# -function feynman_kac_backward(𝔸::AbstractMatrix; +With direction = :forward +Solve the PDE forward in time +u(x, 0) = ψ(x) +u_t = 𝔸u - V(x)u + f(x) +""" +function feynman_kac(𝔸::AbstractMatrix; t::AbstractVector = range(0, 100, step = 1/12), ψ::AbstractVector = ones(size(𝔸, 1)), f::Union{AbstractVector, AbstractMatrix} = zeros(size(𝔸, 1)), - V::Union{AbstractVector, AbstractMatrix} = zeros(size(𝔸, 1))) - u = zeros(size(𝔸, 1), length(t)) - u[:, length(t)] = ψ - if isa(f, AbstractVector) && isa(V, AbstractVector) - if isa(t, AbstractRange) - dt = step(t) - 𝔹 = factorize(I + (Diagonal(V) - 𝔸) * dt) - for i in (length(t)-1):(-1):1 - ψ = ldiv!(𝔹, u[:, i+1] .+ f .* dt) - u[:, i] .= ψ + V::Union{AbstractVector, AbstractMatrix} = zeros(size(𝔸, 1)), + direction= :backward) + if direction == :backward + u = zeros(size(𝔸, 1), length(t)) + u[:, end] = ψ + if isa(f, AbstractVector) && isa(V, AbstractVector) + if isa(t, AbstractRange) + dt = step(t) + 𝔹 = factorize(I + (Diagonal(V) - 𝔸) * dt) + for i in (length(t)-1):(-1):1 + ψ = ldiv!(𝔹, u[:, i+1] .+ f .* dt) + u[:, i] = ψ + end + else + for i in (length(t)-1):(-1):1 + dt = t[i+1] - t[i] + 𝔹 = I + (Diagonal(V) - 𝔸) * dt + u[:, i] = 𝔹 \ (u[:, i+1] .+ f .* dt) + end end - else + elseif isa(f, AbstractMatrix) && isa(V, AbstractMatrix) for i in (length(t)-1):(-1):1 dt = t[i+1] - t[i] - 𝔹 = I + (Diagonal(V) - 𝔸) * dt - ψ = 𝔹 \ (u[:, i+1] .+ f .* dt) - u[:, i] .= ψ + 𝔹 = I + (Diagonal(view(V, :, i)) - 𝔸) * dt + u[:, i] = 𝔹 \ (u[:, i+1] .+ f[:, i] .* dt) end + else + error("f and V must be Vectors or Matrices") end - elseif isa(f, AbstractMatrix) && isa(V, AbstractMatrix) - for i in (length(t)-1):(-1):1 - dt = t[i+1] - t[i] - 𝔹 = I + (Diagonal(view(V, :, i)) - 𝔸) * dt - ψ = 𝔹 \ (u[:, i+1] .+ f[:, i] .* dt) - u[:, i] .= ψ - end + return u + elseif direction == :forward + u = feynman_kac(𝔸; t = - reverse(t), ψ = ψ, f = f, V = V, direction = :backward) + return u[:,end:-1:1] else - error("f and V must be Vectors or Matrices") + error("Direction must be :backward or :forward") end - return u end -#======================================================================================== -Solve the PDE forward in time -u(x, 0) = ψ(x) -u_t = 𝔸u - V(x)u + f(x) +""" +If direction = :backward +compute `u(x, t) = 
E[∫t^T e^{-∫ts V(x_τ, τ)dτ}f(x_s, s)ds + e^{-∫tT V(x_τ, τ)dτ}ψ(x_T)|x_t = x]` +If direction = :forward +compute `u(x, t)= E[∫0^t e^{-∫0^s V(x_τ)dτ}f(x_s)ds + e^{-∫0^tV(x_τ)dτ}ψ(x_t)|x_0 = x]` +""" +function feynman_kac(x::MarkovProcess; kwargs...) + feynman_kac(generator!(x); kwargs...) +end + +function feynman_kac(x::AbstractVector, μx::AbstractVector, σx::AbstractVector; kwargs...) + feynman_kac(MarkovProcess(x, μx, σx); kwargs...) +end -using implicit finite difference scheme, that is -u_0 = ψ -u_t = (I - 𝔸dt) \ (u_{t+1} + f dt) -========================================================================================# -function feynman_kac_forward(𝔸::AbstractMatrix; - t::AbstractVector = range(0, 100, step = 1/12), kwargs...) - u = feynman_kac_backward(𝔸; t = - reverse(t), kwargs...) - return u[:,end:-1:1] +""" +If direction = :forward +compute `E[M_t ψ(x_t)|x_0 = x]` +""" +function feynman_kac(M::MultiplicativeFunctional; kwargs...) + feynman_kac(generator!(M); kwargs...) +end +function feynman_kac(x::AbstractVector{<:Number}, μx::AbstractVector{<:Number}, σx::AbstractVector{<:Number}, μM::AbstractVector{<:Number}, σM::AbstractVector{<:Number}; ρ::Number = 0.0, δ::Number = 0.0, kwargs...) + feynman_kac(MultiplicativeFunctional(MarkovProcess(x, μx, σx), μM, σM; ρ = ρ, δ = δ); kwargs...) end diff --git a/src/operators.jl b/src/operators.jl index b95cce5..9561551 100644 --- a/src/operators.jl +++ b/src/operators.jl @@ -7,30 +7,6 @@ Note that 𝔸'g = v_0 * g - ∂(v1 * g) + ∂∂(v2 * g) ========================================================================================# - -function operator(x::AbstractVector, v0::AbstractVector, v1::AbstractVector, v2::AbstractVector) - n = length(x) - 𝔸 = Tridiagonal(zeros(n-1), zeros(n), zeros(n-1)) - Δ = make_Δ(x) - operator!(𝔸, Δ, v0, v1, v2) -end - -function make_Δ(x) - n = length(x) - Δxm = zero(x) - Δxm[1] = x[2] - x[1] - for i in 2:n - Δxm[i] = x[i] - x[i-1] - end - Δxp = zero(x) - for i in 1:(n-1) - Δxp[i] = x[i+1] - x[i] - end - Δxp[end] = x[n] - x[n-1] - Δx = (Δxm .+ Δxp) / 2 - return x, 1 ./ Δx, 1 ./ Δxm, 1 ./ Δxp -end - function operator!(𝔸, Δ, v0::AbstractVector, v1::AbstractVector, v2::AbstractVector) # The key is that sum of each column = 0.0 and off diagonals are positive (singular M-matrix) x, invΔx, invΔxm, invΔxp = Δ @@ -52,11 +28,9 @@ function operator!(𝔸, Δ, v0::AbstractVector, v1::AbstractVector, v2::Abstrac for i in 1:n 𝔸[i, i] += v0[i] - c[i] end - return adjoint(𝔸) + adjoint(𝔸) end - - #======================================================================================== Compute the principal eigenvector and eigenvalue of 𝔸 @@ -71,11 +45,11 @@ Note that, in particular, it is the eigenvalue with largest real part, which mea If, moreover, B, is a M-matrix, then all its eigenvalues have positive real part. Therefore, all the eigenvalues of A have negative real part. Therefore, the eigenvalue with largest real part is also the eigenvalue with smallest magnitude. 
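
As a concrete two-state illustration (not from the package itself), take the generator
𝔸 = [-a a; b -b] with a, b > 0. Its eigenvalues are 0 and -(a + b), so the eigenvalue with
the largest real part (0) is also the one with the smallest magnitude; its right eigenvector
is the constant vector and its left eigenvector, proportional to (b, a), is the stationary
distribution.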
========================================================================================# -function principal_eigenvalue(𝔸::AbstractMatrix; which = :SM, eigenvector = :right) +function principal_eigenvalue(𝔸::AbstractMatrix; which = :SM, eigenvector = :right, r0 = ones(size(𝔸, 1))) f, η, g = nothing, nothing, nothing if which == :SM if eigenvector ∈ (:right, :both) - vals, vecs = Arpack.eigs(𝔸; nev = 1, which = :SM) + vals, vecs = Arpack.eigs(𝔸; v0 = r0, nev = 1, which = :SM) η = vals[1] f = vecs[:, 1] end @@ -85,20 +59,20 @@ function principal_eigenvalue(𝔸::AbstractMatrix; which = :SM, eigenvector = : g = vecs[:, 1] end elseif which == :LR - # While Arpack accepts SM, it often fails. Moreover it does not give the "right" eigenvector in term of multiplicity. + # Arpack LR tends to fail if the LR is close to zero, which is the typical case if we want to compute tail index + # Arpack SM is much faster, but it does not always give the right eigenvector (either because LR ≠ SM (happens when the eigenvalue is very positive) + # Even when it gives the right eigenvalue, it can return a complex eigenvector if eigenvector ∈ (:right, :both) - vals, vecs, info = KrylovKit.eigsolve(𝔸, 1, :LR, maxiter = size(𝔸, 1)) - if info.converged > 0 - η = vals[1] - f = vecs[1] - end + vals, vecs, info = KrylovKit.eigsolve(𝔸, r0, 1, :LR, maxiter = size(𝔸, 1)) + info.converged == 0 && @warn "KrylovKit did not converge" + η = vals[1] + f = vecs[1] end if eigenvector ∈ (:left, :both) vals, vecs, info = KrylovKit.eigsolve(adjoint(𝔸), 1, :LR, maxiter = size(𝔸, 1)) - if info.converged > 0 - η = vals[1] - g = vecs[1] - end + info.converged == 0 && @warn "KrylovKit did not converge" + η = vals[1] + g = vecs[1] end end return clean_eigenvector_left(g), clean_eigenvalue(η), clean_eigenvector_right(f) diff --git a/src/special.jl b/src/special.jl new file mode 100644 index 0000000..238649d --- /dev/null +++ b/src/special.jl @@ -0,0 +1,26 @@ +struct MarkovProcess + x::AbstractVector{<:Real} + μx::AbstractVector{<:Real} + σx::AbstractVector{<:Real} +end + + + +function OrnsteinUhlenbeck(; xbar = 0.0, κ = 0.1, σ = 1.0, length = 100) + x = range(xbar - 5 * σ , xbar + 5 * σ, length = 100) + μx .= κ .* (xbar .- x) + σx .= σ .* ones(length(x)) + (x, μx, σx) +end + + +function CoxIngersollRoss(; xbar = 0.1, κ = 0.1, σ = 1.0, length = 100) + x = range(μ - 5 * σ , μ + 5 * σ, length = 100) + α = 2 * κ * xbar / σ^2 + β = σ^2 / (2 * κ) + vmin = quantile(Gamma(α, β), 0.025) + vmax = quantile(Gamma(α, β), 0.975) + μx .= κ .* (μ .- x) + σx .= σ .* sqrt.(x) + (x, μx, σx) +end diff --git a/test/runtests.jl b/test/runtests.jl index 41ac987..8cc378f 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,75 +1,80 @@ using InfinitesimalGenerators, Test, Statistics, LinearAlgebra, Expokit -## Ornstein–Uhlenbeck -κx = 0.1 +κ = 0.1 σ = 0.02 -x = range(- 10 * sqrt(σ^2 /(2 * κx)), stop = 10 * sqrt(σ^2 /(2 * κx)), length = 1000) -μx = -κx .* x -σx = σ .* ones(length(x)) +X = OrnsteinUhlenbeck(;κ =κ, σ = σ, length = 1000) -## stationnary distribution -g = stationary_distribution(x, μx, σx) - ## Feynman-Kac -ψ = x.^2 +ψ = X.x.^2 t = range(0, stop = 100, step = 1/10) -𝔸 = generator(x, μx, σx) -u = feynman_kac_forward(x, μx, σx; t = t, ψ = ψ) -# Check results using exponential integrator. 
I could also use KrylovKit.exponentiate -@test maximum(abs, u[:, 50] .- expmv(t[50], 𝔸, ψ)) <= 1e-3 -@test maximum(abs, u[:, 200] .- expmv(t[200], 𝔸, ψ)) <= 1e-3 -@test maximum(abs, u[:, end] .- expmv(t[end], 𝔸, ψ)) <= 1e-5 -@test maximum(abs, feynman_kac_forward(x, μx, σx; t = t, ψ = ψ) .- feynman_kac_forward(x, μx, σx; t = collect(t), ψ = ψ)) <= 1e-5 +u = feynman_kac(X; t = t, ψ = ψ, direction = :forward) +@test maximum(abs, u[:, 50] .- expmv(t[50], generator(X), ψ)) <= 1e-3 +@test maximum(abs, u[:, 200] .- expmv(t[200], generator(X), ψ)) <= 1e-3 +@test maximum(abs, u[:, end] .- expmv(t[end], generator(X), ψ)) <= 1e-5 +@test maximum(abs, feynman_kac(X; t = t, ψ = ψ, direction = :forward) .- feynman_kac(X; t = collect(t), ψ = ψ, direction = :forward)) <= 1e-5 ## Multiplicative Functional dM/M = x dt -μM = x -σM = zeros(length(x)) -g, η, f = hansen_scheinkman(x, μx, σx, μM, σM) -@test η ≈ 0.5 * σ^2 / κx^2 atol = 1e-2 -@test maximum(abs, f ./ exp.(x ./ κx) .- mean(f ./ exp.(x ./ κx))) <= 1e-2 - -t = range(0, stop = 100, step = 1/10) -u = feynman_kac_forward(x, μx, σx, μM, σM; t = t) -@test log.(stationary_distribution(x, μx, σx)' * u[:, end]) ./ t[end] ≈ η atol = 1e-2 - - -## test left and right eigenvector -μM = x -σM = 0.01 * ones(length(x)) -g, η, f = hansen_scheinkman(x, μx, σx, μM, σM; ρ = 1, eigenvector = :both) -ψ = stationary_distribution(x, μx .+ σM .* σx, σx) -@test (f .* ψ) ./sum(f .* ψ) ≈ g rtol = 1e-3 - - - -## test left and right eigenvector -κx = 0.1 -σ = 0.02 -x = range(- 6 * sqrt(σ^2 /(2 * κx)), stop = 6 * sqrt(σ^2 /(2 * κx)), length = 400) -μx = -κx .* x -σx = σ .* ones(length(x)) -μM = -0.01 .+ x -σM = 0.1 .* ones(length(x)) -ζ = tail_index(x, μx, σx, μM, σM) -ζ_analytic = 2 * (0.01 + 0.1^2/2) / (0.1^2 + (σ / κx)^2) +M = MultiplicativeFunctional(X, X.x, zeros(length(X.x))) +l, η, r = hansen_scheinkman(M) +@test η ≈ 0.5 * σ^2 / κ^2 atol = 1e-2 +r_analytic = exp.(X.x ./ κ) +@test norm(r ./ sum(r) .- r_analytic ./ sum(r_analytic)) <= 2 * 1e-3 +t = range(0, stop = 200, step = 1/10) +u = feynman_kac(M; t = t, direction = :forward) +@test log.(stationary_distribution(X)' * u[:, end]) ./ t[end] ≈ η atol = 1e-2 + + +## test left and right eigenvector with correlation +M = MultiplicativeFunctional(X, X.x, 0.01 * ones(length(X.x)); ρ = 1) +l, η, r = hansen_scheinkman(M; eigenvector = :both) +ψ_tilde = stationary_distribution(MarkovProcess(X.x, X.μx .+ M.ρ .* M.σM .* X.σx, X.σx)) +@test (r .* ψ_tilde) ./ sum(r .* ψ_tilde) ≈ l rtol = 1e-3 + + +# Test Multiplicative with ρ = 0.0 +μM = -0.01 +σM = 0.1 +M = MultiplicativeFunctional(X, μM .+ X.x, σM .* ones(length(X.x))) +ζ = tail_index(M) +ζ_analytic = 2 * (-μM + σM^2/2) / (σM^2 + (σ / κ)^2) @test ζ ≈ ζ_analytic atol = 1e-2 -g, η, f = cgf_longrun(MultiplicativeFunctional(x, μx, σx, μM, σM), ζ; eigenvector = :both) +l, η, r = cgf_longrun(M; eigenvector = :both)(ζ) @test η ≈ 0.0 atol = 1e-5 -ψ = stationary_distribution(x, μx, σx) -@test (f .* ψ) ./ sum(f .* ψ) ≈ g rtol = 1e-3 +ψ = stationary_distribution(X) +@test (r .* ψ) ./ sum(r .* ψ) ≈ l rtol = 1e-3 -ρ = 1.0 -ζ = tail_index(x, μx, σx, μM, σM, ρ = ρ) -g, η, f = cgf_longrun(MultiplicativeFunctional(x, μx, σx, μM, σM; ρ = ρ), ζ; eigenvector = :both) -@test η ≈ 0.0 atol = 1e-5 -ψ = stationary_distribution(x, μx .+ ρ .* ζ .* σM .* σx, σx) -@test (f .* ψ) ./ sum(f .* ψ) ≈ g rtol = 1e-3 +# Test that the modified process μ + σ^2 ∂ ln(r) has a stationary distribution given by $r^2ψ$ +X = OrnsteinUhlenbeck(;κ =κ, σ = σ, length = 1000) +M = MultiplicativeFunctional(X, μM .+ X.x .- 0.02, σM .* 
ones(length(X.x))) +ψ = stationary_distribution(X) +ζ = tail_index(M) +l, η, r = cgf_longrun(M; eigenvector = :both)(ζ) +ψ_cond = stationary_distribution(MarkovProcess(X.x, X.μx .+ X.σx.^2 .* InfinitesimalGenerators.∂(X, log.(r)), X.σx)) +@test (r.^2 .* ψ) ./ sum(r.^2 .* ψ) ≈ ψ_cond rtol = 1e-1 + +# Test Multiplicative with ρ = 1.0 +M = MultiplicativeFunctional(X, M.μM, M.σM; ρ = 1.0) +ζ = tail_index(M) +l, η, r = cgf_longrun(M; eigenvector = :both)(ζ) +@test η ≈ 0.0 atol = 1e-5 +ψ_tilde = stationary_distribution(MarkovProcess(X.x, X.μx .+ ζ .* M.σM .* M.ρ .* X.σx , X.σx)) +@test (r .* ψ_tilde) ./ sum(r .* ψ_tilde) ≈ l rtol = 1e-3 +# Test CIR +gbar = 0.03 +σ = 0.01 +X = CoxIngersollRoss(xbar = gbar, κ = κ, σ = σ) +M = MultiplicativeFunctional(X, X.x, zeros(length(X))) +η_analytic = gbar * κ^2 / σ^2 * (1 - sqrt(1 - 2 * σ^2 / κ^2)) +@test cgf_longrun(M)(1.0)[2] ≈ η_analytic rtol = 1e-2 +# for CIR the speed is given by +# speed_analytic = - (g .- 0.009) + xbar * κ / sqrt(κ^2 - 2 * σ^2 * ζ) +# r, η, l = cgf_longrun(M, eigenvector = :both)(ζ) \ No newline at end of file
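
# Derivation sketch for η_analytic above (guess-and-verify, not itself a test): with μM = x
# and σM = 0, the ξ = 1 tilted generator is 𝔸₁ r = x r + κ (gbar - x) r' + (σ²/2) x r''.
# Trying r(x) = exp(λ x), the terms linear in x vanish when 1 - κ λ + (σ²/2) λ² = 0, i.e.
# λ = (κ - sqrt(κ² - 2σ²)) / σ² (the root consistent with the test), and the constant term
# then gives η = κ * gbar * λ = gbar * κ²/σ² * (1 - sqrt(1 - 2σ²/κ²)), which is η_analytic.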