diff --git a/src/DualTVDD.jl b/src/DualTVDD.jl index 97475b975aa7b2bbdb6abd3290f3178d39a13476..ab2cac450f4438d47d5f25984f7de20647e37483 100644 --- a/src/DualTVDD.jl +++ b/src/DualTVDD.jl @@ -21,13 +21,13 @@ function run() B = 0.1 * rand(length(g), length(g)) B .+= diagm(ones(length(g))) - α = 0.025 + λ = 0.025 display(norm(B)) - md = DualTVDD.DualTVDDModel(g, B, α) + md = DualTVDD.DualTVL1ROFOpModel(g, B, λ) ctx = DualTVDD.init(md, DualTVDD.ChambolleAlgorithm()) - ctx2 = DualTVDD.init(md, DualTVDD.ProjGradAlgorithm(λ=1/sqrt(8)/norm(B))) + ctx2 = DualTVDD.init(md, DualTVDD.ProjGradAlgorithm(τ=1/sqrt(8)/norm(B))) #scene = vbox( # heatmap(ctx.s, colorrange=(0,1), colormap=:gray, scale_plot=false, show_axis=false), @@ -79,13 +79,13 @@ function rundd() g = similar(f) vec(g) .= A' * vec(f) - α = 1/4 + λ = 1/4 - md = DualTVDD.DualTVDDModel(f, A, α, 0., 0.) + md = DualTVDD.DualTVL1ROFOpModel(f, A, λ) alg = DualTVDD.DualTVDDAlgorithm(M=(2,2), overlap=(2,2), σ=0.25) ctx = DualTVDD.init(md, alg) - md2 = DualTVDD.DualTVDDModel(g, B, α) + md2 = DualTVDD.DualTVL1ROFOpModel(g, B, λ) alg2 = DualTVDD.ChambolleAlgorithm() ctx2 = DualTVDD.init(md2, alg2) @@ -139,15 +139,15 @@ function run3() B = inv(A'*A + β*I) println(norm(A)) - α = 0.1 + λ = 0.1 # Chambolle - md = DualTVDD.DualTVDDModel(g, B, α) + md = DualTVDD.DualTVL1ROFOpModel(g, B, λ) alg = DualTVDD.ChambolleAlgorithm() ctx = DualTVDD.init(md, alg) # Projected Gradient - md = DualTVDD.DualTVDDModel(f, A, α, 0., 0.) + md = DualTVDD.DualTVL1ROFOpModel(f, A, λ) 
alg = DualTVDD.ProjGradAlgorithm(λ = 1/norm(A)^2) ctx2 = DualTVDD.init(md, alg) @@ -205,7 +205,7 @@ function energy(ctx::ChambolleContext) end -function energy(md::DualTVDDModel, u::AbstractMatrix) +function energy(md::DualTVL1ROFOpModel, u::AbstractMatrix) @inline kf(w) = @inbounds 1/2 * (w[0,0] - md.g[w.position])^2 + md.λ * sqrt((w[1,0] - w[0,0])^2 + (w[0,1] - w[0,0])^2) k = Kernel{(0:1, 0:1)}(kf, StaticKernels.ExtensionReplicate()) diff --git a/src/chambolle.jl b/src/chambolle.jl index bd165c5a488c0b2efc585fdd8b591ce4d46a3628..5d5b44622ad9da18b80c756f6ba92543ee8b66c7 100644 --- a/src/chambolle.jl +++ b/src/chambolle.jl @@ -4,8 +4,8 @@ # https://doi.org/10.1023/B:JMIV.0000011325.36760.1e # # Implementation Notes: -# - λ is called α and is not a scalar but a scalar field -# - pointwise feasibility constraint |p|<=α instead of |p|<=1 +# - λ is not a scalar but a scalar field +# - pointwise feasibility constraint |p|<=λ instead of |p|<=1 # - B is introduced, the original Chambolle algorithm has B=Id using Base: @_inline_meta @@ -45,7 +45,7 @@ struct ChambolleContext{M,A,T,R,S,Sv,K1,K2} <: Context k2::K2 end -function init(md::DualTVDDModel, alg::ChambolleAlgorithm) +function init(md::DualTVL1ROFOpModel, alg::ChambolleAlgorithm) d = ndims(md.g) pv = zeros(d * length(md.g)) rv = zeros(length(md.g)) @@ -63,9 +63,9 @@ function init(md::DualTVDDModel, alg::ChambolleAlgorithm) normB = my_norm(md.B) @inline function kf2(sw) - @inbounds iszero(md.α[sw.position]) && return zero(p[sw.position]) + @inbounds iszero(md.λ[sw.position]) && return zero(p[sw.position]) sgrad = (alg.τ / normB) * gradient(sw) - return @inbounds (p[sw.position] + sgrad) / (1 + norm(sgrad) / md.α[sw.position]) + return @inbounds (p[sw.position] + sgrad) / (1 + norm(sgrad) / md.λ[sw.position]) end k2 = Kernel{ntuple(_->0:1, d)}(kf2) diff --git a/src/dualtvdd.jl b/src/dualtvdd.jl index de7d1b041b93e5917f56ac03954800704f01a057..7077bf1505e4ec8e3145349a8838e00bb0c87de4 100644 --- 
a/src/dualtvdd.jl +++ b/src/dualtvdd.jl @@ -29,7 +29,7 @@ struct DualTVDDContext{M,A,G,d,U,V,Vtmp,VV,SAx,SC} subctx::Array{SC,d} end -function init(md::DualTVDDModel, alg::DualTVDDAlgorithm) +function init(md::DualTVL1ROFOpModel, alg::DualTVDDAlgorithm) d = ndims(md.f) ax = axes(md.f) @@ -38,7 +38,7 @@ function init(md::DualTVDDModel, alg::DualTVDDAlgorithm) # preallocated data for subproblems subg = [Array{Float64, d}(undef, length.(ax)) for i in CartesianIndices(subax)] # locally dependent tv parameter - subα = [md.α .* theta.(Ref(ax), Ref(subax[i]), Ref(alg.overlap), CartesianIndices(ax)) + subλ = [md.λ .* theta.(Ref(ax), Ref(subax[i]), Ref(alg.overlap), CartesianIndices(ax)) for i in CartesianIndices(subax)] # this is the global g, the local gs are getting initialized in step!() @@ -54,7 +54,7 @@ function init(md::DualTVDDModel, alg::DualTVDDAlgorithm) # create subproblem contexts li = LinearIndices(size(md.f)) - submds = [DualTVDDModel(subg[i], B, subα[i]) + submds = [DualTVL1ROFOpModel(subg[i], B, subλ[i]) for i in CartesianIndices(subax)] subalg = ProjGradAlgorithm(λ=1/sqrt(8)/norm(B)) diff --git a/src/models.jl b/src/models.jl index f88fb66045d9283e16d34a33757a3c0c3e0ab1d9..c13bbc0f480fd08d744c8baa95afafa5526f142d 100644 --- a/src/models.jl +++ b/src/models.jl @@ -1,17 +1,20 @@ -"min_p 1/2 * |-div(p)-g|_B^2 + χ_{|p|_2<α}" -struct DualROFOpModel{Tg,TB,Tα} <: Model +#"min_u 1/2 * |Au-f|_2^2 + λ*TV(u) + β/2 |u|_2 + γ |u|_1" +"min_p 1/2 * |p₂ - div(p₁) - g|_B^2 + χ_{|p₁|≤λ} + χ_{|p₂|≤γ}" +struct DualTVL1ROFOpModel{Tg,TB,Tλ,Tγ} <: Model "dual data" g::Tg "B operator" B::TB - "total variation parameter" - α::Tα + "TV regularization parameter" + λ::Tλ + "L1 regularization parameter" + γ::Tγ end -DualTVDDModel(g, B, α) = DualTVDDModel(g, B, α, nothing) -DualTVDDModel(g, B, α::Real) = DualTVDDModel(g, B, fill!(similar(g), α)) +DualTVL1ROFOpModel(g, B, λ) = DualTVL1ROFOpModel(g, B, λ, nothing) +DualTVL1ROFOpModel(g, B, λ::Real) = DualTVL1ROFOpModel(g, B, 
fill!(similar(g), λ)) -function recover_u(p, md::DualTVDDModel) +function recover_u(p, md::DualTVL1ROFOpModel) d = ndims(md.g) u = similar(md.g) v = similar(md.g) diff --git a/src/projgrad.jl b/src/projgrad.jl index 07cfa262f643aab2635807fbba7259d7a6e5cf43..6071fbf01a23a969a3ec08e56f45275006836873 100644 --- a/src/projgrad.jl +++ b/src/projgrad.jl @@ -26,7 +26,7 @@ struct ProjGradContext{M,A,Tp,Wv,R,S,K1,K2} k2::K2 end -function init(md::DualTVDDModel, alg::ProjGradAlgorithm) +function init(md::DualTVL1ROFOpModel, alg::ProjGradAlgorithm) d = ndims(md.g) ax = axes(md.g) @@ -46,7 +46,7 @@ function init(md::DualTVDDModel, alg::ProjGradAlgorithm) @inline function kf2(pw, sw) @inbounds q = pw[z] - alg.τ * gradient(sw) - return @inbounds q / max(norm(q) / md.α[sw.position], 1) + return @inbounds q / max(norm(q) / md.λ[sw.position], 1) end k2 = Kernel{ntuple(_->0:1, d)}(kf2) diff --git a/test/runtests.jl b/test/runtests.jl index bdc0181ffd97881da7425ffaf13d95dd1d3ed33f..a540d6cfe9c3d9dba38a559aa3fa4e485f518f21 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,11 +1,11 @@ using Test using LinearAlgebra using DualTVDD: - DualTVDDModel, ProjGradAlgorithm, ChambolleAlgorithm, + DualTVL1ROFOpModel, ProjGradAlgorithm, ChambolleAlgorithm, init, step!, recover_u g = Float64[0 2; 1 0] -md = DualTVDDModel(g, I, 1e-10) +md = DualTVL1ROFOpModel(g, I, 1e-10) for alg in (ProjGradAlgorithm(τ=1/8), ChambolleAlgorithm()) ctx = init(md, alg)