From e4e2d6f30dd34be5eaea4daa930f9beb1e66966c Mon Sep 17 00:00:00 2001 From: Stephan Hilb <stephan@ecshi.net> Date: Wed, 1 Jul 2020 17:06:43 +0200 Subject: [PATCH] rename variables --- src/DualTVDD.jl | 20 ++++++++++---------- src/chambolle.jl | 10 +++++----- src/dualtvdd.jl | 6 +++--- src/models.jl | 17 ++++++++++------- src/projgrad.jl | 4 ++-- test/runtests.jl | 4 ++-- 6 files changed, 32 insertions(+), 29 deletions(-) diff --git a/src/DualTVDD.jl b/src/DualTVDD.jl index 97475b9..ab2cac4 100644 --- a/src/DualTVDD.jl +++ b/src/DualTVDD.jl @@ -21,13 +21,13 @@ function run() B = 0.1 * rand(length(g), length(g)) B .+= diagm(ones(length(g))) - α = 0.025 + λ = 0.025 display(norm(B)) - md = DualTVDD.DualTVDDModel(g, B, α) + md = DualTVDD.DualTVL1ROFOpModel(g, B, λ) ctx = DualTVDD.init(md, DualTVDD.ChambolleAlgorithm()) - ctx2 = DualTVDD.init(md, DualTVDD.ProjGradAlgorithm(λ=1/sqrt(8)/norm(B))) + ctx2 = DualTVDD.init(md, DualTVDD.ProjGradAlgorithm(τ=1/sqrt(8)/norm(B))) #scene = vbox( # heatmap(ctx.s, colorrange=(0,1), colormap=:gray, scale_plot=false, show_axis=false), @@ -79,13 +79,13 @@ function rundd() g = similar(f) vec(g) .= A' * vec(f) - α = 1/4 + λ = 1/4 - md = DualTVDD.DualTVDDModel(f, A, α, 0., 0.) + md = DualTVDD.DualTVL1ROFOpModel(f, A, λ, 0., 0.) alg = DualTVDD.DualTVDDAlgorithm(M=(2,2), overlap=(2,2), σ=0.25) ctx = DualTVDD.init(md, alg) - md2 = DualTVDD.DualTVDDModel(g, B, α) + md2 = DualTVDD.DualTVL1ROFOpModel(g, B, λ) alg2 = DualTVDD.ChambolleAlgorithm() ctx2 = DualTVDD.init(md2, alg2) @@ -139,15 +139,15 @@ function run3() B = inv(A'*A + β*I) println(norm(A)) - α = 0.1 + λ = 0.1 # Chambolle - md = DualTVDD.DualTVDDModel(g, B, α) + md = DualTVDD.DualTVL1ROFOpModel(g, B, λ) alg = DualTVDD.ChambolleAlgorithm() ctx = DualTVDD.init(md, alg) # Projected Gradient - md = DualTVDD.DualTVDDModel(f, A, α, 0., 0.) + md = DualTVDD.DualTVL1ROFOpModel(f, A, λ, 0., 0.) 
alg = DualTVDD.ProjGradAlgorithm(λ = 1/norm(A)^2) ctx2 = DualTVDD.init(md, alg) @@ -205,7 +205,7 @@ function energy(ctx::ChambolleContext) end -function energy(md::DualTVDDModel, u::AbstractMatrix) +function energy(md::DualTVL1ROFOpModel, u::AbstractMatrix) @inline kf(w) = @inbounds 1/2 * (w[0,0] - md.g[w.position])^2 + md.λ * sqrt((w[1,0] - w[0,0])^2 + (w[0,1] - w[0,0])^2) k = Kernel{(0:1, 0:1)}(kf, StaticKernels.ExtensionReplicate()) diff --git a/src/chambolle.jl b/src/chambolle.jl index bd165c5..5d5b446 100644 --- a/src/chambolle.jl +++ b/src/chambolle.jl @@ -4,8 +4,8 @@ # https://doi.org/10.1023/B:JMIV.0000011325.36760.1e # # Implementation Notes: -# - λ is called α and is not a scalar but a scalar field -# - pointwise feasibility constraint |p|<=α instead of |p|<=1 +# - λ is not a scalar but a scalar field +# - pointwise feasibility constraint |p|<=λ instead of |p|<=1 # - B is introduced, the original Chambolle algorithm has B=Id using Base: @_inline_meta @@ -45,7 +45,7 @@ struct ChambolleContext{M,A,T,R,S,Sv,K1,K2} <: Context k2::K2 end -function init(md::DualTVDDModel, alg::ChambolleAlgorithm) +function init(md::DualTVL1ROFOpModel, alg::ChambolleAlgorithm) d = ndims(md.g) pv = zeros(d * length(md.g)) rv = zeros(length(md.g)) @@ -63,9 +63,9 @@ function init(md::DualTVDDModel, alg::ChambolleAlgorithm) normB = my_norm(md.B) @inline function kf2(sw) - @inbounds iszero(md.α[sw.position]) && return zero(p[sw.position]) + @inbounds iszero(md.λ[sw.position]) && return zero(p[sw.position]) sgrad = (alg.τ / normB) * gradient(sw) - return @inbounds (p[sw.position] + sgrad) / (1 + norm(sgrad) / md.α[sw.position]) + return @inbounds (p[sw.position] + sgrad) / (1 + norm(sgrad) / md.λ[sw.position]) end k2 = Kernel{ntuple(_->0:1, d)}(kf2) diff --git a/src/dualtvdd.jl b/src/dualtvdd.jl index de7d1b0..7077bf1 100644 --- a/src/dualtvdd.jl +++ b/src/dualtvdd.jl @@ -29,7 +29,7 @@ struct DualTVDDContext{M,A,G,d,U,V,Vtmp,VV,SAx,SC} subctx::Array{SC,d} end -function
init(md::DualTVDDModel, alg::DualTVDDAlgorithm) +function init(md::DualTVL1ROFOpModel, alg::DualTVDDAlgorithm) d = ndims(md.f) ax = axes(md.f) @@ -38,7 +38,7 @@ function init(md::DualTVDDModel, alg::DualTVDDAlgorithm) # preallocated data for subproblems subg = [Array{Float64, d}(undef, length.(ax)) for i in CartesianIndices(subax)] # locally dependent tv parameter - subα = [md.α .* theta.(Ref(ax), Ref(subax[i]), Ref(alg.overlap), CartesianIndices(ax)) + subλ = [md.λ .* theta.(Ref(ax), Ref(subax[i]), Ref(alg.overlap), CartesianIndices(ax)) for i in CartesianIndices(subax)] # this is the global g, the local gs are getting initialized in step!() @@ -54,7 +54,7 @@ function init(md::DualTVDDModel, alg::DualTVDDAlgorithm) # create subproblem contexts li = LinearIndices(size(md.f)) - submds = [DualTVDDModel(subg[i], B, subα[i]) + submds = [DualTVL1ROFOpModel(subg[i], B, subλ[i]) for i in CartesianIndices(subax)] subalg = ProjGradAlgorithm(λ=1/sqrt(8)/norm(B)) diff --git a/src/models.jl b/src/models.jl index f88fb66..c13bbc0 100644 --- a/src/models.jl +++ b/src/models.jl @@ -1,17 +1,20 @@ -"min_p 1/2 * |-div(p)-g|_B^2 + χ_{|p|_2<α}" -struct DualROFOpModel{Tg,TB,Tα} <: Model +#"min_u 1/2 * |Au-f|_2^2 + λ*TV(u) + β/2 |u|_2^2 + γ |u|_1" +"min_p 1/2 * |p₂ - div(p₁) - g|_B^2 + χ_{|p₁|≤λ} + χ_{|p₂|≤γ}" +struct DualTVL1ROFOpModel{Tg,TB,Tλ,Tγ} <: Model "dual data" g::Tg "B operator" B::TB - "total variation parameter" - α::Tα + "TV regularization parameter" + λ::Tλ + "L1 regularization parameter" + γ::Tγ end -DualTVDDModel(g, B, α) = DualTVDDModel(g, B, α, nothing) -DualTVDDModel(g, B, α::Real) = DualTVDDModel(g, B, fill!(similar(g), α)) +DualTVL1ROFOpModel(g, B, λ) = DualTVL1ROFOpModel(g, B, λ, nothing) +DualTVL1ROFOpModel(g, B, λ::Real) = DualTVL1ROFOpModel(g, B, fill!(similar(g), λ)) -function recover_u(p, md::DualTVDDModel) +function recover_u(p, md::DualTVL1ROFOpModel) d = ndims(md.g) u = similar(md.g) v = similar(md.g) diff --git a/src/projgrad.jl b/src/projgrad.jl index
07cfa26..6071fbf 100644 --- a/src/projgrad.jl +++ b/src/projgrad.jl @@ -26,7 +26,7 @@ struct ProjGradContext{M,A,Tp,Wv,R,S,K1,K2} k2::K2 end -function init(md::DualTVDDModel, alg::ProjGradAlgorithm) +function init(md::DualTVL1ROFOpModel, alg::ProjGradAlgorithm) d = ndims(md.g) ax = axes(md.g) @@ -46,7 +46,7 @@ function init(md::DualTVDDModel, alg::ProjGradAlgorithm) @inline function kf2(pw, sw) @inbounds q = pw[z] - alg.τ * gradient(sw) - return @inbounds q / max(norm(q) / md.α[sw.position], 1) + return @inbounds q / max(norm(q) / md.λ[sw.position], 1) end k2 = Kernel{ntuple(_->0:1, d)}(kf2) diff --git a/test/runtests.jl b/test/runtests.jl index bdc0181..a540d6c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -1,11 +1,11 @@ using Test using LinearAlgebra using DualTVDD: - DualTVDDModel, ProjGradAlgorithm, ChambolleAlgorithm, + DualTVL1ROFOpModel, ProjGradAlgorithm, ChambolleAlgorithm, init, step!, recover_u g = Float64[0 2; 1 0] -md = DualTVDDModel(g, I, 1e-10) +md = DualTVL1ROFOpModel(g, I, 1e-10) for alg in (ProjGradAlgorithm(τ=1/8), ChambolleAlgorithm()) ctx = init(md, alg) -- GitLab