Commit ce1fb5d8 authored by Stephan Hilb

implement inpainting and optical flow

parent 2271b59a
data/
module DualTVDD
export init, step!, fetch, run
include("common.jl")
include("problems.jl")
include("chambolle.jl")
include("dualtvdd.jl")
include("projgrad.jl")
using Plots
using LinearAlgebra: I, diagm, norm
function run()
#g = [0. 2; 1 0.]
g = rand(10,10)
#g[4:17,4:17] .= 1
#g[:size(g, 1)÷2,:] .= 1
#g = [0. 2; 1 0.]
display(g)
B = 0.1 * rand(length(g), length(g))
B .+= diagm(ones(length(g)))
λ = 0.025
display(norm(B))
prob = DualTVDD.DualTVL1ROFOpProblem(g, B, λ)
ctx = DualTVDD.init(DualTVDD.ChambolleAlgorithm(prob))
ctx2 = DualTVDD.init(DualTVDD.ProjGradAlgorithm(prob, τ=1/sqrt(8)/norm(B)))
#scene = vbox(
# heatmap(ctx.s, colorrange=(0,1), colormap=:gray, scale_plot=false, show_axis=false),
# heatmap(ctx.s, colorrange=(0,1), colormap=:gray, scale_plot=false, show_axis=false),
# )
#display(scene)
#hm = last(scene)
for i in 1:10000
step!(ctx)
step!(ctx2)
#hm[1] = ctx.s
#yield()
#sleep(0.2)
end
display(copy(ctx.p))
#display(copy(recover_u!(ctx)))
println(energy(ctx))
println()
display(copy(ctx2.p))
#display(copy(recover_u!(ctx2)))
println(energy(ctx2))
ctx, ctx2
#hm[1] = ctx.s
#yield()
end
function alg_energy(alg, niter)
print("run ...")
res = Float64[]
(p, ctx) = iterate(alg)
push!(res, energy(ctx))
for i in 1:niter
(p, ctx) = iterate(alg, ctx)
push!(res, energy(ctx))
end
println(" finished")
return res
end
function alg_error(alg, pmin, niter, ninner=1)
print("run ...")
res = Float64[]
ctx = init(alg)
push!(res, error(fetch(ctx), pmin, ctx.algorithm.problem))
for i in 1:niter
for j in 1:ninner
step!(ctx)
end
push!(res, error(fetch(ctx), pmin, ctx.algorithm.problem))
end
println(" finished")
return res
end
function calc_energy(prob, niter)
alg_ref = DualTVDD.ChambolleAlgorithm(prob)
ctx = init(alg_ref)
for i in 1:niter
ctx = step!(ctx)
end
return energy(ctx), fetch(ctx)
end
function rundd()
λ = 2.5
β = 0
#f = zeros(100,100)
#f[1] = 1
#f = [0. 2; 1 0.]
#A = 0. * rand(length(f), length(f))
#A .+= diagm(ones(length(f)))
#B = inv(A'*A + β*I)
#g = similar(f)
#vec(g) .= A' * vec(f)
g = rand(50, 50)
prob = DualTVDD.DualTVL1ROFOpProblem(g, I, λ)
alg_ref = DualTVDD.ChambolleAlgorithm(prob)
alg_dd = DualTVDD.DualTVDDAlgorithm(prob, M=(2,2), overlap=(4,4))
alg_dd2 = DualTVDD.DualTVDDAlgorithm(prob, M=(2,2), overlap=(4,4), parallel=false)
n = 1000
ref_energy, pmin = calc_energy(prob, 100000)
lognan(x) = x > 0 ? x : NaN
#y = [
# lognan.(alg_energy(alg_ref, n) .- ref_energy),
# lognan.(alg_energy(alg_dd, n) .- ref_energy),
# lognan.(alg_energy(alg_dd2, n) .- ref_energy),
# ]
y = [
lognan.(alg_error(alg_ref, pmin, n, 10)),
lognan.(alg_error(alg_dd, pmin, n)),
lognan.(alg_error(alg_dd2, pmin, n)),
]
plt = plot(y, xaxis=:log, yaxis=:log)
display(plt)
#display(energy(ctx))
#display(ctx.p)
#display(recover_u(p, prob))
#println(energy(ctx))
#println(energy(ctx2))
end
function run3()
f = rand(20,20)
A = 0.1*rand(length(f), length(f))
A .+= diagm(ones(length(f)))
g = reshape(A'*vec(f), size(f))
β = 0
B = inv(A'*A + β*I)
println(norm(A))
λ = 0.1
# Chambolle
md = DualTVDD.DualTVL1ROFOpProblem(g, B, λ)
ctx = DualTVDD.init(DualTVDD.ChambolleAlgorithm(md))
# Projected Gradient on the same problem, step size tied to the operator norm
ctx2 = DualTVDD.init(DualTVDD.ProjGradAlgorithm(md, τ = 1/sqrt(8)/norm(B)))
for i in 1:100000
step!(ctx)
step!(ctx2)
end
#display(ctx.p)
#display(ctx2.p)
display(recover_u!(ctx))
display(recover_u!(ctx2))
println(energy(ctx))
println(energy(ctx2))
ctx, ctx2
end
function energy(ctx::Union{DualTVDDState,ProjGradState,ChambolleState})
return energy(ctx.p, ctx.algorithm.problem)
end
function error(p, pmin, prob::DualTVL1ROFOpProblem)
return energy_norm(p .- pmin, prob) / energy(pmin, prob)
end
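# Relative error in the B-weighted energy norm, spelling out the two
# helpers defined around it (the 1/2 factors cancel):
#
#   err(p) = ‖div(p − p_min)‖_B² / ‖div(p_min) + g‖_B²,
#   where ‖v‖_B² = ⟨B·vec(v), vec(v)⟩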
function energy(p, prob::DualTVL1ROFOpProblem)
d = ndims(p)
@inline kfΛ(w) = @inbounds divergence(w)
kΛ = Kernel{ntuple(_->-1:1, d)}(kfΛ)
v = similar(prob.g)
# v = div(p) + g
map!(kΛ, v, extend(p, StaticKernels.ExtensionNothing()))
v .+= prob.g
#display(v)
# |v|_B^2 / 2
u = prob.B * vec(v)
return sum(u .* vec(v)) / 2
end
function energy_norm(p, prob::DualTVL1ROFOpProblem)
d = ndims(p)
@inline kfΛ(w) = @inbounds divergence(w)
kΛ = Kernel{ntuple(_->-1:1, d)}(kfΛ)
v = similar(prob.g)
# v = div(p)
map!(kΛ, v, extend(p, StaticKernels.ExtensionNothing()))
#display(v)
# |v|_B^2 / 2
u = prob.B * vec(v)
return sum(u .* vec(v)) / 2
end
function energy(md::DualTVL1ROFOpProblem, u::AbstractMatrix)
@inline kf(w) = @inbounds 1/2 * (w[0,0] - md.g[w.position])^2 +
md.λ[w.position] * sqrt((w[1,0] - w[0,0])^2 + (w[0,1] - w[0,0])^2)
k = Kernel{(0:1, 0:1)}(kf, StaticKernels.ExtensionReplicate())
return sum(k, u)
end
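# Written out, the primal energy the kernel above sums over all pixels
# (forward differences, replicating boundary):
#
#   E(u) = Σ_{i,j} (1/2)·(u[i,j] − g[i,j])²
#            + λ[i,j]·√((u[i+1,j] − u[i,j])² + (u[i,j+1] − u[i,j])²)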
#using Plots: heatmap
#
#plotimage(u) = heatmap(u, c=:grayC, legend=:none, framestyle=:none, aspect_ratio=1)
end # module
@@ -48,18 +48,25 @@ struct ChambolleState{A,T,R,S,Sv,K1,K2} <: State
k2::K2
end
-projnorm(v, anisotropic) = anisotropic ? abs.(v) : norm(v)
+projnorm(v, anisotropic) = anisotropic ? abs.(v) : norm(vec(v))
projnorm(v) = projnorm(v, false)
+const SVectorNest = SArray{<:Any,<:SArray{<:Any,Float64,1},1}
+# TODO: respect anisotropic
+#@inline projnorm(v::SVectorNest, _) = norm(norm.(v))
+@inline projnorm(v::SVectorNest, _) = sqrt(sum(sum(v[i] .^ 2) for i in eachindex(v)))
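# Quick check (not part of the commit) that the nested-SVector method
# agrees with flattening, assuming StaticArrays is loaded:
#
#   v = SVector(SVector(3.0, 0.0), SVector(0.0, 4.0))
#   sqrt(sum(sum(v[i] .^ 2) for i in eachindex(v)))  # 5.0 == norm([3, 0, 0, 4])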
function init(alg::ChambolleAlgorithm{<:DualTVL1ROFOpProblem})
g = alg.problem.g
λ = alg.problem.λ
d = ndims(g)
-pv = zeros(d * length(g))
-rv = zeros(length(g))
+pv = zeros(d * length(reinterpret(Float64, g)))
+rv = zeros(eltype(g), length(g))
sv = zero(rv)
-p = extend(reshape(reinterpret(SVector{d,Float64}, pv), size(g)), StaticKernels.ExtensionNothing())
-r = reshape(reinterpret(Float64, rv), size(g))
-s = extend(reshape(reinterpret(Float64, sv), size(g)), StaticKernels.ExtensionReplicate())
+p = extend(reshape(reinterpret(SVector{d,eltype(g)}, pv), size(g)), StaticKernels.ExtensionNothing())
+r = reshape(rv, size(g))
+s = extend(reshape(sv, size(g)), StaticKernels.ExtensionReplicate())
@inline kf1(pw) = @inbounds divergence(pw) + g[pw.position]
k1 = Kernel{ntuple(_->-1:1, d)}(kf1)
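# Aside (not part of the commit): the StaticKernels pattern used here in
# miniature; names are illustrative only.
#
#   using StaticKernels
#   a = rand(5, 5)
#   k = Kernel{(-1:1, -1:1)}(w -> w[1, 0] - w[-1, 0])  # x finite difference
#   b = map(k, extend(a, StaticKernels.ExtensionReplicate()))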
@@ -78,6 +85,13 @@ function reset!(ctx::ChambolleState)
fill!(ctx.p, zero(eltype(ctx.p)))
end
+# special case for diagonal ops: apply the scalar diagonal entries
+# blockwise to an SVector-valued vector
+function LinearAlgebra.mul!(Y::AbstractVector{<:SVector}, A, B::AbstractVector{<:SVector})
+for i in eachindex(Y)
+Y[i] = A[i,i] * B[i]
+end
+return Y
+end
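# Intended use in sketch form (not part of the commit): a scalar Diagonal
# acting blockwise on an SVector-valued vector.
#
#   A = Diagonal([2.0, 3.0])
#   b = [SVector(1.0, 1.0), SVector(1.0, 0.0)]
#   y = similar(b)
#   mul!(y, A, b)  # y == [SVector(2.0, 2.0), SVector(3.0, 0.0)]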
function step!(ctx::ChambolleState)
alg = ctx.algorithm
# r = div(p) + g
@@ -5,7 +5,7 @@ using StaticKernels
<:Problem
The abstract type hierarchy specifies the problem model interface (problem,
-available data, available oracles).
+available data, available oracles, solution form).
"""
abstract type Problem end
@@ -37,15 +37,15 @@ function init(alg::DualTVDDAlgorithm{<:DualTVL1ROFOpProblem})
# subdomain axes
subax = subaxes(size(g), alg.M, alg.overlap)
# preallocated data for subproblems
-subg = [Array{Float64, d}(undef, length.(x)) for x in subax]
+subg = [Array{eltype(g), d}(undef, length.(x)) for x in subax]
# locally dependent tv parameter
subλ = [alg.problem.λ[subax[i]...] .* theta.(Ref(ax), Ref(subax[i]), Ref(alg.overlap), CartesianIndices(subax[i]))
for i in CartesianIndices(subax)]
# global dual variable
-p = zeros(SVector{d,Float64}, size(g))
+p = zeros(SVector{d,eltype(g)}, size(g))
# local buffer variables
-q = [extend(zeros(SVector{d,Float64}, length.(x)), StaticKernels.ExtensionNothing()) for x in subax]
+q = [extend(zeros(SVector{d,eltype(g)}, length.(x)), StaticKernels.ExtensionNothing()) for x in subax]
# create subproblem contexts
li = LinearIndices(ax)
@@ -132,6 +132,11 @@ fetch(ctx::DualTVDDState) = ctx.p
end
op_restrict(B::UniformScaling, ax, sax) = B
+function op_restrict(B::Diagonal, ax, sax)
+Bdiag = diag(B)
+li = LinearIndices(ax)
+return Diagonal(vec(collect(Bdiag[li[i]] for i in CartesianIndices(sax))))
+end
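# Worked example (not part of the commit): on a 4×4 grid, restricting to
# the 2×2 interior keeps the matching diagonal entries in column-major order:
#
#   ax, sax = (1:4, 1:4), (2:3, 2:3)
#   B = Diagonal(collect(1.0:16.0))
#   op_restrict(B, ax, sax)  # Diagonal([6.0, 7.0, 10.0, 11.0])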
function op_restrict(B, ax, sax)
li = LinearIndices(ax)
si = vec(li[sax...])
-using LinearAlgebra: UniformScaling
+using LinearAlgebra: UniformScaling, Diagonal
#"min_u 1/2 * |Au-f|_2^2 + λ*TV(u) + β/2 |u|_2 + γ |u|_1"
"min_p 1/2 * |p₂ - div(p₁) - g|_B^2 + χ_{|p₁|≤λ} + χ_{|p₂|≤γ}"
@@ -14,7 +14,8 @@ struct DualTVL1ROFOpProblem{Tg,TB,Tλ,Tγ} <: Problem
end
DualTVL1ROFOpProblem(g, B, λ) = DualTVL1ROFOpProblem(g, B, λ, nothing)
-DualTVL1ROFOpProblem(g, B, λ::Real) = DualTVL1ROFOpProblem(g, B, fill!(similar(g), λ))
+DualTVL1ROFOpProblem(g, B, λ::Real) = DualTVL1ROFOpProblem(g, B, fill!(similar(g, typeof(λ)), λ))
normB(p::DualTVL1ROFOpProblem{<:Any,<:UniformScaling}) = p.B.λ * sqrt(length(p.g))
normB(p::DualTVL1ROFOpProblem) = norm(p.B)
+normB(p::DualTVL1ROFOpProblem{<:Any,<:Diagonal}) = sqrt(sum(norm.(diag(p.B)) .^ 2))
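# Consistency check (not part of the commit): for B = c·I on an n-pixel
# image the Frobenius norm is c·√n, matching the UniformScaling method:
#
#   norm(Matrix(2.0I, 9, 9))  # == 2.0 * sqrt(9), i.e. normB with length(g) == 9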
@@ -32,26 +32,23 @@ end
function init(alg::ProjGradAlgorithm{<:DualTVL1ROFOpProblem})
g = alg.problem.g
+λ = alg.problem.λ
d = ndims(g)
-ax = axes(g)
-p = extend(zeros(SVector{d,Float64}, ax), StaticKernels.ExtensionNothing())
-ge = extend(g, StaticKernels.ExtensionNothing())
-rv = zeros(length(g))
-sv = zeros(length(g))
-r = reshape(rv, ax)
-s = extend(reshape(sv, ax), StaticKernels.ExtensionReplicate())
-z = zero(CartesianIndex{d})
+pv = zeros(d * length(reinterpret(Float64, g)))
+rv = zeros(eltype(g), length(g))
+sv = zero(rv)
+p = extend(reshape(reinterpret(SVector{d,eltype(g)}, pv), size(g)), StaticKernels.ExtensionNothing())
+r = reshape(rv, size(g))
+s = extend(reshape(sv, size(g)), StaticKernels.ExtensionReplicate())
-@inline kf1(pw) = @inbounds -divergence(pw) - ge[pw.position]
+@inline kf1(pw) = @inbounds -divergence(pw) - g[pw.position]
k1 = Kernel{ntuple(_->-1:1, d)}(kf1)
@inline function kf2(pw, sw)
-@inbounds q = pw[z] - alg.τ * gradient(sw)
-return @inbounds q / max(norm(q) / alg.problem.λ[sw.position], 1)
+@inbounds q = p[sw.position] - alg.τ * gradient(sw)
+return @inbounds q ./ max.(projnorm(q) ./ λ[sw.position], 1)
end
k2 = Kernel{ntuple(_->0:1, d)}(kf2)
using Revise
Revise.track(@__FILE__)
using LinearAlgebra: Diagonal
using Colors: Gray
using DualTVDD:
DualTVL1ROFOpProblem, ChambolleAlgorithm, ProjGradAlgorithm,
init, step!, fetch, recover_u
using ImageIO
using FileIO
using Plots
function InpaintProblem(img::AbstractArray{T,d}, imgmask::AbstractArray{Bool,d}, λ, β = 1e-5) where {T,d}
# B = (M + βI)^{-1} as a diagonal operator: entries 1/(1+β) on known
# pixels, 1/β on unknown ones
Bval = inv.(imgmask .+ β)
B = Diagonal(vec(Bval))
g = imgmask .* img
return DualTVL1ROFOpProblem(g, B, λ)
end
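# In ROF-operator form this is (a hedged reading of the constructor above,
# with M = diag(imgmask)):
#
#   min_u  (1/2)·Σ_i m_i·(u_i − f_i)² + (β/2)·|u|² + λ·TV(u),
#   B = (M + βI)⁻¹,   g = M·f,
#
# so the data term acts only on pixels the mask marks as known.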
function run_inpaint(img, imgmask, λ=0.5, β=0.001)
prob = InpaintProblem(img, imgmask, λ, β)
#alg = ChambolleAlgorithm(prob, τ=0.0001)
alg = ProjGradAlgorithm(prob, τ=0.0001)
ctx = init(alg)
for i in 1:10000
step!(ctx)
end
plot_inpaint(ctx)
return ctx
end
function plot_inpaint(ctx)
u = recover_u(fetch(ctx), ctx.algorithm.problem)
pltg = plot(Gray.(ctx.algorithm.problem.g), c=:grayC, legend=:none, framestyle=:none, aspect_ratio=1)
pltu = plot(Gray.(u), c=:grayC, legend=:none, framestyle=:none, aspect_ratio=1)
#plt = plot(pltg, pltu, layout=(1,2))
display(pltu)
end
img = Float64.(load("data/other-data-gray/RubberWhale/frame10.png"))
imgmask = rand(Bool, size(img))
#run_inpaint(img, imgmask)
using Revise
Revise.track(@__FILE__)
using LinearAlgebra: Diagonal, dot, norm, I
using Colors: HSV
using DualTVDD:
DualTVL1ROFOpProblem, ChambolleAlgorithm, ProjGradAlgorithm,
init, step!, fetch, recover_u, gradient
using FileIO
using ImageIO
using Plots
using StaticKernels: Kernel, ExtensionReplicate, extend
function OptFlowProblem(f0::AbstractArray{T,d}, f1::AbstractArray{T,d}, λ, β = 1e-5) where {T,d}
k∇ = Kernel{ntuple(_->0:1, d)}(gradient)
∇f0 = map(k∇, extend(f0, ExtensionReplicate()))
Bval = inv.(∇f0 .* adjoint.(∇f0) .+ β .* Ref(I))
B = Diagonal(vec(Bval))
g = .-(f1 .- f0) .* ∇f0
return DualTVL1ROFOpProblem(g, B, λ)
end
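# Linearized brightness-constancy data term behind this constructor
# (a hedged reading, pointwise in x):
#
#   min_u  Σ_x (1/2)·(∇f0(x)·u(x) + f1(x) − f0(x))² + (β/2)·|u(x)|² + λ·TV(u)
#
# With A(x) = ∇f0(x)ᵀ this gives AᵀA = ∇f0·∇f0ᵀ, hence
# B = (∇f0·∇f0ᵀ + βI)⁻¹ and g = −(f1 − f0)·∇f0 as above.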
function run_optflow(f0, f1, λ=0.01, β=0.001)
prob = OptFlowProblem(f0, f1, λ, β)
#alg = ChambolleAlgorithm(prob, τ=1/10000)
alg = ProjGradAlgorithm(prob, τ=1/10000)
#alg = ProjGradAlgorithm(prob)
#alg = DualTVDD.DualTVDDAlgorithm(prob, M=(2,2), overlap=(4,4))
ctx = init(alg)
for i in 1:10000
step!(ctx)
end
plot_optflow(ctx)
return ctx
end
color(v) = HSV(180 / pi * atan(v[1], v[2]), norm(v), 1)
function plot_optflow(ctx)
u = recover_u(fetch(ctx), ctx.algorithm.problem)
flowimg = color.(u)
plt = plot(flowimg)
display(plt)
end
f0 = Float64.(load("data/other-data-gray/RubberWhale/frame10.png"))
f1 = Float64.(load("data/other-data-gray/RubberWhale/frame11.png"))
#run_optflow(f0, f1)