Commit 2abf3020 authored by Stephan Hilb

cleanup: remove commented-out experiment, plotting and energy helper code

parent 671c2dff
@@ -13,186 +13,4 @@ include("projgrad.jl")
include("surrogate.jl") include("surrogate.jl")
#include("tvnewton.jl") #include("tvnewton.jl")
#using Plots: heatmap, plot
#
#plotimage(u) = heatmap(u, c=:grayC, legend=:none, framestyle=:none, aspect_ratio=1)
#
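# # record the energy of the initial iterate of `alg` and after each of `niter` iterations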
#function alg_energy(alg, niter)
# print("run ...")
# res = Float64[]
# (p, ctx) = iterate(alg)
# push!(res, energy(ctx))
# for i in 1:niter
# (p, ctx) = iterate(alg, ctx)
# push!(res, energy(ctx))
# end
# println(" finished")
# return res
#end
#
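# # like alg_energy, but record the energy-norm error against a reference minimizer `pmin`,
# # taking `ninner` steps between recordings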
#function alg_error(alg, pmin, niter, ninner=1)
# print("run ...")
# res = Float64[]
# ctx = init(alg)
# push!(res, error(fetch(ctx), pmin, ctx.algorithm.problem))
# for i in 1:niter
# for j in 1:ninner
# step!(ctx)
# end
# push!(res, error(fetch(ctx), pmin, ctx.algorithm.problem))
# end
# println(" finished")
# return res
#end
#
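# # reference energy and approximate dual minimizer via `niter` Chambolle steps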
#function calc_energy(prob, niter)
# alg_ref = DualTVDD.ChambolleAlgorithm(prob)
# ctx = init(alg_ref)
# for i in 1:niter
# ctx = step!(ctx)
# end
# return energy(ctx), fetch(ctx)
#end
#
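# # compare Chambolle against the domain decomposition algorithm (parallel and
# # sequential) on random data and plot the error decay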
#function rundd()
# λ = 2.5
# β = 0
# #f = zeros(100,100)
# #f[1] = 1
# #f = [0. 2; 1 0.]
#
#
# #A = 0. * rand(length(f), length(f))
# #A .+= diagm(ones(length(f)))
# #B = inv(A'*A + β*I)
#
# #g = similar(f)
# #vec(g) .= A' * vec(f)
#
# g = rand(50, 50)
#
# prob = DualTVDD.DualTVL1ROFOpProblem(g, I, λ)
#
# alg_ref = DualTVDD.ChambolleAlgorithm(prob)
# alg_dd = DualTVDD.DualTVDDAlgorithm(prob, M=(2,2), overlap=(4,4))
# alg_dd2 = DualTVDD.DualTVDDAlgorithm(prob, M=(2,2), overlap=(4,4), parallel=false)
#
# n = 1000
#
# ref_energy, pmin = calc_energy(prob, 100000)
#
# lognan(x) = x > 0 ? x : NaN
#
# #y = [
# # lognan.(alg_energy(alg_ref, n) .- ref_energy),
# # lognan.(alg_energy(alg_dd, n) .- ref_energy),
# # lognan.(alg_energy(alg_dd2, n) .- ref_energy),
# # ]
# y = [
# lognan.(alg_error(alg_ref, pmin, n, 10)),
# lognan.(alg_error(alg_dd, pmin, n)),
# lognan.(alg_error(alg_dd2, pmin, n)),
# ]
#
# plt = plot(y, xaxis=:log, yaxis=:log)
# display(plt)
#
# #display(energy(ctx))
# #display(ctx.p)
# #display(recover_u(p, prob))
#
# #println(energy(ctx))
# #println(energy(ctx2))
#end
#
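# # compare Chambolle and projected gradient on a random operator problem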
#function run3()
# f = rand(20,20)
# A = 0.1*rand(length(f), length(f))
# A .+= diagm(ones(length(f)))
#
# g = reshape(A'*vec(f), size(f))
#
# β = 0
# B = inv(A'*A + β*I)
# println(norm(A))
#
# λ = 0.1
#
# # Chambolle
# md = DualTVDD.DualTVL1ROFOpProblem(g, B, λ)
# alg = DualTVDD.ChambolleAlgorithm()
# ctx = DualTVDD.init(md, alg)
#
# # Projected Gradient
# md = DualTVDD.DualTVL1ROFOpProblem(f, A, λ, 0., 0.)
# alg = DualTVDD.ProjGradAlgorithm(λ = 1/norm(A)^2)
# ctx2 = DualTVDD.init(md, alg)
#
# for i in 1:100000
# step!(ctx)
# step!(ctx2)
# end
#
# #display(ctx.p)
# #display(ctx2.p)
# display(recover_u!(ctx))
# display(recover_u!(ctx2))
#
# println(energy(ctx))
# println(energy(ctx2))
#
# ctx, ctx2
#end
#
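# # energy of the current dual iterate stored in an algorithm state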
#function energy(ctx::Union{DualTVDDState,ProjGradState,ChambolleState})
# return energy(ctx.p, ctx.algorithm.problem)
#end
#
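# # relative error of `p` against `pmin`, measured in the energy norm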
#function error(p, pmin, prob::DualTVL1ROFOpProblem)
# return energy_norm(p .- pmin, prob) / energy(pmin, prob)
#end
#
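# # dual energy |div(p) + g|_B^2 / 2 of a dual variable `p`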
#function energy(p, prob::DualTVL1ROFOpProblem)
# d = ndims(p)
#
# @inline kfΛ(w) = @inbounds divergence(w)
# kΛ = Kernel{ntuple(_->-1:1, d)}(kfΛ)
#
# v = similar(prob.g)
#
# # v = div(p) + g
# map!(kΛ, v, extend(p, StaticKernels.ExtensionNothing()))
# v .+= prob.g
# #display(v)
#
# # |v|_B^2 / 2
# u = prob.B * vec(v)
# return sum(u .* vec(v)) / 2
#end
#
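# # energy norm term |div(p)|_B^2 / 2, used for the relative error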
#function energy_norm(p, prob::DualTVL1ROFOpProblem)
# d = ndims(p)
#
# @inline kfΛ(w) = @inbounds divergence(w)
# kΛ = Kernel{ntuple(_->-1:1, d)}(kfΛ)
#
# v = similar(prob.g)
#
# # v = div(p)
# map!(kΛ, v, extend(p, StaticKernels.ExtensionNothing()))
# #display(v)
#
# # |v|_B^2 / 2
# u = prob.B * vec(v)
# return sum(u .* vec(v)) / 2
#end
#
#
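# # primal energy |u - g|^2 / 2 + λ TV(u), discretized with forward differences
# # and replicated boundary values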
#function energy(md::DualTVL1ROFOpProblem, u::AbstractMatrix)
# @inline kf(w) = @inbounds 1/2 * (w[0,0] - md.g[w.position])^2 +
# md.λ * sqrt((w[1,0] - w[0,0])^2 + (w[0,1] - w[0,0])^2)
# k = Kernel{(0:1, 0:1)}(kf, StaticKernels.ExtensionReplicate())
# return sum(k, u)
#end
end # module
@@ -40,7 +40,6 @@ function init(alg::SurrogateAlgorithm)
r = reshape(rv, size(g))
s = reshape(sv, size(g))
#s = extend(reshape(sv, size(g)), StaticKernels.ExtensionReplicate())
@inline kf1(pw) = @inbounds -divergence(pw)
k1 = Kernel{ntuple(_->-1:1, d)}(kf1)
...