# chambollefp_dd.jl
#include("fd.jl")
using LinearAlgebra
using SparseArrays
import FD
import Colors: Gray
import Plots: plot
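# Chambolle-type fixed-point solver with an overlapping domain decomposition
# (apparently for TV-regularized denoising/reconstruction). Assumes the FD
# module (cf. the commented include of fd.jl above) provides the grid types
# (MetaGrid, GridFunction, AbstractGridFunction) and operators (gradient,
# divergence, pwmul, pw_norm, parts, indices) used below.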
# Display a scalar grid function as a grayscale image, rescaled to [0, 1].
# TODO: add clamp option
my_plot(img::FD.AbstractGridFunction{G,T,d,1}) where {G,T,d} =
    plot(reshape(Gray.((img .- minimum(img)) ./ (maximum(img) - minimum(img))), size(img.grid)), show=true)
"partition of unity function"
function theta(grid, idx...)
function h(r, rp, i)
i <= first(r) + grid.overlap && return 1.0
i >= last(r) - grid.overlap && return 1.0
(min(i - first(rp), last(rp) - i, grid.overlap) + 1) / (grid.overlap + 1)
end
vidx = FD.indices(grid, idx...)
ctheta = map(x -> [h(x[1], x[2], i) for i in x[2]], zip(axes(grid), vidx))
dst = zeros(size(grid))
for (I,J) in zip(CartesianIndices(vidx), CartesianIndices(length.(vidx)))
dst[I] = prod(getindex.(ctheta, Tuple(J)))
end
FD.GridFunction(grid, dst)
end
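# Recover the primal variable u from the dual variable p by solving
# (A'A + βI) u = α div(p) + A'f.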
function p2u(grid, p, A, f; α, β)
    FD.GridFunction(grid, (A'*A + β*I) \ (α .* FD.divergence(p).data[:] .+ A'*f.data[:]))
end
# TODO: investigate matrix multiplication performance with GridFunctions
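# One fixed-point iteration of the Chambolle-style dual update: recompute the
# auxiliary (primal-like) variable from the current dual iterate p and the
# fixed right-hand side w, take a dual gradient step of size τ with pointwise
# normalization, and return the new iterate together with a residual for the
# stopping test.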
function fpiter(grid, p, w, A, α, β, τ)
    # auxiliary variable for the current dual iterate
    x = FD.GridFunction(grid, (A'*A + β*I) \ (FD.divergence(p).data[:] .+ w ./ α))
    q = FD.gradient(x)
    # semi-implicit dual update: gradient step followed by pointwise normalization
    p_new = FD.GridFunction(grid, FD.pwmul(
        FD.GridFunction(grid, 1 ./ (1 .+ τ .* FD.pw_norm(q))),
        FD.GridFunction(grid, p .+ τ .* q)))
    # residual: largest pointwise squared change of the dual variable
    reserr = maximum(sum((p_new .- p).^2, dims=ndims(p)))
    (p_new, reserr)
end
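# Fixed-point algorithm: iterate fpiter from a zero dual variable until the
# residual drops below ϵ or the iteration cap is reached; returns the dual
# variable p (the primal solution can be recovered with p2u).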
function fpalg(grid, w, A; α, β, τ = 1/8, ϵ = 1e-4)
    p = FD.GridFunction(x -> 0, ndims(grid), grid)   # zero initial dual variable
    reserr = 1
    k = 0
    while reserr > ϵ && k < 300   # at most 300 fixed-point iterations
        k += 1
        (p, reserr) = fpiter(grid, p, w, A, α, β, τ)
        #println("[$(lpad(k, 4))] res = $(round(reserr, digits=5))")
        #println("min/max: $(minimum(p)) / $(maximum(p))")
    end
    p
end
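# Problem setup: unit square grid split into 3×3 overlapping subdomains
# (overlap width 5, per the MetaGrid arguments); A = I corresponds to plain
# denoising, and f is the indicator of a disc of radius 0.4 centred at (0.5, 0.5).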
grid = FD.MetaGrid((0:0.01:1, 0:0.01:1), (3, 3), 5)
A = I
#A = spdiagm(0 => ones(length(grid)), 1 => ones(length(grid)-1) )
#f = A * f
f = FD.GridFunction(x -> float(norm(x .- [0.5, 0.5]) < 0.4), 1, grid)
g = FD.GridFunction(x -> norm(x), 2, grid)
α = 1       # regularization weight (scales the divergence term in p2u, divides w in fpiter)
β = 1       # shift in the quadratic term A'A + βI
τ = 1/100   # dual step size passed to fpiter
ϵ = 1e-7    # stopping tolerance for the inner fixed-point loop (fpalg)
ρ = 0.25    # relaxation factor for the domain-decomposition update (dditer)
p = FD.GridFunction(x -> 0, 2, grid)   # global dual variable (two components per point), zero-initialized
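# One sweep of the overlapping domain decomposition: scale the global dual
# variable by (1-ρ), then, for each subdomain, build a local right-hand side w
# from the data and the dual variable weighted by (1-θ), solve the local
# fixed-point problem with subdomain-weighted regularization α*θ, and add ρ
# times the local solution back into p.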
function dditer(grid, p, A, f; α, β, τ, ϵ, ρ)
    p .= (1-ρ) .* p
    for part in FD.parts(grid)
        gridv = view(grid, Tuple(part)...)
        fv = view(f, Tuple(part)...)
        #p.data .= 0
        pv = view(p, Tuple(part)...)
        θ = theta(grid, Tuple(part)...)
        θv = view(θ, Tuple(part)...)
        # local right-hand side: data term plus coupling to the dual variable
        # weighted by (1 - θ) outside the current subdomain
        w = FD.GridFunction(grid, A' * f.data[:] - FD.divergence(FD.pwmul(FD.GridFunction(grid, 1 .- θ), p)))
        wv = view(w, Tuple(part)...)
        #wv = A' * fv
        # local solve with subdomain-weighted regularization, relaxed update
        pv .+= ρ .* fpalg(gridv, wv, A, α=α*θv, β=β, τ=τ, ϵ=ϵ)
        #u = p2u(grid, p, A, f, α=α*θ, β=β)
        u = p2u(grid, p, A, f, α=α, β=β)
        uv = view(u, Tuple(part)...)
        my_plot(θv)
        @show maximum(pv), minimum(pv)
        return   # FIXME: debugging leftover, stops after the first subdomain
    end
end
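# Run a single domain-decomposition sweep (the commented loop below would run
# several sweeps in succession).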
dditer(grid, p, A, f, α=α, β=β, τ=τ, ϵ=ϵ, ρ=ρ)
#for n = 1:10
# println("iteration $n")
# dditer(grid, p, A, f, α=α, β=β, τ=τ, ϵ=ϵ, ρ=ρ)
#end
#gridv = view(grid, 1, 1)
#fv = view(f, 1, 1)
#g = view(g, 1, 1)
#FD.my_plot(f)
#FD.plot(f)
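# Sketch (optional): recover and display the reconstructed image from the
# current dual variable p using the helpers defined above; left commented so
# the script's behaviour is unchanged.
#u = p2u(grid, p, A, f, α=α, β=β)
#my_plot(u)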