Commit d3ff68d5 authored by David

setup layered soil simulations

parent 9ffa5580
@@ -24,6 +24,8 @@ datestr = date.strftime("%Y-%m-%d")
 sym.init_printing()
 # solver_tol = 6E-7
 use_case = "TP-R-layered-soil-all-params-set-one-new-lambda"
+thisfile = "TP-R-layered_soil-all-params-one.py"
 max_iter_num = 500
 FEM_Lagrange_degree = 1
 mesh_study = False
@@ -603,11 +605,10 @@ for subdomain in isRichards.keys():
 # read this file and print it to std out. This way the simulation can produce a
 # log file with ./TP-R-layered_soil.py | tee simulation.log
-f = open('TP-R-layered_soil.py', 'r')
+f = open(thisfile, 'r')
 print(f.read())
 f.close()
 for starttime in starttimes:
     for mesh_resolution, solver_tol in resolutions.items():
         # initialise LDD simulation class
@@ -23,32 +23,34 @@ datestr = date.strftime("%Y-%m-%d")
 # init sympy session
 sym.init_printing()
 # solver_tol = 6E-7
-use_case = "TP-R-layered-soil-realistic-new-lambda"
-max_iter_num = 600
+use_case = "TP-R-layered-soil-realistic"
+# name of this very file. Needed for log output.
+thisfile = "TP-R-layered_soil.py"
+max_iter_num = 300
 FEM_Lagrange_degree = 1
 mesh_study = False
 resolutions = {
-    # 1: 1e-7, # h=2
-    # 2: 2e-5, # h=1.1180
-    # 4: 1e-6, # h=0.5590
-    # 8: 1e-6, # h=0.2814
-    # 16: 1e-6, # h=0.1412
-    32: 1e-6,
-    # 64: 5e-7,
-    # 128: 5e-7
+    # 1: 2e-6, # h=2
+    # 2: 2e-6, # h=1.1180
+    # 4: 2e-6, # h=0.5590
+    # 8: 2e-6, # h=0.2814
+    # 16: 2e-6, # h=0.1412
+    32: 2e-6,
+    # 64: 2e-6,
+    # 128: 2e-6
 }
 # GRID #######################
 # mesh_resolution = 20
-timestep_size = 0.005
-number_of_timesteps = 200
-plot_timestep_every = 2
+timestep_size = 0.001
+number_of_timesteps = 1000
+plot_timestep_every = 4
 # decide how many timesteps you want analysed. Analysed means, that we write
 # out subsequent errors of the L-iteration within the timestep.
-number_of_timesteps_to_analyse = 5
+number_of_timesteps_to_analyse = 4
 starttimes = [0.0]
-Lw1 = 0.25 # /timestep_size
+Lw1 = 0.025 # /timestep_size
 Lnw1 = Lw1
 Lw2 = 0.025 # /timestep_size
 Lnw2 = Lw2
@@ -57,8 +59,8 @@ Lnw3 = Lw3
 Lw4 = 0.025 # /timestep_size
 Lnw4 = Lw4
-lambda12_w = 4
-lambda12_nw = 4
+lambda12_w = 40
+lambda12_nw = 40
 lambda23_w = 40
 lambda23_nw = 40
 lambda34_w = 40
@@ -66,7 +68,7 @@ lambda34_nw = 40
 include_gravity = False
 debugflag = False
-analyse_condition = False
+analyse_condition = True
 if mesh_study:
     output_string = "./output/{}-{}_timesteps{}_P{}".format(
@@ -279,18 +281,28 @@ viscosity = {
 }
 # Dict of the form: { subdom_num : density }
+# densities = {
+#     1: {'wetting': 9.97, # 997
+#         'nonwetting': 0.01225}, # 1.225}},
+#     2: {'wetting': 9.97, # 997
+#         'nonwetting': 0.01225}, # 1.225}},
+#     3: {'wetting': 9.97, # 997
+#         'nonwetting': 0.01225}, # 1.225}},
+#     4: {'wetting': 9.97, # 997
+#         'nonwetting': 0.01225}, # 1.225}}
+# }
 densities = {
-    1: {'wetting': 9.97, # 997
-        'nonwetting': 0.01225}, # 1.225}},
-    2: {'wetting': 9.97, # 997
-        'nonwetting': 0.01225}, # 1.225}},
-    3: {'wetting': 9.97, # 997
-        'nonwetting': 0.01225}, # 1.225}},
-    4: {'wetting': 9.97, # 997
-        'nonwetting': 0.01225}, # 1.225}}
+    1: {'wetting': 997, # 997
+        'nonwetting': 1.225}, # 1.225}},
+    2: {'wetting': 997, # 997
+        'nonwetting': 1.225}, # 1.225}},
+    3: {'wetting': 997, # 997
+        'nonwetting': 1.225}, # 1.225}},
+    4: {'wetting': 997, # 997
+        'nonwetting': 1.225}, # 1.225}}
 }
 gravity_acceleration = 9.81
 # porosities taken from
 # https://www.geotechdata.info/parameter/soil-porosity.html
@@ -338,10 +350,10 @@ lambda_param = {
 # after Lewis, see pdf file
 intrinsic_permeability = {
-    1: 1, # sand
+    1: 0.1, # sand
     2: 0.1, # sand, there is a range
-    3: 10e-2, # clay has a range
-    4: 10e-4
+    3: 0.001, #10e-2, # clay has a range
+    4: 0.001, #10e-3
 }
@@ -698,9 +710,10 @@ for subdomain in isRichards.keys():
                 {outer_boundary_ind: exact_solution[subdomain]}
             )
 # read this file and print it to std out. This way the simulation can produce a
 # log file with ./TP-R-layered_soil.py | tee simulation.log
-f = open('TP-R-layered_soil.py', 'r')
+f = open(thisfile, 'r')
 print(f.read())
 f.close()
#!/usr/bin/python3
"""Layered soil simulation.
This program sets up an LDD simulation
"""
import dolfin as df
# import mshr
# import numpy as np
import sympy as sym
# import typing as tp
import functools as ft
# import domainPatch as dp
import LDDsimulation as ldd
import helpers as hlp
import datetime
import os
import pandas as pd
date = datetime.datetime.now()
datestr = date.strftime("%Y-%m-%d")
# init sympy session
sym.init_printing()
# solver_tol = 6E-7
use_case = "TP-R-layered-soil-realistic"
# name of this very file. Needed for log output.
thisfile = "TP-R-layered_soil.py"
max_iter_num = 300
FEM_Lagrange_degree = 1
mesh_study = True
resolutions = {
1: 2e-6, # h=2
2: 2e-6, # h=1.1180
4: 2e-6, # h=0.5590
8: 2e-6, # h=0.2814
16: 2e-6, # h=0.1412
32: 2e-6,
64: 2e-6,
128: 2e-6
}
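# Note: the keys of `resolutions` are the mesh resolution parameters and the
# values are the LDD solver tolerances used at that resolution (cf. the loop
# `for mesh_resolution, solver_tol in resolutions.items()` at the bottom of
# this file); the h values in the comments are the resulting mesh sizes.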
# GRID #######################
# mesh_resolution = 20
timestep_size = 0.001
number_of_timesteps = 800
plot_timestep_every = 4
# decide how many timesteps you want analysed. Analysed means that we write
# out subsequent errors of the L-iteration within the timestep.
number_of_timesteps_to_analyse = 4
starttimes = [0.0]
Lw1 = 0.025 # /timestep_size
Lnw1 = Lw1
Lw2 = 0.025 # /timestep_size
Lnw2 = Lw2
Lw3 = 0.025 # /timestep_size
Lnw3 = Lw3
Lw4 = 0.025 # /timestep_size
Lnw4 = Lw4
lambda12_w = 40
lambda12_nw = 40
lambda23_w = 40
lambda23_nw = 40
lambda34_w = 40
lambda34_nw = 40
include_gravity = False
debugflag = False
analyse_condition = True
if mesh_study:
    output_string = "./output/{}-{}_timesteps{}_P{}".format(
        datestr, use_case, number_of_timesteps, FEM_Lagrange_degree
    )
else:
    for tol in resolutions.values():
        solver_tol = tol
        output_string = "./output/{}-{}_timesteps{}_P{}_solver_tol{}".format(
            datestr, use_case, number_of_timesteps, FEM_Lagrange_degree, solver_tol
        )
# toggle what should be written to files
if mesh_study:
    write_to_file = {
        'space_errornorms': True,
        'meshes_and_markers': True,
        'L_iterations_per_timestep': False,
        'solutions': True,
        'absolute_differences': True,
        'condition_numbers': analyse_condition,
        'subsequent_errors': True
    }
else:
    write_to_file = {
        'space_errornorms': True,
        'meshes_and_markers': True,
        'L_iterations_per_timestep': False,
        'solutions': True,
        'absolute_differences': True,
        'condition_numbers': analyse_condition,
        'subsequent_errors': True
    }
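# Note: both branches currently request exactly the same outputs; the split is
# kept (presumably) so that the mesh-study and fixed-tolerance cases can be
# configured differently later on.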
# global domain
subdomain0_vertices = [
df.Point(-1.0, -1.0),
df.Point(1.0, -1.0),
df.Point(1.0, 1.0),
df.Point(-1.0, 1.0)
]
interface12_vertices = [df.Point(-1.0, 0.8),
df.Point(0.3, 0.8),
df.Point(0.5, 0.9),
df.Point(0.8, 0.7),
df.Point(1.0, 0.65)]
# subdomain1.
subdomain1_vertices = [
interface12_vertices[0],
interface12_vertices[1],
interface12_vertices[2],
interface12_vertices[3],
interface12_vertices[4], # southern boundary, 12 interface
subdomain0_vertices[2], # eastern boundary, outer boundary
subdomain0_vertices[3] # northern boundary, outer boundary
]
# vertex coordinates of the outer boundaries. If it cannot be specified as a
# polygon, use an entry per boundary polygon. This information is used for
# defining the Dirichlet boundary conditions. If a domain is completely
# internal, the dictionary entry should be 0: None
subdomain1_outer_boundary_verts = {
0: [interface12_vertices[4],
subdomain0_vertices[2], # eastern boundary, outer boundary
subdomain0_vertices[3],
interface12_vertices[0]]
}
# interface23
interface23_vertices = [
df.Point(-1.0, 0.0),
df.Point(-0.35, 0.0),
# df.Point(6.5, 4.5),
df.Point(0.0, 0.0),
df.Point(0.5, 0.0),
# df.Point(11.5, 3.5),
# df.Point(13.0, 3)
df.Point(0.85, 0.0),
df.Point(1.0, 0.0)
]
# subdomain2
subdomain2_vertices = [
interface23_vertices[0],
interface23_vertices[1],
interface23_vertices[2],
interface23_vertices[3],
interface23_vertices[4],
interface23_vertices[5], # southern boundary, 23 interface
subdomain1_vertices[4], # eastern boundary, outer boundary
subdomain1_vertices[3],
subdomain1_vertices[2],
subdomain1_vertices[1],
subdomain1_vertices[0] # northern boundary, 12 interface
]
subdomain2_outer_boundary_verts = {
0: [interface23_vertices[5],
subdomain1_vertices[4]],
1: [subdomain1_vertices[0],
interface23_vertices[0]]
}
# interface34
interface34_vertices = [df.Point(-1.0, -0.6),
df.Point(-0.6, -0.45),
df.Point(0.3, -0.25),
df.Point(0.65, -0.6),
df.Point(1.0, -0.7)]
# subdomain3
subdomain3_vertices = [
interface34_vertices[0],
interface34_vertices[1],
interface34_vertices[2],
interface34_vertices[3],
interface34_vertices[4], # southern boundary, 34 interface
subdomain2_vertices[5], # eastern boundary, outer boundary
subdomain2_vertices[4],
subdomain2_vertices[3],
subdomain2_vertices[2],
subdomain2_vertices[1],
subdomain2_vertices[0] # northern boundary, 23 interface
]
subdomain3_outer_boundary_verts = {
0: [interface34_vertices[4],
subdomain2_vertices[5]],
1: [subdomain2_vertices[0],
interface34_vertices[0]]
}
# subdomain4
subdomain4_vertices = [
subdomain0_vertices[0],
subdomain0_vertices[1], # southern boundary, outer boundary
subdomain3_vertices[4], # eastern boundary, outer boundary
subdomain3_vertices[3],
subdomain3_vertices[2],
subdomain3_vertices[1],
subdomain3_vertices[0]
] # northern boundary, 34 interface
subdomain4_outer_boundary_verts = {
0: [subdomain4_vertices[6],
subdomain4_vertices[0],
subdomain4_vertices[1],
subdomain4_vertices[2]]
}
subdomain_def_points = [
subdomain0_vertices,
subdomain1_vertices,
subdomain2_vertices,
subdomain3_vertices,
subdomain4_vertices
]
# interface_vertices introduces a global numbering of interfaces.
interface_def_points = [
interface12_vertices, interface23_vertices, interface34_vertices
]
adjacent_subdomains = [[1, 2], [2, 3], [3, 4]]
# if a subdomain has no outer boundary write None instead, i.e.
# i: None
# if i is the index of the inner subdomain.
outer_boundary_def_points = {
# subdomain number
1: subdomain1_outer_boundary_verts,
2: subdomain2_outer_boundary_verts,
3: subdomain3_outer_boundary_verts,
4: subdomain4_outer_boundary_verts
}
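# Example of the convention described above: if, say, subdomain 3 had no outer
# boundary at all, its entry here would simply read `3: None` instead of a
# dictionary of vertex lists.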
isRichards = {
1: True,
2: True,
3: False,
4: False
}
# isRichards = {
# 1: True,
# 2: True,
# 3: True,
# 4: True
# }
# Dict of the form: { subdom_num : viscosity }
viscosity = {
1: {'wetting': 1,
'nonwetting': 1/50},
2: {'wetting': 1,
'nonwetting': 1/50},
3: {'wetting': 1,
'nonwetting': 1/50},
4: {'wetting': 1,
'nonwetting': 1/50},
}
# Dict of the form: { subdom_num : density }
# densities = {
# 1: {'wetting': 9.97, # 997
# 'nonwetting': 0.01225}, # 1.225}},
# 2: {'wetting': 9.97, # 997
# 'nonwetting': 0.01225}, # 1.225}},
# 3: {'wetting': 9.97, # 997
# 'nonwetting': 0.01225}, # 1.225}},
# 4: {'wetting': 9.97, # 997
# 'nonwetting': 0.01225}, # 1.225}}
# }
densities = {
1: {'wetting': 997, # 997
'nonwetting': 1.225}, # 1.225}},
2: {'wetting': 997, # 997
'nonwetting': 1.225}, # 1.225}},
3: {'wetting': 997, # 997
'nonwetting': 1.225}, # 1.225}},
4: {'wetting': 997, # 997
'nonwetting': 1.225}, # 1.225}}
}
gravity_acceleration = 9.81
# porosities taken from
# https://www.geotechdata.info/parameter/soil-porosity.html
# Dict of the form: { subdom_num : porosity }
porosity = {
1: 0.37, # 0.2, # Clayey gravels, clayey sandy gravels
2: 0.22, # 0.22, # Silty gravels, silty sandy gravels
3: 0.2, # 0.37, # Clayey sands
4: 0.22, # 0.2 # Silty or sandy clay
}
# subdom_num : subdomain L for L-scheme
L = {
1: {'wetting': Lw1,
'nonwetting': Lnw1},
2: {'wetting': Lw2,
'nonwetting': Lnw2},
3: {'wetting': Lw3,
'nonwetting': Lnw3},
4: {'wetting': Lw4,
'nonwetting': Lnw4}
}
# interface_num : lambda parameter for the L-scheme on that interface.
# Note that interfaces are numbered starting from 0, because
# adjacent_subdomains is a list and not a dict. Historic fuckup, I know
lambda_param = {
0: {'wetting': lambda12_w,
'nonwetting': lambda12_nw},
1: {'wetting': lambda23_w,
'nonwetting': lambda23_nw},
2: {'wetting': lambda34_w,
'nonwetting': lambda34_nw},
}
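# With adjacent_subdomains = [[1, 2], [2, 3], [3, 4]] the keys above mean:
# interface 0 couples subdomains 1 and 2, interface 1 couples 2 and 3, and
# interface 2 couples 3 and 4.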
# lambda_param = {
# 1: {'wetting': lambda_w,
# 'nonwetting': lambda_nw},
# 2: {'wetting': lambda_w,
# 'nonwetting': lambda_nw},
# 3: {'wetting': lambda_w,
# 'nonwetting': lambda_nw},
# 4: {'wetting': lambda_w,
# 'nonwetting': lambda_nw},
# }
# after Lewis, see pdf file
intrinsic_permeability = {
1: 0.1, # sand
2: 0.1, # sand, there is a range
3: 0.001, #10e-2, # clay has a range
4: 0.001, #10e-3
}
# relative permeability functions on subdomain 1
def rel_perm1w(s):
    # relative permeability wetting on subdomain1
    return intrinsic_permeability[1]*s**2

def rel_perm1nw(s):
    # relative permeability nonwetting on subdomain1
    return intrinsic_permeability[1]*(1-s)**2

# relative permeability functions on subdomain 2
def rel_perm2w(s):
    # relative permeability wetting on subdomain2
    return intrinsic_permeability[2]*s**2

def rel_perm2nw(s):
    # relative permeability nonwetting on subdomain2
    return intrinsic_permeability[2]*(1-s)**2

# relative permeability functions on subdomain 3
def rel_perm3w(s):
    # relative permeability wetting on subdomain3
    return intrinsic_permeability[3]*s**3

def rel_perm3nw(s):
    # relative permeability nonwetting on subdomain3
    return intrinsic_permeability[3]*(1-s)**3

# relative permeability functions on subdomain 4
def rel_perm4w(s):
    # relative permeability wetting on subdomain4
    return intrinsic_permeability[4]*s**3

def rel_perm4nw(s):
    # relative permeability nonwetting on subdomain4
    return intrinsic_permeability[4]*(1-s)**3
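# Note: these "relative permeability" functions already include the intrinsic
# permeability of the respective layer, i.e. they return k_i*kr(s) rather than
# the bare relative permeability kr(s).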
_rel_perm1w = ft.partial(rel_perm1w)
_rel_perm1nw = ft.partial(rel_perm1nw)
_rel_perm2w = ft.partial(rel_perm2w)
_rel_perm2nw = ft.partial(rel_perm2nw)
_rel_perm3w = ft.partial(rel_perm3w)
_rel_perm3nw = ft.partial(rel_perm3nw)
_rel_perm4w = ft.partial(rel_perm4w)
_rel_perm4nw = ft.partial(rel_perm4nw)
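# ft.partial is used here without binding any arguments, so each _rel_perm*
# name is just a thin wrapper around the corresponding function above with an
# unchanged call signature.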
subdomain1_rel_perm = {
'wetting': _rel_perm1w,
'nonwetting': _rel_perm1nw
}
subdomain2_rel_perm = {
'wetting': _rel_perm2w,
'nonwetting': _rel_perm2nw
}
subdomain3_rel_perm = {
'wetting': _rel_perm3w,
'nonwetting': _rel_perm3nw
}
subdomain4_rel_perm = {
'wetting': _rel_perm4w,
'nonwetting': _rel_perm4nw
}
# dictionary of relative permeabilities on all subdomains.
# relative_permeability = {
# 1: subdomain1_rel_perm,
# 2: subdomain1_rel_perm,
# 3: subdomain2_rel_perm,
# 4: subdomain2_rel_perm
# }
relative_permeability = {
1: subdomain1_rel_perm,
2: subdomain2_rel_perm,
3: subdomain3_rel_perm,
4: subdomain4_rel_perm
}
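# Subdomains 1 and 2 (labelled sand above) thus use quadratic relative
# permeability curves, subdomains 3 and 4 (the clay-like layers) cubic ones;
# the derivative functions defined below have to match these exponents.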
# definition of the derivatives of the relative permeabilities
# relative permeability functions on subdomain 1
def rel_perm1w_prime(s):
    # derivative of the wetting relative permeability on subdomain1
    return intrinsic_permeability[1]*2*s

def rel_perm1nw_prime(s):
    # derivative of the nonwetting relative permeability on subdomain1
    return -1*intrinsic_permeability[1]*2*(1-s)

def rel_perm2w_prime(s):
    # derivative of the wetting relative permeability on subdomain2
    return intrinsic_permeability[2]*2*s

def rel_perm2nw_prime(s):
    # derivative of the nonwetting relative permeability on subdomain2
    return -1*intrinsic_permeability[2]*2*(1-s)

# definition of the derivatives of the relative permeabilities
# relative permeability functions on subdomain 3
def rel_perm3w_prime(s):
    # derivative of the wetting relative permeability on subdomain3
    return intrinsic_permeability[3]*3*s**2

def rel_perm3nw_prime(s):
    # derivative of the nonwetting relative permeability on subdomain3
    return -1*intrinsic_permeability[3]*3*(1-s)**2

# definition of the derivatives of the relative permeabilities
# relative permeability functions on subdomain 4
def rel_perm4w_prime(s):
    # derivative of the wetting relative permeability on subdomain4
    return intrinsic_permeability[4]*3*s**2

def rel_perm4nw_prime(s):
    # derivative of the nonwetting relative permeability on subdomain4
    return -1*intrinsic_permeability[4]*3*(1-s)**2
_rel_perm1w_prime = ft.partial(rel_perm1w_prime)
_rel_perm1nw_prime = ft.partial(rel_perm1nw_prime)
_rel_perm2w_prime = ft.partial(rel_perm2w_prime)
_rel_perm2nw_prime = ft.partial(rel_perm2nw_prime)
_rel_perm3w_prime = ft.partial(rel_perm3w_prime)
_rel_perm3nw_prime = ft.partial(rel_perm3nw_prime)
_rel_perm4w_prime = ft.partial(rel_perm4w_prime)
_rel_perm4nw_prime = ft.partial(rel_perm4nw_prime)
subdomain1_rel_perm_prime = {
'wetting': _rel_perm1w_prime,
'nonwetting': _rel_perm1nw_prime
}
subdomain2_rel_perm_prime = {
'wetting': _rel_perm2w_prime,
'nonwetting': _rel_perm2nw_prime
}
subdomain3_rel_perm_prime = {
'wetting': _rel_perm3w_prime,
'nonwetting': _rel_perm3nw_prime
}
subdomain4_rel_perm_prime = {
'wetting': _rel_perm4w_prime,
'nonwetting': _rel_perm4nw_prime
}
# dictionary of the relative permeability derivatives on all subdomains.
# ka_prime = {
# 1: subdomain1_rel_perm_prime,
# 2: subdomain1_rel_perm_prime,
# 3: subdomain2_rel_perm_prime,
# 4: subdomain2_rel_perm_prime
# }
ka_prime = {
1: subdomain1_rel_perm_prime,
2: subdomain2_rel_perm_prime,
3: subdomain3_rel_perm_prime,
4: subdomain4_rel_perm_prime
}
# S-pc relationship. We use the van Genuchten approach, i.e.
# pc = 1/alpha*(S^{-1/m} - 1)^{1/n}, where we assume m = 1 - 1/n (see Helmig)
# and neglect residual saturation.
# The saturation needs to be monotonically decreasing in the capillary pressure
# pc. Since in the Richards case pc = -pw, as a function of pw it becomes a
# monotonically INCREASING function, as in our Richards-Richards paper.
# However, since the code treats Richards and two-phase in a unified way, we
# impose the same requirement in both cases, two-phase and Richards.
def saturation(pc, n_index, alpha):
    # inverse capillary pressure-saturation-relationship
    expr = df.conditional(
        pc > 0, 1/((1 + (alpha*pc)**n_index)**((n_index - 1)/n_index)), 1)
    return expr
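# The df.conditional above clamps the saturation to 1 (full wetting saturation)
# for non-positive capillary pressure; for pc > 0 it evaluates the van Genuchten
# curve S(pc) = (1 + (alpha*pc)**n)**(-(n-1)/n).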
# S-pc relationship (symbolic version). Same van Genuchten parametrisation as
# above: pc = 1/alpha*(S^{-1/m} - 1)^{1/n} with m = 1 - 1/n and no residual
# saturation.
def saturation_sym(pc, n_index, alpha):
    # inverse capillary pressure-saturation-relationship
    # df.conditional(pc > 0,
    return 1/((1 + (alpha*pc)**n_index)**((n_index - 1)/n_index))
# derivative of the S-pc relationship with respect to pc. This is needed for
# the construction of an analytic solution.
def saturation_sym_prime(pc, n_index, alpha):
    # derivative of the inverse capillary pressure-saturation-relationship
    expr = -(alpha*(n_index - 1)*(alpha*pc)**(n_index - 1))\
        / ((1 + (alpha*pc)**n_index)**((2*n_index - 1)/n_index))
    return expr
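# Optional sanity check (not executed; uncomment to compare the hand-coded
# derivative with sympy's own differentiation for one parameter set):
# _pc_check = sym.symbols('pc_check', positive=True)
# _residual = sym.simplify(
#     saturation_sym_prime(_pc_check, n_index=3, alpha=0.001)
#     - sym.diff(saturation_sym(_pc_check, n_index=3, alpha=0.001), _pc_check)
# )
# print("S'(pc) consistency check (should be ~0):", _residual)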
# note that the conditional definition of S-pc in the nonsymbolic part will be
# incorporated in the construction of the exact solution below.
S_pc_sym = {
1: ft.partial(saturation_sym, n_index=3, alpha=0.001),
2: ft.partial(saturation_sym, n_index=3, alpha=0.001),
3: ft.partial(saturation_sym, n_index=6, alpha=0.001),
4: ft.partial(saturation_sym, n_index=6, alpha=0.001)
}
S_pc_sym_prime = {
1: ft.partial(saturation_sym_prime, n_index=3, alpha=0.001),
2: ft.partial(saturation_sym_prime, n_index=3, alpha=0.001),
3: ft.partial(saturation_sym_prime, n_index=6, alpha=0.001),
4: ft.partial(saturation_sym_prime, n_index=6, alpha=0.001)
}
sat_pressure_relationship = {
1: ft.partial(saturation, n_index=3, alpha=0.001),
2: ft.partial(saturation, n_index=3, alpha=0.001),
3: ft.partial(saturation, n_index=6, alpha=0.001),
4: ft.partial(saturation, n_index=6, alpha=0.001)
}
#############################################
# Manufacture source expressions with sympy #
#############################################
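# The pressures p_e_sym below are prescribed exact solutions (method of
# manufactured solutions). hlp.generate_exact_solution_expressions takes these
# symbolic expressions further down and constructs the matching source terms,
# initial conditions and the exact-solution expressions that are reused for the
# Dirichlet data, so the numerical solution can be compared against a known
# reference.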
x, y = sym.symbols('x[0], x[1]') # needed by UFL
t = sym.symbols('t', positive=True)
p_e_sym_2patch = {
1: {'wetting': -6 - (1+t*t)*(1 + x*x + y*y),
'nonwetting': 0.0*t}, # -1-t*(1.1 + y + x**2)**2},
2: {'wetting': -6.0 - (1.0 + t*t)*(1.0 + x*x),
'nonwetting': (-1-t*(1.1+y + x**2))*y**2},
# 'nonwetting': (-1-t*(1.1 + x**2)**2 - sym.sqrt(5+t**2))*y**2},
}
p_e_sym = {
1: {'wetting': p_e_sym_2patch[1]['wetting'],
'nonwetting': p_e_sym_2patch[1]['nonwetting']},
2: {'wetting': p_e_sym_2patch[1]['wetting'],
'nonwetting': p_e_sym_2patch[1]['nonwetting']},
3: {'wetting': p_e_sym_2patch[2]['wetting'],
'nonwetting': p_e_sym_2patch[2]['nonwetting']},
4: {'wetting': p_e_sym_2patch[2]['wetting'],
'nonwetting': p_e_sym_2patch[2]['nonwetting']}
}
# p_e_sym = {
# 1: {'wetting': 1.0 - (1.0 + t*t)*(10.0 + x*x + (y-5.0)*(y-5.0)),
# 'nonwetting':
# -2 - t*(1 + (y - 5.0) + x**2)**2
# - sym.sqrt(2 + t**2)*(1 + (y - 5.0))
# },
# 2: {'wetting': 1.0 - (1.0 + t*t)*(10.0 + x*x + (y-5.0)*(y-5.0)),
# 'nonwetting':
# - 2 - t*(1 + (y - 5.0) + x**2)**2
# - sym.sqrt(2 + t**2)*(1 + (y - 5.0))
# },
# 3: {'wetting':
# 1.0 - (1.0 + t*t)*(10.0 + x*x + (y - 5.0)*(y - 5.0))
# - (y - 5.0)*(y - 5.0)*3*sym.sin(-2*t + 2*x)*sym.sin(1/2*y - 1.2*t),
# 'nonwetting': - 2 - t*(1 + x**2)**2 - sym.sqrt(2 + t**2)
# },
# 4: {'wetting':
# 1.0 - (1.0 + t*t)*(10.0 + x*x + (y - 5.0)*(y - 5.0))
# - (y - 5.0)*(y - 5.0)*3*sym.sin(-2*t + 2*x)*sym.sin(1/2*y - 1.2*t),
# 'nonwetting': - 2 - t*(1 + x**2)**2 - sym.sqrt(2 + t**2)
# }
# }
pc_e_sym = dict()
for subdomain, isR in isRichards.items():
    if isR:
        pc_e_sym.update({subdomain: -p_e_sym[subdomain]['wetting']})
    else:
        pc_e_sym.update(
            {subdomain: p_e_sym[subdomain]['nonwetting']
             - p_e_sym[subdomain]['wetting']}
        )
symbols = {"x": x,
"y": y,
"t": t}
# turn the above symbolic expressions into an exact solution for dolfin and
# construct the rhs that matches the above exact solution.
exact_solution_example = hlp.generate_exact_solution_expressions(
symbols=symbols,
isRichards=isRichards,
symbolic_pressure=p_e_sym,
symbolic_capillary_pressure=pc_e_sym,
saturation_pressure_relationship=S_pc_sym,
saturation_pressure_relationship_prime=S_pc_sym_prime,
viscosity=viscosity,
porosity=porosity,
relative_permeability=relative_permeability,
relative_permeability_prime=ka_prime,
densities=densities,
gravity_acceleration=gravity_acceleration,
include_gravity=include_gravity,
)
source_expression = exact_solution_example['source']
exact_solution = exact_solution_example['exact_solution']
initial_condition = exact_solution_example['initial_condition']
# Dictionary of Dirichlet boundary conditions.
dirichletBC = dict()
# Similarly to the outer boundary dictionary, if a patch has no outer boundary,
# None should be written instead of an expression.
# This is a bit of a brainfuck:
# dirichletBC[ind] gives a dictionary of the outer boundaries of subdomain ind.
# Since a domain patch can have several disjoint outer boundary parts, the
# expressions get an enumeration index which starts at 0.
# So dirichletBC[ind][j] is the dictionary of outer Dirichlet conditions of
# subdomain ind and boundary part j.
# Finally, dirichletBC[ind][j]['wetting'] and dirichletBC[ind][j]['nonwetting']
# return the actual expressions needed for the Dirichlet condition of each
# phase, if present.
# Structure: {subdomain index: {outer boundary part index: {phase: expression}}}
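# Example: dirichletBC[2][1]['wetting'] is the Dirichlet expression for the
# wetting phase on outer boundary part 1 of subdomain 2; in this script every
# such entry is simply the corresponding exact solution expression.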
for subdomain in isRichards.keys():
    # if subdomain has no outer boundary, outer_boundary_def_points[subdomain]
    # is None
    if outer_boundary_def_points[subdomain] is None:
        dirichletBC.update({subdomain: None})
    else:
        dirichletBC.update({subdomain: dict()})
        # set the Dirichlet conditions to be the same as the exact solution on
        # the subdomain.
        for outer_boundary_ind in outer_boundary_def_points[subdomain].keys():
            dirichletBC[subdomain].update(
                {outer_boundary_ind: exact_solution[subdomain]}
            )
# read this file and print it to std out. This way the simulation can produce a
# log file with ./TP-R-layered_soil.py | tee simulation.log
f = open(thisfile, 'r')
print(f.read())
f.close()
for starttime in starttimes:
    for mesh_resolution, solver_tol in resolutions.items():
        # initialise LDD simulation class
        simulation = ldd.LDDsimulation(
            tol=1E-14,
            LDDsolver_tol=solver_tol,
            debug=debugflag,
            max_iter_num=max_iter_num,
            FEM_Lagrange_degree=FEM_Lagrange_degree,
            mesh_study=mesh_study
        )
        simulation.set_parameters(
            use_case=use_case,
            output_dir=output_string,
            subdomain_def_points=subdomain_def_points,
            isRichards=isRichards,
            interface_def_points=interface_def_points,
            outer_boundary_def_points=outer_boundary_def_points,
            adjacent_subdomains=adjacent_subdomains,
            mesh_resolution=mesh_resolution,
            viscosity=viscosity,
            porosity=porosity,
            L=L,
            lambda_param=lambda_param,
            relative_permeability=relative_permeability,
            saturation=sat_pressure_relationship,
            starttime=starttime,
            number_of_timesteps=number_of_timesteps,
            number_of_timesteps_to_analyse=number_of_timesteps_to_analyse,
            plot_timestep_every=plot_timestep_every,
            timestep_size=timestep_size,
            sources=source_expression,
            initial_conditions=initial_condition,
            dirichletBC_expression_strings=dirichletBC,
            exact_solution=exact_solution,
            densities=densities,
            include_gravity=include_gravity,
            write2file=write_to_file,
        )
        simulation.initialise()
        output_dir = simulation.output_dir
        # simulation.write_exact_solution_to_xdmf()
        output = simulation.run(analyse_condition=analyse_condition)
        for subdomain_index, subdomain_output in output.items():
            mesh_h = subdomain_output['mesh_size']
            for phase, error_dict in subdomain_output['errornorm'].items():
                filename = output_dir \
                    + "subdomain{}".format(subdomain_index)\
                    + "-space-time-errornorm-{}-phase.csv".format(phase)
                # for errortype, errornorm in error_dict.items():
                # eocfile = open("eoc_filename", "a")
                # eocfile.write( str(mesh_h) + " " + str(errornorm) + "\n" )
                # eocfile.close()
                # if subdomain.isRichards:mesh_h
                data_dict = {
                    'mesh_parameter': mesh_resolution,
                    'mesh_h': mesh_h,
                }
                for norm_type, errornorm in error_dict.items():
                    data_dict.update(
                        {norm_type: errornorm}
                    )
                errors = pd.DataFrame(data_dict, index=[mesh_resolution])
                # check if file exists
                if os.path.isfile(filename) is True:
                    with open(filename, 'a') as f:
                        errors.to_csv(
                            f,
                            header=False,
                            sep='\t',
                            encoding='utf-8',
                            index=False
                        )
                else:
                    errors.to_csv(
                        filename,
                        sep='\t',
                        encoding='utf-8',
                        index=False
                    )
#!/bin/bash
[ $# -eq 0 ] && { echo "Usage: $0 simulation_file [logfile_name]"; exit 1; }
SIMULATION_FILE=$1
SIMULATION=${SIMULATION_FILE%.py}
LOGFILE_DEFAULT="$SIMULATION.log"
DATE=$(date -I)
LOGFILE=${2:-$DATE-$LOGFILE_DEFAULT}
GREETING="Simulation $SIMULATION is run on $DATE by $USER"
echo "$GREETING"
echo "running $SIMULATION_FILE | tee $LOGFILE"
./"$SIMULATION_FILE" | tee "$LOGFILE"
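# Usage sketch (assuming the wrapper above is saved as, e.g., run_simulation.sh
# and made executable):
#   ./run_simulation.sh TP-R-layered_soil.py
# runs ./TP-R-layered_soil.py and tees its output (including the script source
# it prints about itself) into <date>-TP-R-layered_soil.log.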
@@ -20,6 +20,7 @@ datestr = date.strftime("%Y-%m-%d")
 sym.init_printing()
 use_case = "TP-R-2-patch-realistic-new-implementation"
+thisfile = "TP-R-2-patch-test.py"
 # solver_tol = 6E-7
 max_iter_num = 1000
 FEM_Lagrange_degree = 1
@@ -56,7 +57,17 @@ include_gravity = True
 debugflag = True
 analyse_condition = True
-output_string = "./output/{}-{}_timesteps{}_P{}".format(datestr, use_case, number_of_timesteps, FEM_Lagrange_degree)
+if mesh_study:
+    output_string = "./output/{}-{}_timesteps{}_P{}".format(
+        datestr, use_case, number_of_timesteps, FEM_Lagrange_degree
+    )
+else:
+    for tol in resolutions.values():
+        solver_tol = tol
+        output_string = "./output/{}-{}_timesteps{}_P{}_solver_tol{}".format(
+            datestr, use_case, number_of_timesteps, FEM_Lagrange_degree, solver_tol
+        )
 # toggle what should be written to files
 if mesh_study:
@@ -433,6 +444,10 @@ for subdomain in isRichards.keys():
                 {outer_boundary_ind: exact_solution[subdomain]}
             )
+f = open(thisfile, 'r')
+print(f.read())
+f.close()
 for starttime in starttimes:
     for mesh_resolution, solver_tol in resolutions.items():
@@ -446,7 +461,8 @@ for starttime in starttimes:
             mesh_study=mesh_study
         )
-        simulation.set_parameters(use_case=use_case,
+        simulation.set_parameters(
+            use_case=use_case,
             output_dir=output_string,
             subdomain_def_points=subdomain_def_points,
             isRichards=isRichards,
@@ -480,9 +496,11 @@ for starttime in starttimes:
         output = simulation.run(analyse_condition=analyse_condition)
         for subdomain_index, subdomain_output in output.items():
             mesh_h = subdomain_output['mesh_size']
-            for phase, different_errornorms in subdomain_output['errornorm'].items():
-                filename = output_dir + "subdomain{}-space-time-errornorm-{}-phase.csv".format(subdomain_index, phase)
-                # for errortype, errornorm in different_errornorms.items():
+            for phase, error_dict in subdomain_output['errornorm'].items():
+                filename = output_dir \
+                    + "subdomain{}".format(subdomain_index)\
+                    + "-space-time-errornorm-{}-phase.csv".format(phase)
+                # for errortype, errornorm in error_dict.items():
                 # eocfile = open("eoc_filename", "a")
                 # eocfile.write( str(mesh_h) + " " + str(errornorm) + "\n" )
@@ -492,14 +510,25 @@ for starttime in starttimes:
                     'mesh_parameter': mesh_resolution,
                     'mesh_h': mesh_h,
                 }
-                for error_type, errornorms in different_errornorms.items():
+                for norm_type, errornorm in error_dict.items():
                     data_dict.update(
-                        {error_type: errornorms}
+                        {norm_type: errornorm}
                     )
                 errors = pd.DataFrame(data_dict, index=[mesh_resolution])
                 # check if file exists
-                if os.path.isfile(filename) == True:
+                if os.path.isfile(filename) is True:
                     with open(filename, 'a') as f:
-                        errors.to_csv(f, header=False, sep='\t', encoding='utf-8', index=False)
+                        errors.to_csv(
+                            f,
+                            header=False,
+                            sep='\t',
+                            encoding='utf-8',
+                            index=False
+                        )
                 else:
-                    errors.to_csv(filename, sep='\t', encoding='utf-8', index=False)
+                    errors.to_csv(
+                        filename,
+                        sep='\t',
+                        encoding='utf-8',
+                        index=False
+                    )
@@ -20,6 +20,7 @@ datestr = date.strftime("%Y-%m-%d")
 sym.init_printing()
 use_case = "TPR-2-desities-scaled-down"
+thisfile = "TP-R-2-realistic-parameters-densities-scaled.py"
 # solver_tol = 6E-7
 max_iter_num = 500
 FEM_Lagrange_degree = 1
@@ -57,11 +58,15 @@ debugflag = False
 analyse_condition = True
 if mesh_study:
-    output_string = "./output/{}-{}_timesteps{}_P{}".format(datestr, use_case, number_of_timesteps, FEM_Lagrange_degree)
+    output_string = "./output/{}-{}_timesteps{}_P{}".format(
+        datestr, use_case, number_of_timesteps, FEM_Lagrange_degree
+    )
 else:
     for tol in resolutions.values():
         solver_tol = tol
-        output_string = "./output/{}-{}_timesteps{}_P{}_solver_tol{}".format(datestr, use_case, number_of_timesteps, FEM_Lagrange_degree, solver_tol)
+        output_string = "./output/{}-{}_timesteps{}_P{}_solver_tol{}".format(
+            datestr, use_case, number_of_timesteps, FEM_Lagrange_degree, solver_tol
+        )
 # toggle what should be written to files
 if mesh_study:
@@ -188,19 +193,12 @@ L = {#
 }
-lambda_param = {#
+lambda_param = {
     # subdom_num : lambda parameter for the L-scheme
     0 : {'wetting' :lambda_w,
-         'nonwetting': lambda_nw},#
+         'nonwetting': lambda_nw},
 }
-# intrinsic_permeability = {
-#     1: {"wetting": 1,
-#         "nonwetting": 1},
-#     2: {"wetting": 1,
-#         "nonwetting": 1},
-# }
 intrinsic_permeability = {
     1: 1,
     2: 1,
@@ -459,7 +457,7 @@ for subdomain in isRichards.keys():
 #
 # sa
-f = open('TP-R-2-patch-mesh-study.py', 'r')
+f = open(thisfile, 'r')
 print(f.read())
 f.close()
@@ -476,7 +474,8 @@ for starttime in starttimes:
             mesh_study=mesh_study
         )
-        simulation.set_parameters(use_case=use_case,
+        simulation.set_parameters(
+            use_case=use_case,
             output_dir=output_string,
             subdomain_def_points=subdomain_def_points,
             isRichards=isRichards,
@@ -510,9 +509,11 @@ for starttime in starttimes:
         output = simulation.run(analyse_condition=analyse_condition)
        for subdomain_index, subdomain_output in output.items():
            mesh_h = subdomain_output['mesh_size']
-            for phase, different_errornorms in subdomain_output['errornorm'].items():
-                filename = output_dir + "subdomain{}-space-time-errornorm-{}-phase.csv".format(subdomain_index, phase)
-                # for errortype, errornorm in different_errornorms.items():
+            for phase, error_dict in subdomain_output['errornorm'].items():
+                filename = output_dir \
+                    + "subdomain{}".format(subdomain_index)\
+                    + "-space-time-errornorm-{}-phase.csv".format(phase)
+                # for errortype, errornorm in error_dict.items():
                 # eocfile = open("eoc_filename", "a")
                 # eocfile.write( str(mesh_h) + " " + str(errornorm) + "\n" )
@@ -522,14 +523,25 @@ for starttime in starttimes:
                     'mesh_parameter': mesh_resolution,
                     'mesh_h': mesh_h,
                 }
-                for error_type, errornorms in different_errornorms.items():
+                for norm_type, errornorm in error_dict.items():
                     data_dict.update(
-                        {error_type: errornorms}
+                        {norm_type: errornorm}
                    )
                 errors = pd.DataFrame(data_dict, index=[mesh_resolution])
                 # check if file exists
-                if os.path.isfile(filename) == True:
+                if os.path.isfile(filename) is True:
                     with open(filename, 'a') as f:
-                        errors.to_csv(f, header=False, sep='\t', encoding='utf-8', index=False)
+                        errors.to_csv(
+                            f,
+                            header=False,
+                            sep='\t',
+                            encoding='utf-8',
+                            index=False
+                        )
                 else:
-                    errors.to_csv(filename, sep='\t', encoding='utf-8', index=False)
+                    errors.to_csv(
+                        filename,
+                        sep='\t',
+                        encoding='utf-8',
+                        index=False
+                    )