Commit 6d7a4abd authored by David Seus

add methods and flags to write solution to files

parent c86096d1
@@ -9,6 +9,8 @@ import typing as tp
import mshr
import domainPatch as dp
import helpers as hlp
import pandas as pd
class LDDsimulation(object):
""" Main Class of LDD simulation
@@ -19,7 +21,9 @@ class LDDsimulation(object):
def __init__(self, dimension: int = 2,#
tol: float = None):
hlp.print_once("\n ###############################################\n",
"############# Begin Simulation ################\n",
"###############################################\n")
hlp.print_once("Init LDD simulation...")
# parameters as set in NSAC_simulation by Lukas Ostrowski
hlp.print_once("Set internal FEniCS parameters...")
@@ -34,14 +38,18 @@ class LDDsimulation(object):
#Parameters
# df.parameters["allow_extrapolation"] = True
# df.parameters["refinement_algorithm"] = "plaza_with_parent_facets"
df.parameters["form_compiler"]["quadrature_degree"] = 10
df.parameters["form_compiler"]["quadrature_degree"] = 12
# interpolation degree for source terms, initial and boundary conditions.
self.interpolation_degree = 8
# # To be able to run DG in parallel
# df.parameters["ghost_mode"] = "shared_facet"
# df.parameters["ghost_mode"] = "none"
# Form compiler options
df.parameters["form_compiler"]["optimize"] = True
df.parameters["form_compiler"]["cpp_optimize"] = True
# df.parameters['std_out_all_processes'] = False
df.set_log_level(df.LogLevel.INFO)
# df.set_log_level(df.LogLevel.INFO)
@@ -53,7 +61,7 @@ class LDDsimulation(object):
# TRACE = 13, // what's happening (in detail)
# DBG = 10 // sundry
hlp.print_once("Set default parameter...")
hlp.print_once("Set default parameter...\n")
self.restart_file = None
# # write checkpoints
# self.write_checkpoints = True
@@ -61,6 +69,10 @@ class LDDsimulation(object):
# self.project_to_cg = False
# # no sponge zones
# self.sponge = False
# set up mpi for internal parallelisation.
self._mpi_rank = df.MPI.rank(df.MPI.comm_world)
self._mpi_communicator = df.MPI.comm_world
## Public variables.
# directory to write files output to.
@@ -81,13 +93,15 @@ class LDDsimulation(object):
## Private variables
# maximal number of L-iterations that the LDD solver uses.
self._max_iter_num = 100
self._max_iter_num = 300
# TODO rewrite this with regard to the mesh sizes
self.calc_tol = self.tol
# The number of subdomains is counted by self._init_meshes_and_markers()
self._number_of_subdomains = 0
# variable to check if self.set_parameters() has been called
self._parameters_set = False
# flag holds true if self.initialise() has been executed.
self._initialised = False
# dictionary of objects of class DomainPatch initialised by self._init_subdomains()
self.subdomain = dict()
# dictionary to hold the input Expressions for the external boundary. These are
@@ -111,6 +125,9 @@ class LDDsimulation(object):
# These are interpolated and stored to each respective subdomain by
# self._init_initial_values()
self.initial_conditions = None
# dictionary of filenames for writing the solution to xdmf files for each
# subdomain. This gets populated by self._init_solution_files()
self.solution_filename = None
########## SOLVER DEFINITION AND PARAMETERS ########
# print("\nLinear Algebra Backends:")
# df.list_linear_algebra_backends()
@@ -120,8 +137,19 @@ class LDDsimulation(object):
# df.list_krylov_solver_preconditioners()
### Define the linear solver to be used.
self.solver = 'bicgstab' # biconjugate gradient stabilized method
self.preconditioner = 'hypre_amg' # incomplete LU factorization
self.solver = 'bicgstab'  # biconjugate gradient stabilized method; alternative: 'gmres'
self.preconditioner = 'hypre_amg'  # algebraic multigrid preconditioner; alternative: 'ilu' (incomplete LU factorisation)
# dictionary of solver parameters. This is passed to self._init_subdomains,
# where for each subdomain a solver object of type self.solver is created
# with these parameters.
self.solver_parameters = {
'nonzero_initial_guess': True,
'absolute_tolerance': 1E-12,
'relative_tolerance': 1E-10,
'maximum_iterations': 1000,
'report': True
}
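# A minimal sketch of how these defaults could be overridden from a driver
# script (hypothetical, not part of this commit). The dictionary only takes
# effect once self.initialise() -> self._init_subdomains() builds the
# per-subdomain Krylov solvers:
#
#   simulation = LDDsimulation(tol=1E-14)
#   simulation.solver_parameters['relative_tolerance'] = 1E-8
#   simulation.solver_parameters['maximum_iterations'] = 500
#   # ... then set_parameters(), initialise() and run() as usual.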
def set_parameters(self,#
output_dir: str,#
@@ -138,6 +166,7 @@ class LDDsimulation(object):
lambda_param: tp.Dict[int, tp.Dict[str, float]],#
relative_permeability: tp.Dict[int, tp.Dict[str, tp.Callable[...,None]] ],#
saturation: tp.Dict[int, tp.Callable[...,None]],#
time_interval: tp.List[float],#
timestep_size: tp.Dict[int, float],#
sources: tp.Dict[int, tp.Dict[str, str]],#
initial_conditions: tp.Dict[int, tp.Dict[str, str]],#
@@ -160,6 +189,10 @@ class LDDsimulation(object):
self.lambda_param = lambda_param
self.relative_permeability = relative_permeability
self.saturation = saturation
# simulation start time and time variable
self.t = time_interval[0]
# end_time
self.T = time_interval[1]
self.timestep_size = timestep_size
self.sources = sources
self.initial_conditions = initial_conditions
@@ -175,18 +208,66 @@ class LDDsimulation(object):
"""
if not self._parameters_set:
raise RuntimeError('Parameters not set!\nYou forgot to run self.set_parameters(**kwds)')
print("initialise meshes and marker functions ...\n")
self._init_meshes_and_markers()
print("initialise interfaces ...\n")
self._init_interfaces()
print("initialise subdomains ...\n")
self._init_subdomains()
# initial values get initialised here to be able to call the solver at different
# timesteps without having to call self.run().
self._init_initial_values()
self._initialised = True
def run(self, analyse_solver_at_times: tp.List[float] = None):
""" run the simulation
This method implements the time stepping, calls the actual solvers and writes
the solutions to files. It is the main function of the simulation class. If you
want to understand this code, start reading here.
def LDDsolver(self, time: float, debug: bool = False):
PARAMETERS
analyse_solver_at_times     #type tp.List[float]    list of timesteps at which to
produce output to analyse the solver at the given timestep. This tells the
function at which timesteps to toggle the analyse_timestep flag of self.LDDsolver.
"""
timer = df.Timer("Simulation time")
# check if simulation has been initialised. If the user calls self.initialise()
# from his/her script, then the time for initialisation is not counted in
# the simulation time.
if not self._initialised:
print("Simulation has not been initialised by the user. \n",
"Initialising simulation now ... ")
self.initialise()
#
def LDDsolver(self, time: float = None, #
debug: bool = False, #
analyse_timestep: bool = False):
""" calulate the solution on all patches for the next timestep
The heart of the simulation. The function LDDsolver implements the LDD
iteration, that is solves the problem given the initial data at timestep
time.
"""
# in case no time is given, use the time of the class.
if time is None:
time = self.t
if analyse_timestep:
# dictionary holding filename strings for each subdomain to store
# iterations in, used to analyse the behaviour of the iterates within
# the current time step.
solution_over_iteration_within_timestep = dict()
subsequent_errors_over_iteration = dict()
# dictionary holding bool variables for each subdomain indicating whether
# the minimal error has been achieved, i.e. whether the calculation can be
# considered done or not.
@@ -194,20 +275,18 @@ class LDDsimulation(object):
# before the iteration gets started all iteration numbers must be nulled
# and the subdomain_calc_isdone dictionary must be set to False
for ind, subdomain in self.subdomain.items():
#create solver object for subdomains
if time < self.calc_tol:
subdomain.linear_solver = df.KrylovSolver(self.solver, self.preconditioner)
# we use the previous iteration as an initial guess for the linear solver.
solver_param = subdomain.linear_solver.parameters
solver_param['nonzero_initial_guess'] = True
# solver_param['absolute_tolerance'] = 1E-12
# solver_param['relative_tolerance'] = 1E-9
solver_param['maximum_iterations'] = 1000
solver_param['report'] = True
# ## print out set solver parameters
# for parameter, value in self.linear_solver.parameters.items():
# print(f"parameter: {parameter} = {value}")
# df.info(solver_param, True)
# create xdmf files for writing out iterations if analyse_timestep is
# given.
if analyse_timestep:
filename = self.output_dir+"subdomain"+"{}_".format(ind)+ "iterations_at_time"+ "{}".format(time) +".xdmf"
solution_over_iteration_within_timestep.update( #
{ind: SolutionFile(self._mpi_communicator, filename)}
)
subsequent_errors_over_iteration.update( {ind: dict()} )
# # we need a dictionary for each phase.
# for phase in subdomain.has_phases:
# subsequent_errors_over_iteration[ind].update( {phase: dict()} )
#
subdomain_calc_isdone.update({ind: False})
# reset all interface[has_interface].current_iteration[ind] = 0, for
@@ -215,10 +294,11 @@ class LDDsimulation(object):
subdomain.null_all_interface_iteration_numbers()
# update the time of the source terms before starting the iteration.
# The sources and sinks are the same throughout the iteration.
self._eval_sources(interpolation_degree = 3,
time = time,
self._eval_sources(time = time,
subdomain_index = ind)
self.update_DirichletBC_dictionary(time = time, subdomain_index = ind)
self.update_DirichletBC_dictionary(time = time, #
subdomain_index = ind,
)
# the following only needs to be done when time is not equal 0 since
# our initialisation functions already took care of setting what follows
# for the initial iteration step at time = 0.
@@ -247,32 +327,67 @@ class LDDsimulation(object):
# set subdomain iteration to current iteration
subdomain.iteration_number = iteration
if debug:
print(f"Entering iteration {iteration}")
print(f"On subdomain{sd_index}: Entering iteration {iteration}\n")
print(f"subdomain{sd_index}.isRichards = {subdomain.isRichards}.")
print(f"subdomain{sd_index}.has_phases = {subdomain.has_phases}.")
# solve the problem on subdomain
self.Lsolver_step(subdomain_index = sd_index,#
debug = debug,
)
subsequent_iter_error = dict()
for phase in subdomain.has_phases:
pa = subdomain.pressure[phase]
pa_prev_i = subdomain.pressure_prev_iter[phase]
subsequent_iter_error.update(
{phase: df.errornorm(pa, pa_prev_i, 'L2')}
)
total_subsequent_iter_error = max(subsequent_iter_error.values())
# check if error criterion has been met
if total_subsequent_iter_error < subdomain.mesh.hmin()/60:
subdomain_calc_isdone[sd_index] = True
if iteration > 0:
subsequent_iter_error = dict()
for phase in subdomain.has_phases:
error = df.Function(subdomain.function_space[phase])
error.assign(subdomain.pressure[phase] - subdomain.pressure_prev_iter[phase])
error_calculated = df.norm(error, 'L2')
subsequent_iter_error.update(
{phase: error_calculated}
)
print(f"the subsequent error in iteration {iteration} is {subsequent_iter_error[phase]}")
if analyse_timestep:
#
subsequent_errors_over_iteration[sd_index].update(
{ phase: error })
subsequent_error_filename = self.output_dir+"subdomain"+"{}_".format(sd_index)+\
"subsequent_iteration_error_for_phase_"+"{}".format(phase)+"_at_time"+ "{}".format(time) +".csv"
self.write_subsequent_errors(
filename = subsequent_error_filename, #
subdomain_index = sd_index,
subsequent_errors = subsequent_iter_error,
time = time)
# both phases should be done before we mark the subdomain as
# finished.
total_subsequent_iter_error = max(subsequent_iter_error.values())
# check if error criterion has been met
error_criterion = subdomain.mesh.hmin()**2*subdomain.timestep_size
if debug:
print(f" total_subsequent_error in iter {iteration} = {total_subsequent_iter_error }\n")
print(f"the grid size on subdomain{sd_index} is h={subdomain.mesh.hmin()}")
print(f"the error criterion is tol = {error_criterion}")
if total_subsequent_iter_error < error_criterion:
if debug:
print(f"subdomain{sd_index} reachd error tol = {error_criterion} in iteration {iteration}.")
subdomain_calc_isdone[sd_index] = True
# prepare next iteration
# write the newly calculated solution to the interface dictionaries
# for communication
subdomain.write_pressure_to_interfaces()
for phase in subdomain.has_phases:
if analyse_timestep:
subdomain.write_solution_to_xdmf(#
file = solution_over_iteration_within_timestep[sd_index], #
time = time,#
write_iter_for_fixed_time = True,
)#subsequent_errors = subsequent_errors_over_iteration[sd_index])
subdomain.pressure_prev_iter[phase].assign(
subdomain.pressure[phase]
)
else:
# one subdomain is done. Check if all subdomains are done to
# stop the while loop if needed.
@@ -340,7 +455,9 @@ class LDDsimulation(object):
# apply outer Dirichlet boundary conditions if present.
if subdomain.outer_boundary is not None:
self.outerBC[subdomain_index][phase].apply(form_assembled, rhs_assembled)
# apply Dirichlet boundary conditions for all outer boundary parts.
for index, boundary in enumerate(subdomain.outer_boundary):
self.outerBC[subdomain_index][index][phase].apply(form_assembled, rhs_assembled)
if debug and subdomain.mesh.num_cells() < 36:
print("\nSystem after applying outer boundary conditions:")
@@ -364,6 +481,76 @@ class LDDsimulation(object):
print("\npressure after solver:\n", subdomain.pressure[phase].vector().get_local())
def _init_solution_files(self):
""" set up solution files for saving the solution of the LDD scheme for
each subdomain.
"""
# set up solution file names for each subdomain.
self.solution_filename = dict()
for subdom_ind, subdomain in self.subdomain.items():
tau = subdomain.timestep_size
L = subdomain.L
h = subdomain.mesh.hmin()
if subdomain.isRichards:
Lam = subdomain.lambda_param['wetting']
filename = self.output_dir+"solution_subdomain" + "{}_".format(subdom_ind)#
+"dt" + "{}".format(tau)#
# only show three digits of the mesh size.
+"_hmin" + "{number:.{digits}f}".format(number=h, digits=3)#
+"_lambda" + "{}".format(Lam)
+ "_Lw" + "{}".format(L["wetting"])#
+".xdmf"
else:
Lam_w = subdomain.lambda_param['wetting']
Lam_nw = subdomain.lambda_param['nonwetting']
filename = self.output_dir+"solution_subdomain" + "{}_".format(subdom_ind)#
+"dt" + "{}".format(tau)#
# only show three digits of the mesh size.
+"_hmin" + "{number:.{digits}f}".format(number=h, digits=3)#
+"_lambda_w" + "{}".format(Lam_w)#
+"_lambda_nw" + "{}".format(Lam_nw)#
+"_Lw" + "{}".format(L["wetting"])#
+"_Lnw" + "{}".format(L["nonwetting"])
+".xdmf"
self.solution_filename.update( #
{subdom_ind: SolutionFile(self._mpi_communicator, filename)}
)
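# For illustration (hypothetical parameter values, not taken from this commit):
# with subdom_ind = 1, tau = 0.001, h ≈ 0.0292, Lam = 40 and L["wetting"] = 0.25
# the Richards branch above produces the file name
# solution_subdomain1_dt0.001_hmin0.029_lambda40_Lw0.25.xdmf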
def write_subsequent_errors(self,#
filename: str, #
subdomain_index: int,#
subsequent_errors: tp.Dict[str, float],#
time: float = None) -> None:
""" write subsequent errors to csv file for plotting with pgfplots"""
subdomain = self.subdomain[subdomain_index]
# in case no time is given, use the time of the class.
if time is None:
time = self.t
iteration = subdomain.iteration_number
if subdomain.isRichards:
self.errors = pd.DataFrame({ "iteration": iteration,
#"th. initial mass": df.assemble(rho*df.dx), \
#"energy": self.total_energy(), \
"wetting": subsequent_errors["wetting"]}, index=[iteration])
else:
self.errors = pd.DataFrame({"iteration":iteration, #\
#"th. initial mass": df.assemble(rho*df.dx), \
#"energy": self.total_energy(), \
"wetting": subsequent_errors["wetting"],
"nonwetting": subsequent_errors["nonwetting"]})
# if self._rank == 0:
# check if file exists
if os.path.isfile(filename):
with open(filename, 'a') as f:
self.errors.to_csv(f, header=False, sep='\t', encoding='utf-8', index=False)
else:
self.errors.to_csv(filename, sep='\t', encoding='utf-8', index=False)
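# The resulting file is tab separated with one row appended per iteration,
# e.g. (hypothetical numbers):
#
#   iteration    wetting      nonwetting
#   1            0.04638      0.05127
#   2            0.00912      0.01033
#
# which pgfplots can read directly, e.g. via \addplot table [col sep=tab, ...].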
## Private methods
def _init_meshes_and_markers(self, subdomain_def_points: tp.List[tp.List[df.Point]] = None,#
@@ -429,7 +616,12 @@ class LDDsimulation(object):
self.domain_marker = df.MeshFunction('size_t', mesh, mesh.topology().dim(), mesh.domains())
# extract subdomain meshes
for num in range(1, self._number_of_subdomains+1):
# if MPI.size(mpi_comm_world()) == 1:
self.mesh_subdomain.append(df.SubMesh(mesh, self.domain_marker, num))
# else:
# self.mesh_subdomain.append(create_submesh(mesh, self.domain_marker, num))
if self.write2file['meshes_and_markers']:
filepath = self.output_dir+"domain_marker.pvd"
@@ -489,20 +681,25 @@ class LDDsimulation(object):
internal=True,#
adjacent_subdomains = adjacent_subdomains[num],#
isRichards = self.isRichards,#
global_index = num)
global_index = num,
)
)#
# the marking number of each interface corresponds to the mathematical
# numbering of the interfaces (starting with 1 not 0 for the first interface).
# marker value gets internally set to global_index+1.
# The reason is that we want to mark outer boundaries with the value 0.
self.interface[num].mark(self.interface_marker, num+1)
self.interface[num].init_interface_values(self.interface_marker, num+1)
self.interface[num].mark(self.interface_marker, self.interface[num].marker_value)
print("Global interface vertices:")
self.interface[num].init_interface_values(self.interface_marker, self.interface[num].marker_value)
if self.write2file['meshes_and_markers']:
filepath = self.output_dir+"interface_marker_global.pvd"
df.File(filepath) << self.interface_marker
def _init_subdomains(self) -> None:
""" generate list of opjects of class DomainPatch.
""" generate dictionary of opjects of class DomainPatch.
This method generates a dictionary of opjects of class DomainPatch, containing
everything that is needed for the LDDsolver. Additionally a linear solver
object is created.
"""
# The list is_richards has the same length as the number of subdomains.
@@ -528,6 +725,20 @@ class LDDsimulation(object):
tol = self.tol)
}
)
# setup the linear solvers and set the solver parameters.
self.subdomain[subdom_num].linear_solver = df.KrylovSolver(self.solver, self.preconditioner)
# we use the previous iteration as an initial guess for the linear solver.
solver_param = self.subdomain[subdom_num].linear_solver.parameters
solver_param['nonzero_initial_guess'] = self.solver_parameters['nonzero_initial_guess']
solver_param['absolute_tolerance'] = self.solver_parameters['absolute_tolerance']
solver_param['relative_tolerance'] = self.solver_parameters['relative_tolerance']
solver_param['maximum_iterations'] = self.solver_parameters['maximum_iterations']
solver_param['report'] = self.solver_parameters['report']
# ## print out set solver parameters
# for parameter, value in self.linear_solver.parameters.items():
# print(f"parameter: {parameter} = {value}")
# df.info(solver_param, True)
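# Sketch of how a solver configured like this is typically invoked later in
# Lsolver_step (the actual call sits in a part of the diff that is collapsed
# here; the variable names are assumptions):
#
#   subdomain.linear_solver.solve(form_assembled,
#                                 subdomain.pressure[phase].vector(),
#                                 rhs_assembled)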
@@ -555,12 +766,15 @@ class LDDsimulation(object):
return interface_of_subdomain
def _init_initial_values(self, interpolation_degree: int = 2):
def _init_initial_values(self, interpolation_degree: int = None):
""" set initial values
This method populates the subdomain dictionaries as well as the interface
dictionaries with initial values needed for the LDDsolver iterations.
"""
if interpolation_degree is None:
interpolation_degree = self.interpolation_degree
for num, subdomain in self.subdomain.items():
mesh = subdomain.mesh
V = subdomain.function_space
@@ -585,10 +799,10 @@ class LDDsimulation(object):
subdomain.pressure_prev_timestep[phase].assign(pa0_interpolated)
# populate interface dictionaries with pressure values.
subdomain.write_pressure_to_interfaces(initial_values = True)
subdomain.write_pressure_to_interfaces()
def update_DirichletBC_dictionary(self, subdomain_index: int,#
interpolation_degree: int = 2, #
interpolation_degree: int = None, #
time: float = 0,#
):
""" update time of the dirichlet boundary object for subdomain with
@@ -597,6 +811,9 @@ class LDDsimulation(object):
If we are not dealing with a setup that has an exact solution, there should be
no expressions in self.dirichletBC_Expressions, so nothing should happen.
"""
if interpolation_degree is None:
interpolation_degree = self.interpolation_degree
num = subdomain_index
subdomain = self.subdomain[num]
# we only need to set Dirichlet boundary conditions if we have an outer
@@ -604,36 +821,58 @@ class LDDsimulation(object):
if subdomain.outer_boundary is not None:
mesh = subdomain.mesh
V = subdomain.function_space
# Here the dictionary has to be created first because t = 0.
if np.abs(time) < self.calc_tol:
self.outerBC.update({num: dict()})
self.dirichletBC_dfExpression.update({num: dict()})
pDC = self.dirichletBC_Expressions[num]
boundary_marker = subdomain.outer_boundary_marker
for phase in subdomain.has_phases:
# note that the default interpolation degree is 2
if np.abs(time) < self.tol :
# time = 0 so the Dolfin Expression has to be created first.
pDCa = df.Expression(pDC[phase], domain = mesh, degree = interpolation_degree, t=time)
self.dirichletBC_dfExpression[num].update({phase: pDCa})
else:
pDCa = self.dirichletBC_dfExpression[num][phase].t = time
self.outerBC[num].update(
{phase: df.DirichletBC(V[phase], pDCa, boundary_marker, 1)}
)
# loop over all outer boundary parts. This needs a bit of explanation:
# subdomain.outer_boundary gives the outer boundary parts of the subdomain.
# Since a domain patch can have several disjoint outer boundary parts, the
# expressions need to get an enumeration index which starts at 0. Accordingly,
# self.dirichletBC_Expressions[num][j] is the dictionary of outer Dirichlet
# conditions of the subdomain and boundary part j for the wetting and the
# nonwetting phase, so self.dirichletBC_Expressions[num][j]['wetting'] and
# self.dirichletBC_Expressions[num][j]['nonwetting'] return the actual
# expressions needed for the Dirichlet conditions of both phases, if present.
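# A sketch of the nesting this loop assumes (hypothetical values, not part of
# this commit):
#
#   self.dirichletBC_Expressions = {
#       1: {                                                        # subdomain index num
#           0: {'wetting': '1.0 - (1.0 + t*t)*(1.0 + x[0]*x[0])'},  # boundary part 0
#           1: {'wetting': '...', 'nonwetting': '...'},             # boundary part 1
#       },
#   }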
for index, boundary in enumerate(subdomain.outer_boundary):
if np.abs(time) < self.tol:
# Here the dictionaries have to be created first because t = 0.
# create a dictionary of dictionaries for each subdomain. setdefault is
# used so that entries already created for a previous boundary part of
# this subdomain are not overwritten.
self.outerBC.setdefault(num, dict())
self.dirichletBC_dfExpression.setdefault(num, dict())
# create a dictionary of dictionaries for each outer boundary
# part of the subdomain.
self.outerBC[num].update({index: dict()})
self.dirichletBC_dfExpression[num].update({index: dict()})
pDC = self.dirichletBC_Expressions[num][index]
boundary_marker = subdomain.outer_boundary_marker
# as with the interfaces: since the numbering of the outer boundary
# parts of each subdomain starts at 0 and subdomain.outer_boundary_marker
# gets initialised to 0 on all facets of the mesh, the marker value of an
# actual outer boundary part needs to be increased by 1.
boundary_marker_value = index+1
for phase in subdomain.has_phases:
# note that if no interpolation degree is given, self.interpolation_degree is used.
if np.abs(time) < self.tol :
# time = 0 so the Dolfin Expression has to be created first.
pDCa = df.Expression(pDC[phase], domain = mesh, degree = interpolation_degree, t=time)
self.dirichletBC_dfExpression[num][index].update({phase: pDCa})
else:
pDCa = self.dirichletBC_dfExpression[num][index][phase]
pDCa.t = time
self.outerBC[num][index].update(
{phase: df.DirichletBC(V[phase], pDCa, boundary_marker, boundary_marker_value)}
)
else:
# case subdomain.outer_boundary is None
print("subdomain is an inner subdomain and has no outer boundary.\n",
"no Dirichlet boundary has been set.")
def _eval_sources(self, subdomain_index: int, #
interpolation_degree: int = 2, #
interpolation_degree: int = None, #
time: float = 0,#
):
""" evaluate time dependent source terms or initialise them if time == 0
"""
if interpolation_degree is None:
interpolation_degree = self.interpolation_degree
subdomain = self.subdomain[subdomain_index]
for phase in subdomain.has_phases:
if np.abs(time) < self.calc_tol:
@@ -652,3 +891,17 @@ class LDDsimulation(object):
else:
# the source Expression already exists, so only its time needs to be updated.
subdomain.source[phase].t = time
# https://github.com/geo-fluid-dynamics/phaseflow-fenics
# adapted from Lucas Ostrowski
class SolutionFile(df.XDMFFile):
"""
This class extends `df.XDMFFile` with some minor changes for convenience.
"""
def __init__(self, mpicomm, filepath):
df.XDMFFile.__init__(self, mpicomm, filepath)
self.parameters["functions_share_mesh"] = True
self.parameters["flush_output"] = True
self.path = filepath # Mimic the file path attribute from a `file` returned by `open`
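# Usage sketch (hypothetical names, not part of this commit): one SolutionFile
# is opened per subdomain and written to repeatedly; functions_share_mesh and
# flush_output are already set in __init__ above.
#
#   file = SolutionFile(df.MPI.comm_world, "./output/solution_subdomain1.xdmf")
#   file.write(subdomain.pressure['wetting'], t)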