From 423d451762eb0861190602a1ae353e77eb12661e Mon Sep 17 00:00:00 2001 From: David Seus <david.seus@ians.uni-stuttgart.de> Date: Wed, 28 Aug 2019 17:40:49 +0200 Subject: [PATCH] verify that interface.dofs_and_coordinates, init_facet_dictionaries and init_facet_maps work --- LDDsimulation/LDDsimulation.py | 18 +- LDDsimulation/boundary_and_interface.py | 146 ++++++++--- LDDsimulation/domainPatch.py | 26 +- .../RR-multi-patch-with-gravity.py | 20 +- .../RR-multi-patch-with-inner-patch.py | 19 +- Rechnungen_auszuwerten.txt | 8 +- ...ame-wetting-phase-as-RR-zero-nonwetting.py | 49 ++-- .../TP-TP-2-patch-constant-solution.py | 37 +-- .../TP-TP-2-patch-pure-dd.py | 53 ++-- TP-TP-2-patch-test-case/TP-TP-2-patch-test.py | 46 ++-- .../TP-TP-layered_soil-const-solution.py | 180 +++++-------- ...ed_soil_with_inner_patch_const_solution.py | 246 +++++++----------- ...layered_soil_with_inner_patch-realistic.py | 22 +- .../TP-one-patch-purely-postive-pc.py | 14 +- 14 files changed, 450 insertions(+), 434 deletions(-) diff --git a/LDDsimulation/LDDsimulation.py b/LDDsimulation/LDDsimulation.py index ae785b2..baba899 100644 --- a/LDDsimulation/LDDsimulation.py +++ b/LDDsimulation/LDDsimulation.py @@ -31,7 +31,9 @@ class LDDsimulation(object): def __init__(self, dimension: int = 2,# debug: bool = False,# tol: float = None,# - LDDsolver_tol: float = 1E-8): + LDDsolver_tol: float = 1E-8, + # maximal number of L-iterations that the LDD solver uses. + max_iter_num: int = 1000): hlp.print_once("\n ###############################################\n", "############# Begin Simulation ################\n", "###############################################\n") @@ -88,6 +90,8 @@ class LDDsimulation(object): ## Public variables. + # maximal number of L-iterations that the LDD solver uses. + self._max_iter_num = max_iter_num # directory to write files output to. self.output_dir = None # list of subdomain meshes. Is populated by self.init_meshes_and_markers() @@ -105,8 +109,6 @@ class LDDsimulation(object): self.adjacent_subdomains = None ## Private variables - # maximal number of L-iterations that the LDD solver uses. - self._max_iter_num = 500 # TODO rewrite this with regard to the mesh sizes # self.calc_tol = self.tol # list of timesteps that get analyed. Gets initiated by self._init_analyse_timesteps @@ -731,7 +733,7 @@ class LDDsimulation(object): name1 ="subdomain" + "{}".format(subdom_ind) # only show three digits of the mesh size. 
name2 = "_dt" + "{}".format(tau) +"_hmax" + "{number:.{digits}f}".format(number=h, digits=3) \ - + "_gravitiy_" + "{}".format(self.include_gravity) + + "_gravity_" + "{}".format(self.include_gravity) name3 = "_lambda" + "{}".format(Lam) + "_Lw" + "{}".format(L["wetting"]) filename_param_part = name1 + name2 + name3 else: @@ -740,7 +742,7 @@ class LDDsimulation(object): filename_param_part = "subdomain" + "{}".format(subdom_ind)\ +"_dt" + "{}".format(tau)\ +"_hmax" + "{number:.{digits}f}".format(number=h, digits=3)\ - + "_gravitiy_" + "{}".format(self.include_gravity)\ + + "_gravity_" + "{}".format(self.include_gravity)\ +"_lambda_w" + "{}".format(Lam_w)\ +"_lambda_nw" + "{}".format(Lam_nw)\ +"_Lw" + "{}".format(L["wetting"])\ @@ -781,11 +783,11 @@ class LDDsimulation(object): file.write(exact_pressure[phase], t) exact_capillary_pressure = df.Function(subdomain.function_space["pressure"]['wetting']) - if self.isRichards: + if subdomain.isRichards: exact_capillary_pressure.assign(-exact_pressure['wetting']) else: - pc_temp = exact_pressure["nonwetting"].vector()[:] - exact_pressure["wetting"].vector()[:] - exact_capillary_pressure.vector()[:] = pc_temp + pc_temp = exact_pressure["nonwetting"].vector().get_local() - exact_pressure["wetting"].vector().get_local() + exact_capillary_pressure.vector().set_local(pc_temp) exact_capillary_pressure.rename("pc_exact", "pc_exact") file.write(exact_capillary_pressure, t) t += self.timestep_size diff --git a/LDDsimulation/boundary_and_interface.py b/LDDsimulation/boundary_and_interface.py index 9204d77..3863616 100644 --- a/LDDsimulation/boundary_and_interface.py +++ b/LDDsimulation/boundary_and_interface.py @@ -14,6 +14,7 @@ import dolfin as df import numpy as np import typing as tp import copy as cp +import numpy.linalg as la class BoundaryPart(df.SubDomain): @@ -333,8 +334,8 @@ class Interface(BoundaryPart): interface_marker_value: int, debug: bool = False) -> tp.Dict[int, tp.Dict[int, np.ndarray]]: """ for a given function space function_space, calculate the dof indices - and coordinates along the interface. The pair {global_dof_index: dof_coordinate} - is saved as value in a dictionary which has global facet indices as keys. + and coresponding coordinates along the interface. The pair {global_dof_index: dof_coordinate} + is saved as value in a dictionary which has global facet indices as keys. # Parameters function_space: #type df.functionSpace the function space of whitch @@ -351,41 +352,49 @@ class Interface(BoundaryPart): is defined on. The value to that key is a dictionary containing global (not cell) - dof indix and corresponding + dof index and corresponding coordinates of the dof as key: value pairs. """ dofmap = function_space.dofmap() element = function_space.element() - mesh_entity_iterator = df.SubsetIterator(interface_marker, interface_marker_value) + facet_mesh_entity_iterator = df.SubsetIterator(interface_marker, interface_marker_value) dofs_and_coords = dict() - for mesh_entity in mesh_entity_iterator: + for facet_mesh_entity in facet_mesh_entity_iterator: # this is the facet index relative to the Mesh # that interface_marker is defined on. - interface_facet_index = mesh_entity.index() - for interface_facet in df.facets(mesh_entity): - # we only need the x,y coordinates. vertex.point()[:] returns - # the array and cannot be sliced. 
the result can be indexed, hence, - # vertex.point()[:][0:2] - # facet_midpoint = interface_facet.midpoint()[:][0:2] - # print(f"facet: {interface_facet}") - # print("facet coordinates: ") - facet_points = [] - for vertex in df.vertices(interface_facet): - # print(f"vertex: {vertex} with coordinates {vertex.point()[:][0:2]}") - facet_points.append(vertex.point()) + interface_facet_index = facet_mesh_entity.index() + # if debug: + # print("\n") + # for interface_facet in df.facets(facet_mesh_entity): + # if debug: + # print(f"interface facet with mesh entity index: {facet_mesh_entity.index()}") + # print(f"interface facet with facet index: {interface_facet.index()}") + # # we only need the x,y coordinates. vertex.point()[:] returns + # # the array and cannot be sliced. the result can be indexed, hence, + # # vertex.point()[:][0:2] + # # facet_midpoint = interface_facet.midpoint()[:][0:2] + # # print(f"facet: {interface_facet}") + # # print("facet coordinates: ") + facet_points = [] + for vertex in df.vertices(facet_mesh_entity): + # if debug: + # print(f"vertex: {vertex} with coordinates {vertex.point()[:][0:2]}") + facet_points.append(vertex.point()) dofs_and_coords.update({interface_facet_index: dict()}) - # because mesh_entity doesn't have a method to access coordinates we + # because facet_mesh_entity doesn't have a method to access coordinates we # need to cast it into an acutal facet object. - for cell in df.cells(mesh_entity): - # we actually only have one facet contained in this mesh_entity - # but we still need to loop over „all“ facets since df.facets(mesh_entity) + for cell in df.cells(facet_mesh_entity): + # we actually only have one facet contained in this facet_mesh_entity + # but we still need to loop over „all“ facets since df.facets(facet_mesh_entity) # returns an iterator object. facet_cell = cell facet_cell_index = facet_cell.index() + # if debug: + # print(f"facet_cell_index on the cell belonging to facet_mesh_entity {interface_facet_index} is {facet_cell.index()}") # cell_facets_entity = df.facets(cell) # cell_coordinates = cell.get_vertex_coordinates() @@ -409,8 +418,13 @@ class Interface(BoundaryPart): if debug: - print(f"newly defined facet_dof_to_coordinates_dictionary of interface[{interface_marker_value-1}]") - print(dofs_and_coords.items()) + print(f"The newly defined facet_dof_to_coordinates_dictionary of interface[{interface_marker_value-1}]") + number_of_interface_facets = 0 + for facets in dofs_and_coords.items(): + number_of_interface_facets += 1 + print(f"contains {number_of_interface_facets} interface facets, so {number_of_interface_facets + 1} dofs") + for facet_index, facet_dofs in dofs_and_coords.items(): + print(f"on facet {facet_index} we have the dofs", facet_dofs) return cp.deepcopy(dofs_and_coords) @@ -440,17 +454,29 @@ class Interface(BoundaryPart): self.facet_to_vertex_coordinates.update({subdomain_index: dict()}) self.outer_normals.update({subdomain_index: dict()}) self.facets.update({subdomain_index: dict()}) - # Get a subset iterator for mesh entities along the facets marked by + # Get a subset iterator for mesh entities corresponding to the facets marked by # interface_marker and value interface_marker_value. - mesh_entity_iterator = df.SubsetIterator(interface_marker, interface_marker_value) - for mesh_entity in mesh_entity_iterator: - # because mesh_entity doesn't have a method to access coordinates we - # need to cast it into an acutal facet object. 
- for f in df.facets(mesh_entity): - # we actually only have one facet contained in this mesh_entity - # but we still need to loop over „all“ facets since df.facets(mesh_entity) - # returns an iterator object. - facet = f + interface_facet_mesh_entities = df.SubsetIterator(interface_marker, interface_marker_value) + # we actually want to access things like the outer normal and coordinates + # of the vertices belonging to the facet. The entity iterator provides a + # way to iterate over facets but as instances of the more abstract class + # dolfin.cpp.mesh.MeshEntity. We need to find for each facet_entity the + # actual instance of class dolfin.cpp.mesh.Facet, which then allows us + # to access the outer normal. + if debug: + print(f"\non subdomain {subdomain_index}") + for facet_mesh_entity in interface_facet_mesh_entities: + # df.facets(facet_mesh_entity) gives us for each facet_entity the + # an instance of class dolfin.cpp.mesh.Facet. Yes, it's annoying, + # since we actually only have one facet (of class dolfin.cpp.mesh.Facet) + # contained in facet_mesh_entity but we still need to loop over + # „all“ facets since df.facets(facet_mesh_entity) returns an iterator object. + # if debug: + # print(f"\nfacet_mesh_entity: {facet_mesh_entity} of type {type(facet_mesh_entity)}") + for facet_instance in df.facets(facet_mesh_entity): + facet = facet_instance + # if debug: + # print(f"facet (type: {type(facet)}) corresponding to facet_mesh_entity: {facet}") facet_vertices = [] for vertex in df.vertices(facet): @@ -462,6 +488,8 @@ class Interface(BoundaryPart): self.facet_to_vertex_coordinates[subdomain_index].update( {facet.index(): facet_vertices} ) + # if debug: + # print(f"saved facet vertices as {self.facet_to_vertex_coordinates[subdomain_index].items()}") self.facets[subdomain_index].update({facet.index(): facet}) if subdomain_index == 0: # we do not have outer normals if this interface is part of the @@ -473,16 +501,29 @@ class Interface(BoundaryPart): self.outer_normals[subdomain_index].update( {facet.index(): facet.normal()[:][0:2]} ) - # if subdomain_index is not equal to 0, we are on an actual SubDomain - # and self.init_facet_dictionaries() has been run for the global domain - # already. This means we actually can compute a local2global facet map. - self.init_facet_maps(subdomain_index) + if debug: + print(f"normal has norm: {la.norm(facet.normal()[:][0:2])}") + # print(f"saved outer normall as {self.outer_normals[subdomain_index].items()}") + # print(f"outer normal {facet.normal()[:][0:2]}: belonging to facet {facet}") + # problem_point = np.array([0.7, -0.2]) + # vertex1ispoint = np.allclose(problem_point, self.facet_to_vertex_coordinates[subdomain_index][facet.index()][0]) + # vertex2ispoint = np.allclose(problem_point, self.facet_to_vertex_coordinates[subdomain_index][facet.index()][1]) + # if vertex1ispoint or vertex2ispoint: + # print(f"on subdomain {subdomain_index}:") + # print(f"the normal of the facet belonging to {self.facet_to_vertex_coordinates[subdomain_index][facet.index()][0]} and point {self.facet_to_vertex_coordinates[subdomain_index][facet.index()][1]}, problem point = {problem_point} is", facet.normal()[:][0:2]) + # # raise RuntimeError('stopped for debugging') + if subdomain_index != 0: + # if subdomain_index is not equal to 0, we are on an actual SubDomain + # and self.init_facet_dictionaries() has been run for the global domain + # already. This means we actually can compute a local2global facet map. 
+ self.init_facet_maps(subdomain_index) if debug: print(f"newly defined facet_to_vertex_coordinates_dictionary[{subdomain_index}]") print(self.facet_to_vertex_coordinates[subdomain_index].items()) print(f"outer normals per facet are: ", self.outer_normals[subdomain_index].items()) + def init_facet_maps(self, subdomain_index: int, debug: bool = False) -> None: """ calculate the mapping that maps the local numbering of interface facets (w.r.t. the mesh of subdomain subdomain_index) to the global @@ -512,8 +553,39 @@ class Interface(BoundaryPart): {global_facet_index: local_facet_index} ) if debug: + print(f"\n on subdomain {subdomain_index}") print("local_to_global_facet_map: ", self.local_to_global_facet_map[subdomain_index].items()) print("global_to_local_facet_map: ", self.global_to_local_facet_map[subdomain_index].items()) + print(f"\nlocal_to_global_facet_map sanity check:") + for local_facet_index, local_facet_vertex in subdom_facet2coord.items(): + global_index = self.local_to_global_facet_map[subdomain_index][local_facet_index] + global_facet_vertex = global_facet2coord[global_index] + vertices_are_equal = np.allclose(local_facet_vertex[0], global_facet_vertex[0], rtol=1e-10, atol=1e-14) + vertices_are_equal += np.allclose(local_facet_vertex[0], global_facet_vertex[1], rtol=1e-10, atol=1e-14) + vertices_are_equal += np.allclose(local_facet_vertex[1], global_facet_vertex[0], rtol=1e-10, atol=1e-14) + vertices_are_equal += np.allclose(local_facet_vertex[1], global_facet_vertex[1], rtol=1e-10, atol=1e-14) + both_vertices_are_equal = (vertices_are_equal == 2) + if both_vertices_are_equal: + print(f"facet with local index {local_facet_index} has correctly been identyfied to global number {global_index}") + else: + print("FALSE FALSE FALSE") + print(f"facet with local index {local_facet_index} has INCORRECTLY been identyfied to global number {global_index}") + + print(f"\nglobal_to_local_facet_map sanity check:") + for global_facet_index, global_facet_vertex in global_facet2coord.items(): + local_index = self.global_to_local_facet_map[subdomain_index][global_facet_index] + local_facet_vertex = subdom_facet2coord[local_index] + vertices_are_equal = np.allclose(local_facet_vertex[0], global_facet_vertex[0], rtol=1e-10, atol=1e-14) + vertices_are_equal += np.allclose(local_facet_vertex[0], global_facet_vertex[1], rtol=1e-10, atol=1e-14) + vertices_are_equal += np.allclose(local_facet_vertex[1], global_facet_vertex[0], rtol=1e-10, atol=1e-14) + vertices_are_equal += np.allclose(local_facet_vertex[1], global_facet_vertex[1], rtol=1e-10, atol=1e-14) + both_vertices_are_equal = (vertices_are_equal == 2) + if both_vertices_are_equal: + print(f"facet with global number {global_facet_index} has correctly been identyfied to local index {local_index}") + else: + print("FALSE FALSE FALSE") + print(f"facet with global number {global_facet_index} has INCORRECTLY been identyfied to local index {local_index}") + def init_interface_values(self,# interface_marker: df.MeshFunction,# diff --git a/LDDsimulation/domainPatch.py b/LDDsimulation/domainPatch.py index 52476d1..4cfcc6f 100644 --- a/LDDsimulation/domainPatch.py +++ b/LDDsimulation/domainPatch.py @@ -167,9 +167,9 @@ class DomainPatch(df.SubDomain): # sometimes we need to loop over the phases and the length of the loop is # determined by the model we are assuming if self.isRichards: - self.has_phases =['wetting'] + self.has_phases = ['wetting'] else: - self.has_phases =['wetting', 'nonwetting'] + self.has_phases = ['nonwetting', 'wetting'] #['wetting', 
'nonwetting'] # dictionary holding the dof indices corresponding to an interface of # given interface. see self._calc_dof_indices_of_interfaces() self._dof_indices_of_interface = dict() @@ -621,7 +621,7 @@ class DomainPatch(df.SubDomain): # print(f"np.allclose({p_coord}, {gli_dof_coord}, rtol=1e-10, atol=1e-14): {b_close_a}") if a_close_b and b_close_a: p_previous_dof = p_prev_dof - + if p_previous_dof is None: raise RuntimeError(f"didn't find p_previous_dof corresponding to dict key ({gli_dof_coord})", "something is wrong") @@ -811,7 +811,7 @@ class DomainPatch(df.SubDomain): self.ds = df.Measure('ds', domain = self.mesh, subdomain_data = self.interface_marker) - def _calc_interface_dof_indices_and_coordinates(self): + def _calc_interface_dof_indices_and_coordinates(self, debug: bool = False): """ calculate dictionaries containing for each local facet index of interfaces a dictionary containing {interface_dof_index: dof_coordinates} """ @@ -829,20 +829,25 @@ class DomainPatch(df.SubDomain): self.interface_dof_indices_and_coordinates[function].update( {interface_index: dict()} ) + if debug: + print(f"\nOn subdomain {self.subdomain_index}") for phase in self.has_phases: if function == "flux": self.interface_dof_indices_and_coordinates[function][interface_index].update(# {phase: dict()} ) + # print(f"\ndofs on interface for function space {function} for phase {phase}:") self.interface_dof_indices_and_coordinates[function][interface_index][phase].update(# - {"x": interface.dofs_and_coordinates(function_space[phase].sub(0), marker, marker_value)} + {"x": interface.dofs_and_coordinates(function_space[phase].sub(0), marker, marker_value, debug=False)} ) self.interface_dof_indices_and_coordinates[function][interface_index][phase].update(# - {"y": interface.dofs_and_coordinates(function_space[phase].sub(1), marker, marker_value)} + {"y": interface.dofs_and_coordinates(function_space[phase].sub(1), marker, marker_value, debug=False)} ) else: + if debug: + print(f"\ndofs on interface for function space {function} for phase {phase}:") self.interface_dof_indices_and_coordinates[function][interface_index].update(# - {phase: interface.dofs_and_coordinates(function_space[phase], marker, marker_value)} + {phase: interface.dofs_and_coordinates(function_space[phase], marker, marker_value, debug=False)} ) @@ -874,8 +879,8 @@ class DomainPatch(df.SubDomain): flux_dof_coordinates = self.interface_dof_indices_and_coordinates["flux"][interf_ind][phase] # the attributes local and global for facet numbering mean # the following: local facet index is the index of the facet - # with respect to the numbering of the submesh belonging to - # the subdomain. global facet index refers to the facet numbering + # with respect to the numbering of the subdomain submesh. + # Global facet index refers to the facet numbering # of the mesh of the whole computational domain. 
# local2global_facet = interface.local_to_global_facet_map[subdomain] for local_facet_ind in interface.outer_normals[subdomain].keys(): @@ -990,7 +995,8 @@ class DomainPatch(df.SubDomain): capillary_pressure.assign(-self.pressure["wetting"]) else: pc_temp = self.pressure["nonwetting"].vector()[:] - self.pressure["wetting"].vector()[:] - capillary_pressure.vector()[:] = pc_temp + capillary_pressure.vector().set_local(pc_temp) + # capillary_pressure.assign(pc_temp) capillary_pressure.rename("pc_num", "pc_num") file.write(capillary_pressure, t) else: diff --git a/RR-multi-patch-plus-gravity/RR-multi-patch-with-gravity.py b/RR-multi-patch-plus-gravity/RR-multi-patch-with-gravity.py index 962d813..159fad1 100755 --- a/RR-multi-patch-plus-gravity/RR-multi-patch-with-gravity.py +++ b/RR-multi-patch-plus-gravity/RR-multi-patch-with-gravity.py @@ -14,7 +14,7 @@ import helpers as hlp sym.init_printing() use_case = "RR-multi-patch" -solver_tol = 1E-7 +solver_tol = 1E-6 # ----------------------------------------------------------------------------# # ------------------- MESH ---------------------------------------------------# @@ -37,7 +37,7 @@ include_gravity = True debugflag = False analyse_condition = False -output_string = "./output/new_gravity_term-number_of_timesteps{}_".format(number_of_timesteps) +output_string = "./output/post-fix-new_gravity_term-number_of_timesteps{}_".format(number_of_timesteps) # ----------------------------------------------------------------------------# @@ -321,10 +321,10 @@ sat_pressure_relationship = { } exact_solution = { - 1: {'wetting': '1.0 - (1.0 + t*t)*(1.0 + x[0]*x[0] + x[1]*x[1])'}, - 2: {'wetting': '1.0 - (1.0 + t*t)*(1.0 + x[0]*x[0])'}, - 3: {'wetting': '1.0 - (1.0 + t*t)*(1.0 + x[0]*x[0])'}, - 4: {'wetting': '1.0 - (1.0 + t*t)*(1.0 + x[0]*x[0] + x[1]*x[1])'} + 1: {'wetting': '-1.0 - (1.0 + t*t)*(1.0 + x[0]*x[0] + x[1]*x[1])'}, + 2: {'wetting': '-1.0 - (1.0 + t*t)*(1.0 + x[0]*x[0])'}, + 3: {'wetting': '-1.0 - (1.0 + t*t)*(1.0 + x[0]*x[0])'}, + 4: {'wetting': '-1.0 - (1.0 + t*t)*(1.0 + x[0]*x[0] + x[1]*x[1])'} } initial_condition = { @@ -341,10 +341,10 @@ x, y = sym.symbols('x[0], x[1]') # needed by UFL t = sym.symbols('t', positive=True) p_e_sym = { - 1: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + x*x + y*y)}, - 2: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + x*x)}, - 3: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + x*x)}, - 4: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + x*x + y*y)} + 1: {'wetting': -1.0 - (1.0 + t*t)*(1.0 + x*x + y*y)}, + 2: {'wetting': -1.0 - (1.0 + t*t)*(1.0 + x*x)}, + 3: {'wetting': -1.0 - (1.0 + t*t)*(1.0 + x*x)}, + 4: {'wetting': -1.0 - (1.0 + t*t)*(1.0 + x*x + y*y)} } # pc_e_sym = { diff --git a/RR-multi-patch-with-inner-patch/RR-multi-patch-with-inner-patch.py b/RR-multi-patch-with-inner-patch/RR-multi-patch-with-inner-patch.py index df7dd19..c6793a5 100755 --- a/RR-multi-patch-with-inner-patch/RR-multi-patch-with-inner-patch.py +++ b/RR-multi-patch-with-inner-patch/RR-multi-patch-with-inner-patch.py @@ -13,11 +13,11 @@ import helpers as hlp # init sympy session sym.init_printing() -use_case = "RR-multi-patch-with-inner-patch-cuttoff" +use_case = "RR-multi-patch-with-inner-patch-with-gravity" solver_tol = 1E-6 ############ GRID #######################ü -mesh_resolution = 20 +mesh_resolution = 40 timestep_size = 0.01 number_of_timesteps = 150 # decide how many timesteps you want analysed. 
Analysed means, that we write out @@ -32,9 +32,9 @@ lambda_w = 4 include_gravity = True debugflag = False -analyse_condition = False +analyse_condition = True -output_string = "./output/new_gravity_cutoff_term-number_of_timesteps{}_".format(number_of_timesteps) +output_string = "./output/post-fix-number_of_timesteps{}_".format(number_of_timesteps) # ----------------------------------------------------------------------------# # ------------------- Domain and Interface -----------------------------------# @@ -211,7 +211,6 @@ L = { 5: {'wetting': Lw} } -lambda_w = 32 # subdom_num : lambda parameter for the L-scheme lambda_param = { @@ -458,11 +457,11 @@ epsilon = 0.5 # } p_e_sym = { - 1: {'wetting': (1.0 - (1.0 + t*t)*(1.0 + x*x + y*y))*cutoff}, - 2: {'wetting': (1.0 - (1.0 + t*t)*(1.0 + x*x))*cutoff}, - 3: {'wetting': (1.0 - (1.0 + t*t)*(1.0 + x*x))*cutoff}, - 4: {'wetting': (1.0 - (1.0 + t*t)*(1.0 + x*x))*cutoff}, - 5: {'wetting': (1.0 - (1.0 + t*t)*(1.0 + x*x + y*y))*cutoff} + 1: {'wetting': (-1.0 - (1.0 + t*t)*(1.0 + x*x + y*y))}, #*cutoff}, + 2: {'wetting': (-1.0 - (1.0 + t*t)*(1.0 + x*x))}, #*cutoff}, + 3: {'wetting': (-1.0 - (1.0 + t*t)*(1.0 + x*x))}, #*cutoff}, + 4: {'wetting': (-1.0 - (1.0 + t*t)*(1.0 + x*x))}, #*cutoff}, + 5: {'wetting': (-1.0 - (1.0 + t*t)*(1.0 + x*x + y*y))} #*cutoff} } diff --git a/Rechnungen_auszuwerten.txt b/Rechnungen_auszuwerten.txt index d808dde..ea08d8e 100644 --- a/Rechnungen_auszuwerten.txt +++ b/Rechnungen_auszuwerten.txt @@ -1,14 +1,10 @@ Rechungen, die noch nicht ausgewertet wurden -- TP-one-patch (purely positive pc) - TP-TP-layered-soil-case-with-inner-patch -- TP-R-two-patch-test-case -- RR-multipatch-plus-gravity - - +- RR-multi-patch-with-inner-patch (no-gravtiy) +- RR-multi-patch-with-inner-patch (with-gravity) Rechnungen, die neu gestartet werden müssen: -TP-R-multi-patch-same-wetting-phase-as-RR-zero-nonwetting - - TP-R-two-patch-test-case (nach der reimplementierung von gravity) TP pure dd diff --git a/TP-R-multi-patch-same-wetting-phase-as-RR-zero-nonwetting/TP-R-multi-patch-same-wetting-phase-as-RR-zero-nonwetting.py b/TP-R-multi-patch-same-wetting-phase-as-RR-zero-nonwetting/TP-R-multi-patch-same-wetting-phase-as-RR-zero-nonwetting.py index 80bde1a..ccc8614 100755 --- a/TP-R-multi-patch-same-wetting-phase-as-RR-zero-nonwetting/TP-R-multi-patch-same-wetting-phase-as-RR-zero-nonwetting.py +++ b/TP-R-multi-patch-same-wetting-phase-as-RR-zero-nonwetting/TP-R-multi-patch-same-wetting-phase-as-RR-zero-nonwetting.py @@ -14,28 +14,29 @@ import helpers as hlp sym.init_printing() use_case = "TP-R-multi-patch-zero-nonwetting-phase" -solver_tol = 1E-6 +solver_tol = 7E-6 +max_iter_num = 200 ############ GRID #######################ü mesh_resolution = 40 -timestep_size = 0.001 -number_of_timesteps = 1500 +timestep_size = 0.0001 +number_of_timesteps = 100 # decide how many timesteps you want analysed. Analysed means, that we write out # subsequent errors of the L-iteration within the timestep. 
number_of_timesteps_to_analyse = 10 starttime = 0 -Lw = 0.5 #/timestep_size +Lw = 1 #/timestep_size Lnw=Lw -l_param_w = 40 -l_param_nw = 40 +lambda_w = 4 +lambda_nw = 4 -include_gravity = False +include_gravity = True debugflag = False analyse_condition = True -output_string = "./output/number_of_timesteps{}_".format(number_of_timesteps) +output_string = "./output/post-fix-number_of_timesteps{}_".format(number_of_timesteps) # ----------------------------------------------------------------------------# # ------------------- Domain and Interface -----------------------------------# @@ -193,14 +194,14 @@ L = { # subdom_num : lambda parameter for the L-scheme lambda_param = { - 1: {'wetting': l_param_w, - 'nonwetting': l_param_nw},# - 2: {'wetting': l_param_w, - 'nonwetting': l_param_nw},# - 3: {'wetting': l_param_w, - 'nonwetting': l_param_nw},# - 4: {'wetting': l_param_w, - 'nonwetting': l_param_nw},# + 1: {'wetting': lambda_w, + 'nonwetting': lambda_nw},# + 2: {'wetting': lambda_w, + 'nonwetting': lambda_nw},# + 3: {'wetting': lambda_w, + 'nonwetting': lambda_nw},# + 4: {'wetting': lambda_w, + 'nonwetting': lambda_nw},# } @@ -359,13 +360,13 @@ x, y = sym.symbols('x[0], x[1]') # needed by UFL t = sym.symbols('t', positive=True) p_e_sym = { - 1: {'wetting': (1.0 - (1.0 + t*t)*(1.0 + x*x + y*y)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, + 1: {'wetting': (-1.0 - (1.0 + t*t)*(1.0 + x*x + y*y)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, 'nonwetting': 0.0*t}, - 2: {'wetting': (1.0 - (1.0 + t*t)*(1.0 + x*x)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, + 2: {'wetting': (-1.0 - (1.0 + t*t)*(1.0 + x*x)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, 'nonwetting': 0.0*t}, - 3: {'wetting': (1.0 - (1.0 + t*t)*(1.0 + x*x)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, + 3: {'wetting': (-1.0 - (1.0 + t*t)*(1.0 + x*x)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, 'nonwetting': 0.0*t}, - 4: {'wetting': (1.0 - (1.0 + t*t)*(1.0 + x*x + y*y)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, + 4: {'wetting': (-1.0 - (1.0 + t*t)*(1.0 + x*x + y*y)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, 'nonwetting': 0.0*t} } @@ -445,7 +446,13 @@ write_to_file = { } # initialise LDD simulation class -simulation = ldd.LDDsimulation(tol=1E-14, LDDsolver_tol=solver_tol, debug=debugflag) +simulation = ldd.LDDsimulation( + tol=1E-14, + LDDsolver_tol=solver_tol, + debug=debugflag, + max_iter_num=max_iter_num + ) + simulation.set_parameters(use_case=use_case, output_dir=output_string, subdomain_def_points=subdomain_def_points, diff --git a/TP-TP-2-patch-constant-solution/TP-TP-2-patch-constant-solution.py b/TP-TP-2-patch-constant-solution/TP-TP-2-patch-constant-solution.py index 0dfa344..e7e076a 100755 --- a/TP-TP-2-patch-constant-solution/TP-TP-2-patch-constant-solution.py +++ b/TP-TP-2-patch-constant-solution/TP-TP-2-patch-constant-solution.py @@ -13,24 +13,29 @@ import helpers as hlp # init sympy session sym.init_printing() -solver_tol = 5E-6 +use_case = "TP-TP-two-patch-constant-solution" +solver_tol = 1E-6 ############ GRID #######################ü -mesh_resolution = 20 +mesh_resolution = 30 timestep_size = 0.01 -number_of_timesteps = 100 +number_of_timesteps = 150 # decide how many timesteps you want analysed. Analysed means, that we write out # subsequent errors of the L-iteration within the timestep. 
-number_of_timesteps_to_analyse = 10 +number_of_timesteps_to_analyse = 5 starttime = 0 -Lw = 1/timestep_size +Lw = 0.5 #/timestep_size Lnw=Lw -l_param_w = 40 -l_param_nw = 40 +lambda_w = 40 +lambda_nw = 40 include_gravity = True +debugflag = False +analyse_condition = True + +output_string = "./output/post-fix-number_of_timesteps{}_".format(number_of_timesteps) ##### Domain and Interface #### @@ -137,10 +142,10 @@ L = {# lambda_param = {# # subdom_num : lambda parameter for the L-scheme - 1 : {'wetting' :l_param_w, - 'nonwetting': l_param_nw},# - 2 : {'wetting' :l_param_w, - 'nonwetting': l_param_nw} + 1 : {'wetting' :lambda_w, + 'nonwetting': lambda_nw},# + 2 : {'wetting' :lambda_w, + 'nonwetting': lambda_nw} } ## relative permeabilty functions on subdomain 1 @@ -199,7 +204,7 @@ def rel_perm1nw_prime(s): # # def rel_perm2nw_prime(s): # # relative permeabilty on subdomain1 -# return -2*(l_param_w1-s) +# return -2*(lambda_w1-s) _rel_perm1w_prime = ft.partial(rel_perm1w_prime) _rel_perm1nw_prime = ft.partial(rel_perm1nw_prime) @@ -449,8 +454,10 @@ write_to_file = { # initialise LDD simulation class -simulation = ldd.LDDsimulation(tol = 1E-14, LDDsolver_tol = solver_tol, debug = True) -simulation.set_parameters(output_dir = "./output/",# +simulation = ldd.LDDsimulation(tol = 1E-14, LDDsolver_tol=solver_tol, debug=debugflag) +simulation.set_parameters( + use_case=use_case, + output_dir = output_string,# subdomain_def_points = subdomain_def_points,# isRichards = isRichards,# interface_def_points = interface_def_points,# @@ -478,4 +485,4 @@ simulation.set_parameters(output_dir = "./output/",# simulation.initialise() # simulation.write_exact_solution_to_xdmf() -simulation.run() +simulation.run(analyse_condition=analyse_condition) diff --git a/TP-TP-2-patch-pure-dd/TP-TP-2-patch-pure-dd.py b/TP-TP-2-patch-pure-dd/TP-TP-2-patch-pure-dd.py index 1a9a2c1..b11500b 100755 --- a/TP-TP-2-patch-pure-dd/TP-TP-2-patch-pure-dd.py +++ b/TP-TP-2-patch-pure-dd/TP-TP-2-patch-pure-dd.py @@ -14,28 +14,29 @@ import helpers as hlp sym.init_printing() use_case = "TP-TP-2-patch-pure-dd" -solver_tol = 1E-6 +solver_tol = 6E-6 +max_iter_num = 1000 -############ GRID #######################ü -mesh_resolution = 30 -timestep_size = 0.0001 -number_of_timesteps = 11000 +############ GRID ####################### +mesh_resolution = 20 +timestep_size = 0.0005 +number_of_timesteps = 600 # decide how many timesteps you want analysed. Analysed means, that we write out # subsequent errors of the L-iteration within the timestep. 
-number_of_timesteps_to_analyse = 10 +number_of_timesteps_to_analyse = 6 starttime = 0 -Lw = 10 #/timestep_size +Lw = 1 #/timestep_size Lnw=Lw -l_param_w = 20 -l_param_nw = 20 +lambda_w = 4 +lambda_nw = 4 -include_gravity = True +include_gravity = False debugflag = False -analyse_condition = False +analyse_condition = True -output_string = "./output/nondirichlet_number_of_timesteps{}_".format(number_of_timesteps) +output_string = "./output/2019-08-23-nondirichlet_number_of_timesteps{}_".format(number_of_timesteps) ##### Domain and Interface #### # global simulation domain domain @@ -149,10 +150,10 @@ L = {# lambda_param = {# # subdom_num : lambda parameter for the L-scheme - 1 : {'wetting' :l_param_w, - 'nonwetting': l_param_nw},# - 2 : {'wetting' :l_param_w, - 'nonwetting': l_param_nw} + 1 : {'wetting' :lambda_w, + 'nonwetting': lambda_nw},# + 2 : {'wetting' :lambda_w, + 'nonwetting': lambda_nw} } ## relative permeabilty functions on subdomain 1 @@ -428,10 +429,14 @@ symbols = { "x": x, # } p_e_sym = { - 1: {'wetting': (-5 - (1+t*t)*(1 + x*x + y*y)), #*cutoff, - 'nonwetting': (-1 -t*(1.1+y + x**2))}, #*cutoff}, - 2: {'wetting': (-5 - (1+t*t)*(1 + x*x + y*y)), #*cutoff, - 'nonwetting': (-1 -t*(1.1+y + x**2))}, #*cutoff}, + 1: {'wetting': (-6 - (1+t*t)*(1 + x*x + y*y)), #*cutoff, + 'nonwetting': (-1 -t*(1.1+ y + x**2))}, #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2}, + 2: {'wetting': (-6.0 - (1.0 + t*t)*(1.0 + x*x)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, + 'nonwetting': (-1 -t*(1.1 + x**2) - sym.sqrt(2+t**2)*(1.1+y)**2*y**2)}, #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2}, + # 1: {'wetting': (-5 - (1+t*t)*(1 + x*x + y*y)), #*cutoff, + # 'nonwetting': (-1 -t*(1.1+y + x**2))}, #*cutoff}, + # 2: {'wetting': (-5 - (1+t*t)*(1 + x*x + y*y)), #*cutoff, + # 'nonwetting': (-1 -t*(1.1+y + x**2))}, #*cutoff}, } @@ -506,7 +511,13 @@ write_to_file = { # initialise LDD simulation class -simulation = ldd.LDDsimulation(tol=1E-14, LDDsolver_tol=solver_tol, debug=debugflag) +simulation = ldd.LDDsimulation( + tol=1E-14, + LDDsolver_tol=solver_tol, + debug=debugflag, + max_iter_num=max_iter_num + ) + simulation.set_parameters(use_case=use_case, output_dir=output_string, subdomain_def_points=subdomain_def_points, diff --git a/TP-TP-2-patch-test-case/TP-TP-2-patch-test.py b/TP-TP-2-patch-test-case/TP-TP-2-patch-test.py index 239aab1..3855efc 100755 --- a/TP-TP-2-patch-test-case/TP-TP-2-patch-test.py +++ b/TP-TP-2-patch-test-case/TP-TP-2-patch-test.py @@ -13,26 +13,29 @@ import helpers as hlp # init sympy session sym.init_printing() -solver_tol = 1E-7 +use_case = "TP-TP-two-patch" +solver_tol = 6E-6 ############ GRID #######################ü -mesh_resolution = 40 -timestep_size = 0.0001 -number_of_timesteps = 1000 +mesh_resolution = 20 +timestep_size = 0.0005 +number_of_timesteps = 600 # decide how many timesteps you want analysed. Analysed means, that we write out # subsequent errors of the L-iteration within the timestep. 
-number_of_timesteps_to_analyse = 11 +number_of_timesteps_to_analyse = 0 starttime = 0 -Lw = 0.25*1/timestep_size +Lw = 1 #/timestep_size Lnw=Lw -l_param_w = 40 -l_param_nw = l_param_w +lambda_w = 4 +lambda_nw = 4 include_gravity = True +debugflag = False +analyse_condition = True -output_string = "./output/number_of_timesteps_{}_".format(number_of_timesteps) +output_string = "./output/2019-08-23-number_of_timesteps{}_".format(number_of_timesteps) ##### Domain and Interface #### # global simulation domain domain @@ -145,10 +148,10 @@ L = {# lambda_param = {# # subdom_num : lambda parameter for the L-scheme - 1 : {'wetting' :l_param_w, - 'nonwetting': l_param_nw},# - 2 : {'wetting' :l_param_w, - 'nonwetting': l_param_nw} + 1 : {'wetting' :lambda_w, + 'nonwetting': lambda_nw},# + 2 : {'wetting' :lambda_w, + 'nonwetting': lambda_nw} } ## relative permeabilty functions on subdomain 1 @@ -198,7 +201,7 @@ def rel_perm1w_prime(s): def rel_perm1nw_prime(s): # relative permeabilty on subdomain1 - return 2*(1-s) + return -2*(1-s) # # definition of the derivatives of the relative permeabilities # # relative permeabilty functions on subdomain 1 @@ -331,10 +334,10 @@ x, y = sym.symbols('x[0], x[1]') # needed by UFL t = sym.symbols('t', positive=True) p_e_sym = { - 1: {'wetting': (-3 - (1+t*t)*(1 + x*x + y*y)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, - 'nonwetting': (-1 -t*(1-y - x**2)**2)}, #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2}, - 2: {'wetting': (-3.0 - (1.0 + t*t)*(1.0 + x*x)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, - 'nonwetting': (-1 -t*(1- x**2)**2 - sym.sqrt(2+t**2)*(1+y)*y**2)}, #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2}, + 1: {'wetting': (-6 - (1+t*t)*(1 + x*x + y*y)), #*cutoff, + 'nonwetting': (-1 -t*(1.1+ y + x**2))}, #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2}, + 2: {'wetting': (-6.0 - (1.0 + t*t)*(1.0 + x*x)), #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2, + 'nonwetting': (-1 -t*(1.1 + x**2) - sym.sqrt(2+t**2)*(1.1+y)**2*y**2)}, #*(sym.sin((1+y)/2*sym.pi)*sym.sin((1+x)/2*sym.pi))**2}, } @@ -413,8 +416,9 @@ write_to_file = { # initialise LDD simulation class -simulation = ldd.LDDsimulation(tol = 1E-14, LDDsolver_tol = solver_tol, debug = True) -simulation.set_parameters(output_dir = output_string,# +simulation = ldd.LDDsimulation(tol = 1E-14, LDDsolver_tol = solver_tol, debug = debugflag) +simulation.set_parameters(use_case=use_case, + output_dir = output_string,# subdomain_def_points = subdomain_def_points,# isRichards = isRichards,# interface_def_points = interface_def_points,# @@ -442,4 +446,4 @@ simulation.set_parameters(output_dir = output_string,# simulation.initialise() # simulation.write_exact_solution_to_xdmf() -simulation.run() +simulation.run(analyse_condition=analyse_condition) diff --git a/TP-TP-layered-soil-case-const-solution/TP-TP-layered_soil-const-solution.py b/TP-TP-layered-soil-case-const-solution/TP-TP-layered_soil-const-solution.py index b17d1b8..e3c655b 100755 --- a/TP-TP-layered-soil-case-const-solution/TP-TP-layered_soil-const-solution.py +++ b/TP-TP-layered-soil-case-const-solution/TP-TP-layered_soil-const-solution.py @@ -16,26 +16,34 @@ import typing as tp import functools as ft import domainPatch as dp import LDDsimulation as ldd +import helpers as hlp # init sympy session sym.init_printing() -# ----------------------------------------------------------------------------# -# ------------------- MESH ---------------------------------------------------# -# 
----------------------------------------------------------------------------# -mesh_resolution = 40 -# ----------------------------------------:-------------------------------------# -# ------------------- TIME ---------------------------------------------------# -# ----------------------------------------------------------------------------# +use_case = "TP-layered-soil-constant-solution" +solver_tol = 1E-6 + +############ GRID #######################ü +mesh_resolution = 30 timestep_size = 0.01 number_of_timesteps = 100 -# decide how many timesteps you want analysed. Analysed means, that we write -# out subsequent errors of the L-iteration within the timestep. -number_of_timesteps_to_analyse = 10 +# decide how many timesteps you want analysed. Analysed means, that we write out +# subsequent errors of the L-iteration within the timestep. +number_of_timesteps_to_analyse = 5 starttime = 0 -l_param_w = 40 -l_param_nw = 40 +Lw = 0.25 #/timestep_size +Lnw=Lw + +lambda_w = 4 +lambda_nw = 4 + +include_gravity = True +debugflag = False +analyse_condition = False + +output_string = "./output/post-fix-number_of_timesteps{}_".format(number_of_timesteps) # global domain subdomain0_vertices = [df.Point(-1.0,-1.0), # @@ -221,26 +229,26 @@ porosity = { # subdom_num : subdomain L for L-scheme L = { - 1: {'wetting' : 0.25, - 'nonwetting': 0.25}, - 2: {'wetting' : 0.25, - 'nonwetting': 0.25}, - 3: {'wetting' : 0.25, - 'nonwetting': 0.25}, - 4: {'wetting' : 0.25, - 'nonwetting': 0.25} + 1: {'wetting' : Lw, + 'nonwetting': Lnw}, + 2: {'wetting' : Lw, + 'nonwetting': Lnw}, + 3: {'wetting' : Lw, + 'nonwetting': Lnw}, + 4: {'wetting' : Lw, + 'nonwetting': Lnw} } # subdom_num : lambda parameter for the L-scheme lambda_param = { - 1: {'wetting': l_param_w, - 'nonwetting': l_param_nw},# - 2: {'wetting': l_param_w, - 'nonwetting': l_param_nw},# - 3: {'wetting': l_param_w, - 'nonwetting': l_param_nw},# - 4: {'wetting': l_param_w, - 'nonwetting': l_param_nw},# + 1: {'wetting': lambda_w, + 'nonwetting': lambda_nw},# + 2: {'wetting': lambda_w, + 'nonwetting': lambda_nw},# + 3: {'wetting': lambda_w, + 'nonwetting': lambda_nw},# + 4: {'wetting': lambda_w, + 'nonwetting': lambda_nw},# } @@ -303,7 +311,7 @@ def rel_perm1w_prime(s): def rel_perm1nw_prime(s): # relative permeabilty on subdomain1 - return 2*(1-s) + return -2*(1-s) # definition of the derivatives of the relative permeabilities # relative permeabilty functions on subdomain 1 @@ -313,7 +321,7 @@ def rel_perm2w_prime(s): def rel_perm2nw_prime(s): # relative permeabilty on subdomain1 - return 2*(1-s) + return -2*(1-s) _rel_perm1w_prime = ft.partial(rel_perm1w_prime) _rel_perm1nw_prime = ft.partial(rel_perm1nw_prime) @@ -409,83 +417,38 @@ p_e_sym = { 'nonwetting': -1+ 0*t}, } -pc_e_sym = { - 1: p_e_sym[1]['nonwetting'] - p_e_sym[1]['wetting'], - 2: p_e_sym[2]['nonwetting'] - p_e_sym[2]['wetting'], - 3: p_e_sym[3]['nonwetting'] - p_e_sym[3]['wetting'], - 4: p_e_sym[4]['nonwetting'] - p_e_sym[4]['wetting'] -} - -# turn above symbolic code into exact solution for dolphin and -# construct the rhs that matches the above exact solution. 
-dtS = dict() -div_flux = dict() -source_expression = dict() -exact_solution = dict() -initial_condition = dict() +pc_e_sym = dict() for subdomain, isR in isRichards.items(): - dtS.update({subdomain: dict()}) - div_flux.update({subdomain: dict()}) - source_expression.update({subdomain: dict()}) - exact_solution.update({subdomain: dict()}) - initial_condition.update({subdomain: dict()}) if isR: - subdomain_has_phases = ["wetting"] + pc_e_sym.update({subdomain: -p_e_sym[subdomain]['wetting']}) else: - subdomain_has_phases = ["wetting", "nonwetting"] - - # conditional for S_pc_prime - pc = pc_e_sym[subdomain] - dtpc = sym.diff(pc, t, 1) - dxpc = sym.diff(pc, x, 1) - dypc = sym.diff(pc, y, 1) - S = sym.Piecewise((S_pc_sym[subdomain](pc), pc > 0), (1, True)) - dS = sym.Piecewise((S_pc_sym_prime[subdomain](pc), pc > 0), (0, True)) - for phase in subdomain_has_phases: - # Turn above symbolic expression for exact solution into c code - exact_solution[subdomain].update( - {phase: sym.printing.ccode(p_e_sym[subdomain][phase])} - ) - # save the c code for initial conditions - initial_condition[subdomain].update( - {phase: sym.printing.ccode(p_e_sym[subdomain][phase].subs(t, 0))} - ) - if phase == "nonwetting": - dtS[subdomain].update( - {phase: -porosity[subdomain]*dS*dtpc} - ) - else: - dtS[subdomain].update( - {phase: porosity[subdomain]*dS*dtpc} - ) - pa = p_e_sym[subdomain][phase] - dxpa = sym.diff(pa, x, 1) - dxdxpa = sym.diff(pa, x, 2) - dypa = sym.diff(pa, y, 1) - dydypa = sym.diff(pa, y, 2) - mu = viscosity[subdomain][phase] - ka = relative_permeability[subdomain][phase] - dka = ka_prime[subdomain][phase] - rho = densities[subdomain][phase] - g = gravity_acceleration - - if phase == "nonwetting": - # x part of div(flux) for nonwetting - dxdxflux = -1/mu*dka(1-S)*dS*dxpc*dxpa + 1/mu*dxdxpa*ka(1-S) - # y part of div(flux) for nonwetting - dydyflux = -1/mu*dka(1-S)*dS*dypc*(dypa - rho*g) \ - + 1/mu*dydypa*ka(1-S) - else: - # x part of div(flux) for wetting - dxdxflux = 1/mu*dka(S)*dS*dxpc*dxpa + 1/mu*dxdxpa*ka(S) - # y part of div(flux) for wetting - dydyflux = 1/mu*dka(S)*dS*dypc*(dypa - rho*g) + 1/mu*dydypa*ka(S) - div_flux[subdomain].update({phase: dxdxflux + dydyflux}) - contructed_rhs = dtS[subdomain][phase] - div_flux[subdomain][phase] - source_expression[subdomain].update( - {phase: sym.printing.ccode(contructed_rhs)} - ) - # print(f"source_expression[{subdomain}][{phase}] =", source_expression[subdomain][phase]) + pc_e_sym.update({subdomain: p_e_sym[subdomain]['nonwetting'] + - p_e_sym[subdomain]['wetting']}) + + +symbols = {"x": x, + "y": y, + "t": t} +# turn above symbolic code into exact solution for dolphin and +# construct the rhs that matches the above exact solution. +exact_solution_example = hlp.generate_exact_solution_expressions( + symbols=symbols, + isRichards=isRichards, + symbolic_pressure=p_e_sym, + symbolic_capillary_pressure=pc_e_sym, + saturation_pressure_relationship=S_pc_sym, + saturation_pressure_relationship_prime=S_pc_sym_prime, + viscosity=viscosity, + porosity=porosity, + relative_permeability=relative_permeability, + relative_permeability_prime=ka_prime, + densities=densities, + gravity_acceleration=gravity_acceleration, + include_gravity=include_gravity, + ) +source_expression = exact_solution_example['source'] +exact_solution = exact_solution_example['exact_solution'] +initial_condition = exact_solution_example['initial_condition'] # Dictionary of dirichlet boundary conditions. 
dirichletBC = dict() @@ -521,8 +484,9 @@ write_to_file = { } # initialise LDD simulation class -simulation = ldd.LDDsimulation(tol=1E-14, debug=False, LDDsolver_tol=1E-5) -simulation.set_parameters(output_dir="./output/", +simulation = ldd.LDDsimulation(tol=1E-14, debug=debugflag, LDDsolver_tol=solver_tol) +simulation.set_parameters(use_case=use_case, + output_dir=output_string, subdomain_def_points=subdomain_def_points, isRichards=isRichards, interface_def_points=interface_def_points, @@ -544,12 +508,12 @@ simulation.set_parameters(output_dir="./output/", dirichletBC_expression_strings=dirichletBC, exact_solution=exact_solution, densities=densities, - include_gravity=True, + include_gravity=include_gravity, write2file=write_to_file, ) simulation.initialise() # print(simulation.__dict__) -simulation.run() +simulation.run(analyse_condition=analyse_condition) # simulation.LDDsolver(time=0, debug=True, analyse_timestep=True) # df.info(parameters, True) diff --git a/TP-TP-layered-soil-case-with-inner-patch-constant-solution/TP-TP-layered_soil_with_inner_patch_const_solution.py b/TP-TP-layered-soil-case-with-inner-patch-constant-solution/TP-TP-layered_soil_with_inner_patch_const_solution.py index a12790d..c287112 100755 --- a/TP-TP-layered-soil-case-with-inner-patch-constant-solution/TP-TP-layered_soil_with_inner_patch_const_solution.py +++ b/TP-TP-layered-soil-case-with-inner-patch-constant-solution/TP-TP-layered_soil_with_inner_patch_const_solution.py @@ -16,29 +16,36 @@ import typing as tp import functools as ft import domainPatch as dp import LDDsimulation as ldd +import helpers as hlp # init sympy session sym.init_printing() -# ----------------------------------------------------------------------------# -# ------------------- MESH ---------------------------------------------------# -# ----------------------------------------------------------------------------# -mesh_resolution = 40 -# ----------------------------------------:-----------------------------------# -# ------------------- TIME ---------------------------------------------------# -# ----------------------------------------------------------------------------# -timestep_size = 0.0001 -number_of_timesteps = 100 -# decide how many timesteps you want analysed. Analysed means, that we write -# out subsequent errors of the L-iteration within the timestep. -number_of_timesteps_to_analyse = 10 +use_case = "TP-layered-soil-inner-patch-constant-solution" +solver_tol = 1E-6 +max_iter_num = 100 + +############ GRID #######################ü +mesh_resolution = 30 +timestep_size = 0.001 +number_of_timesteps = 101 +# decide how many timesteps you want analysed. Analysed means, that we write out +# subsequent errors of the L-iteration within the timestep. 
+number_of_timesteps_to_analyse = 5 starttime = 0 -Lw = 100/timestep_size -Lnw = Lw +Lw = 1 #/timestep_size +Lnw=Lw + +lambda_w = 40 +lambda_nw = 40 + +include_gravity = True +debugflag = True +analyse_condition = False + +output_string = "./output/post-fix-number_of_timesteps{}_".format(number_of_timesteps) -l_param_w = 40 -l_param_nw = 40 # global domain subdomain0_vertices = [df.Point(-1.0,-1.0), # @@ -91,11 +98,19 @@ interface34_vertices = [interface36_vertices[1], interface23_vertices[2]] # interface36 -interface45_vertices = [interface56_vertices[0], - df.Point(0.7, -0.2), +# interface45_vertices = [interface56_vertices[0], +# df.Point(0.9, -0.2),#df.Point(0.7, -0.2), +# interface25_vertices[0] +# ] + +interface45_vertices_a = [interface56_vertices[0], + df.Point(0.7, -0.2),#df.Point(0.7, -0.2), + ] +interface45_vertices_b = [df.Point(0.7, -0.2),#df.Point(0.7, -0.2), interface25_vertices[0] ] + # # subdomain1. # subdomain1_vertices = [interface12_vertices[0], # interface12_vertices[1], @@ -146,7 +161,9 @@ interface_def_points = [interface12_vertices, interface25_vertices, interface34_vertices, interface36_vertices, - interface45_vertices, + # interface45_vertices, + interface45_vertices_a, + interface45_vertices_b, interface46_vertices, interface56_vertices, ] @@ -157,6 +174,7 @@ adjacent_subdomains = [[1,2], [3,4], [3,6], [4,5], + [4,5], [4,6], [5,6] ] @@ -219,7 +237,8 @@ subdomain3_outer_boundary_verts = { # subdomain3 subdomain4_vertices = [interface46_vertices[0], interface46_vertices[1], - interface45_vertices[1], + # interface45_vertices[1], + interface45_vertices_a[1], interface24_vertices[1], interface24_vertices[0], interface34_vertices[1] @@ -232,8 +251,8 @@ subdomain5_vertices = [interface56_vertices[0], interface56_vertices[2], interface25_vertices[1], interface25_vertices[0], - interface45_vertices[1], - interface45_vertices[0] + interface45_vertices_b[1], + interface45_vertices_b[0] ] subdomain5_outer_boundary_verts = { @@ -312,8 +331,8 @@ densities = dict() porosity = dict() Ldict = {'wetting': Lw, 'nonwetting': Lnw} -Lambda = {'wetting': l_param_w, - 'nonwetting': l_param_nw} +Lambda = {'wetting': lambda_w, + 'nonwetting': lambda_nw} L = dict() lambda_param = dict() for subdomain, isR in isRichards.items(): @@ -395,18 +414,18 @@ gravity_acceleration = 9.81 # # # subdom_num : lambda parameter for the L-scheme # lambda_param = { -# 1: {'wetting': l_param_w, -# 'nonwetting': l_param_nw},# -# 2: {'wetting': l_param_w, -# 'nonwetting': l_param_nw},# -# 3: {'wetting': l_param_w, -# 'nonwetting': l_param_nw},# -# 4: {'wetting': l_param_w, -# 'nonwetting': l_param_nw},# -# 5: {'wetting': l_param_w, -# 'nonwetting': l_param_nw},# -# 6: {'wetting': l_param_w, -# 'nonwetting': l_param_nw},# +# 1: {'wetting': lambda_w, +# 'nonwetting': lambda_nw},# +# 2: {'wetting': lambda_w, +# 'nonwetting': lambda_nw},# +# 3: {'wetting': lambda_w, +# 'nonwetting': lambda_nw},# +# 4: {'wetting': lambda_w, +# 'nonwetting': lambda_nw},# +# 5: {'wetting': lambda_w, +# 'nonwetting': lambda_nw},# +# 6: {'wetting': lambda_w, +# 'nonwetting': lambda_nw},# # } @@ -471,7 +490,7 @@ def rel_perm1w_prime(s): def rel_perm1nw_prime(s): # relative permeabilty on subdomain1 - return 2*(1-s) + return -2*(1-s) # # definition of the derivatives of the relative permeabilities # # relative permeabilty functions on subdomain 1 @@ -481,7 +500,7 @@ def rel_perm1nw_prime(s): # # def rel_perm2nw_prime(s): # # relative permeabilty on subdomain1 -# return 2*(l_param_w1-s) +# return 2*(lambda_w1-s) _rel_perm1w_prime = 
ft.partial(rel_perm1w_prime) _rel_perm1nw_prime = ft.partial(rel_perm1nw_prime) @@ -617,6 +636,7 @@ t = sym.symbols('t', positive=True) p_dict = {'wetting': -3 + 0*t, 'nonwetting': -1 + 0*t} + p_e_sym = dict() pc_e_sym = dict() for subdomain, isR in isRichards.items(): @@ -628,121 +648,43 @@ for subdomain, isR in isRichards.items(): p_e_sym[subdomain].update({phase: p_dict[phase]}) if isR: - pc_e_sym.update({subdomain: -p_e_sym[subdomain]['wetting'].copy()}) + pc_e_sym.update({subdomain: -p_e_sym[subdomain]['wetting']}) else: pc_e_sym.update({subdomain: p_e_sym[subdomain]['nonwetting'] - p_e_sym[subdomain]['wetting']}) -# p_e_sym = { -# 1: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + (x-6.5)*(x-6.5) + (y-5.0)*(y-5.0)), -# 'nonwetting': - 2 - t*(1 + (y-5.0) + x**2)**2 -sym.sqrt(2+t**2)*(1 + (y-5.0)) }, -# 2: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + (x-6.5)*(x-6.5) + (y-5.0)*(y-5.0)), -# 'nonwetting': - 2 - t*(1 + (y-5.0) + x**2)**2 -sym.sqrt(2+t**2)*(1 + (y-5.0)) }, -# 3: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + (x-6.5)*(x-6.5)), -# 'nonwetting': - 2 - t*(1 + (y-5.0) + x**2)**2 -sym.sqrt(2+t**2)*(1 + (y-5.0)) }, -# 4: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + (x-6.5)*(x-6.5)), -# 'nonwetting': - 2 - t*(1 + (y-5.0) + x**2)**2 -sym.sqrt(2+t**2)*(1 + (y-5.0)) }, -# 5: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + (x-6.5)*(x-6.5)), -# 'nonwetting': - 2 - t*(1 + (y-5.0) + x**2)**2 -sym.sqrt(2+t**2)*(1 + (y-5.0)) }, -# 6: {'wetting': 1.0 - (1.0 + t*t)*(1.0 + (x-6.5)*(x-6.5)), -# 'nonwetting': - 2 - t*(1 + (y-5.0) + x**2)**2 -sym.sqrt(2+t**2)*(1 + (y-5.0)) }, -# # 2: {'wetting': 1.0 - (1.0 + t*t)*(10.0 + x*x + (y-5.0)*(y-5.0)), -# # 'nonwetting': - 2 - t*(1 + (y-5.0) + x**2)**2 -sym.sqrt(2+t**2)*(1 + (y-5.0))}, -# # 3: {'wetting': 1.0 - (1.0 + t*t)*(10.0 + x*x + (y-5.0)*(y-5.0)*3*sym.sin(-2*t+2*x)*sym.sin(1/2*y-1.2*t)), -# # 'nonwetting': - 2 - t*(1 + x**2)**2 -sym.sqrt(2+t**2)}, -# # 4: {'wetting': 1.0 - (1.0 + t*t)*(10.0 + x*x + (y-5.0)*(y-5.0)*3*sym.sin(-2*t+2*x)*sym.sin(1/2*y-1.2*t)), -# # 'nonwetting': - 2 - t*(1 + x**2)**2 -sym.sqrt(2+t**2)} -# } - -# pc_e_sym = { -# 1: p_e_sym[1]['nonwetting'] - p_e_sym[1]['wetting'], -# 2: p_e_sym[2]['nonwetting'] - p_e_sym[2]['wetting'], -# 3: p_e_sym[3]['nonwetting'] - p_e_sym[3]['wetting'], -# 4: p_e_sym[4]['nonwetting'] - p_e_sym[4]['wetting'], -# 5: p_e_sym[5]['nonwetting'] - p_e_sym[5]['wetting'], -# 6: p_e_sym[5]['nonwetting'] - p_e_sym[6]['wetting'] -# } - -# pc_e_sym = { -# 1: -p_e_sym[1]['wetting'], -# 2: -p_e_sym[2]['wetting'], -# 3: -p_e_sym[3]['wetting'], -# 4: -p_e_sym[4]['wetting'], -# 5: -p_e_sym[5]['wetting'], -# 6: -p_e_sym[6]['wetting'] -# } +# pc_e_sym = dict() +# for subdomain, isR in isRichards.items(): +# if isR: +# pc_e_sym.update({subdomain: -p_e_sym[subdomain]['wetting']}) +# else: +# pc_e_sym.update({subdomain: p_e_sym[subdomain]['nonwetting'] +# - p_e_sym[subdomain]['wetting']}) +symbols = {"x": x, + "y": y, + "t": t} # turn above symbolic code into exact solution for dolphin and # construct the rhs that matches the above exact solution. 
-dtS = dict() -div_flux = dict() -source_expression = dict() -exact_solution = dict() -initial_condition = dict() -for subdomain, isR in isRichards.items(): - dtS.update({subdomain: dict()}) - div_flux.update({subdomain: dict()}) - source_expression.update({subdomain: dict()}) - exact_solution.update({subdomain: dict()}) - initial_condition.update({subdomain: dict()}) - if isR: - subdomain_has_phases = ["wetting"] - else: - subdomain_has_phases = ["wetting", "nonwetting"] - - # conditional for S_pc_prime - pc = pc_e_sym[subdomain] - dtpc = sym.diff(pc, t, 1) - dxpc = sym.diff(pc, x, 1) - dypc = sym.diff(pc, y, 1) - S = sym.Piecewise((S_pc_sym[subdomain](pc), pc > 0), (1, True)) - dS = sym.Piecewise((S_pc_sym_prime[subdomain](pc), pc > 0), (0, True)) - for phase in subdomain_has_phases: - # Turn above symbolic expression for exact solution into c code - exact_solution[subdomain].update( - {phase: sym.printing.ccode(p_e_sym[subdomain][phase])} - ) - # save the c code for initial conditions - initial_condition[subdomain].update( - {phase: sym.printing.ccode(p_e_sym[subdomain][phase].subs(t, 0))} - ) - if phase == "nonwetting": - dtS[subdomain].update( - {phase: -porosity[subdomain]*dS*dtpc} - ) - else: - dtS[subdomain].update( - {phase: porosity[subdomain]*dS*dtpc} - ) - pa = p_e_sym[subdomain][phase] - dxpa = sym.diff(pa, x, 1) - dxdxpa = sym.diff(pa, x, 2) - dypa = sym.diff(pa, y, 1) - dydypa = sym.diff(pa, y, 2) - mu = viscosity[subdomain][phase] - ka = relative_permeability[subdomain][phase] - dka = ka_prime[subdomain][phase] - rho = densities[subdomain][phase] - g = gravity_acceleration - - if phase == "nonwetting": - # x part of div(flux) for nonwetting - dxdxflux = -1/mu*dka(1-S)*dS*dxpc*dxpa + 1/mu*dxdxpa*ka(1-S) - # y part of div(flux) for nonwetting - dydyflux = -1/mu*dka(1-S)*dS*dypc*(dypa - rho*g) \ - + 1/mu*dydypa*ka(1-S) - else: - # x part of div(flux) for wetting - dxdxflux = 1/mu*dka(S)*dS*dxpc*dxpa + 1/mu*dxdxpa*ka(S) - # y part of div(flux) for wetting - dydyflux = 1/mu*dka(S)*dS*dypc*(dypa - rho*g) + 1/mu*dydypa*ka(S) - div_flux[subdomain].update({phase: dxdxflux + dydyflux}) - contructed_rhs = dtS[subdomain][phase] - div_flux[subdomain][phase] - source_expression[subdomain].update( - {phase: sym.printing.ccode(contructed_rhs)} - ) - # print(f"source_expression[{subdomain}][{phase}] =", source_expression[subdomain][phase]) +exact_solution_example = hlp.generate_exact_solution_expressions( + symbols=symbols, + isRichards=isRichards, + symbolic_pressure=p_e_sym, + symbolic_capillary_pressure=pc_e_sym, + saturation_pressure_relationship=S_pc_sym, + saturation_pressure_relationship_prime=S_pc_sym_prime, + viscosity=viscosity, + porosity=porosity, + relative_permeability=relative_permeability, + relative_permeability_prime=ka_prime, + densities=densities, + gravity_acceleration=gravity_acceleration, + include_gravity=include_gravity, + ) +source_expression = exact_solution_example['source'] +exact_solution = exact_solution_example['exact_solution'] +initial_condition = exact_solution_example['initial_condition'] # Dictionary of dirichlet boundary conditions. 
dirichletBC = dict() @@ -778,8 +720,14 @@ write_to_file = { } # initialise LDD simulation class -simulation = ldd.LDDsimulation(tol=1E-14, debug=True, LDDsolver_tol=1E-7) -simulation.set_parameters(output_dir="./output/", +simulation = ldd.LDDsimulation( + tol=1E-14, + debug=debugflag, + LDDsolver_tol=solver_tol, + max_iter_num=max_iter_num + ) +simulation.set_parameters(use_case=use_case, + output_dir=output_string, subdomain_def_points=subdomain_def_points, isRichards=isRichards, interface_def_points=interface_def_points, @@ -801,12 +749,12 @@ simulation.set_parameters(output_dir="./output/", dirichletBC_expression_strings=dirichletBC, exact_solution=exact_solution, densities=densities, - include_gravity=True, + include_gravity=include_gravity, write2file=write_to_file, ) simulation.initialise() # print(simulation.__dict__) -simulation.run() +simulation.run(analyse_condition=analyse_condition) # simulation.LDDsolver(time=0, debug=True, analyse_timestep=True) # df.info(parameters, True) diff --git a/TP-TP-layered-soil-case-with-inner-patch/TP-TP-layered_soil_with_inner_patch-realistic.py b/TP-TP-layered-soil-case-with-inner-patch/TP-TP-layered_soil_with_inner_patch-realistic.py index 850d249..13ac7ea 100755 --- a/TP-TP-layered-soil-case-with-inner-patch/TP-TP-layered_soil_with_inner_patch-realistic.py +++ b/TP-TP-layered-soil-case-with-inner-patch/TP-TP-layered_soil_with_inner_patch-realistic.py @@ -21,29 +21,29 @@ import helpers as hlp # init sympy session sym.init_printing() -use_case = "TP-TP-layered-soil-with-inner-patch" -solver_tol = 1E-6 +use_case = "TP-TP-layered-soil-with-inner-patch-realistic-with-gravity" +solver_tol = 5E-6 ############ GRID #######################ü -mesh_resolution = 50 -timestep_size = 0.001 -number_of_timesteps = 15 +mesh_resolution = 60 +timestep_size = 0.0001 +number_of_timesteps = 30 # decide how many timesteps you want analysed. Analysed means, that we write out # subsequent errors of the L-iteration within the timestep. number_of_timesteps_to_analyse = 10 starttime = 0 -Lw = 0.25 #/timestep_size +Lw = 10 #/timestep_size Lnw=Lw -lambda_w = 41 -lambda_nw = 41 +lambda_w = 4 +lambda_nw = 4 -include_gravity = False -debugflag = False +include_gravity = True +debugflag = True analyse_condition = False -output_string = "./output/test-nondirichlet_number_of_timesteps{}_".format(number_of_timesteps) +output_string = "./output/realistic-with_gravity-post-bugfix-nondirichlet_number_of_timesteps{}_".format(number_of_timesteps) # global domain subdomain0_vertices = [df.Point(-1.0,-1.0), # diff --git a/TP-one-patch/TP-one-patch-purely-postive-pc.py b/TP-one-patch/TP-one-patch-purely-postive-pc.py index 783c93b..ca3085c 100755 --- a/TP-one-patch/TP-one-patch-purely-postive-pc.py +++ b/TP-one-patch/TP-one-patch-purely-postive-pc.py @@ -14,16 +14,16 @@ import helpers as hlp sym.init_printing() -solver_tol = 1E-6 +solver_tol = 4E-6 ############ GRID ####################### use_case="TP-one-patch-purely-positive-pc" -mesh_resolution = 30 +mesh_resolution = 60 timestep_size = 0.0005 number_of_timesteps = 2500 # decide how many timesteps you want analysed. Analysed means, that we write out # subsequent errors of the L-iteration within the timestep. 
-number_of_timesteps_to_analyse = 1 +number_of_timesteps_to_analyse = 5 starttime = 0 Lw = 0.25 #/timestep_size @@ -32,12 +32,12 @@ Lnw=Lw l_param_w = 40 l_param_nw = 40 -include_gravity = False -debugflag = True +include_gravity = True +debugflag = False analyse_condition = True use_case="TP-one-patch-purely-positive-pc" -output_string = "./output/pure_positive_pc_number_of_timesteps{}_".format(number_of_timesteps) +output_string = "./output/2019-08-26-pure_positive_pc_number_of_timesteps{}_".format(number_of_timesteps) ##### Domain and Interface #### # global simulation domain domain @@ -289,7 +289,7 @@ cutoff = gaussian/(gaussian + zero_on_shrinking) # sym.Piecewise((0, is_inside)) p_e_sym = { - 0: {'wetting': (-3 - (1+t*t)*(1 + x*x + y*y)), #*cutoff, + 0: {'wetting': (-6 - (1+t*t)*(1 + x*x + y*y)), #*cutoff, 'nonwetting': (-1 -t*(1.1+y + x**2))}, #*cutoff}, } -- GitLab
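
Note (not part of the patch): the new debug output in init_facet_maps verifies the local-to-global facet numbering by comparing facet endpoints with np.allclose. The following standalone sketch illustrates that matching and round-trip check on plain dictionaries of the form {facet_index: [vertex0_xy, vertex1_xy]}, i.e. the shape produced by init_facet_dictionaries according to the patch. The helper names (facets_coincide, build_local_to_global_facet_map) and the toy data are hypothetical, chosen only for illustration; the tolerances mirror the ones used in the patch.

import numpy as np

def facets_coincide(verts_a, verts_b, rtol=1e-10, atol=1e-14):
    """Two facets coincide iff both endpoints match, in either order."""
    matches = sum(
        np.allclose(va, vb, rtol=rtol, atol=atol)
        for va in verts_a for vb in verts_b
    )
    return matches == 2

def build_local_to_global_facet_map(local_facets, global_facets):
    """Map local (subdomain) facet indices to global ones by endpoint comparison."""
    local2global = {}
    for loc_ind, loc_verts in local_facets.items():
        for glob_ind, glob_verts in global_facets.items():
            if facets_coincide(loc_verts, glob_verts):
                local2global[loc_ind] = glob_ind
                break
    return local2global

# toy data: one interface made of two facets on the line y = 0
global_facets = {7: [np.array([0.0, 0.0]), np.array([0.5, 0.0])],
                 8: [np.array([0.5, 0.0]), np.array([1.0, 0.0])]}
local_facets  = {0: [np.array([0.5, 0.0]), np.array([1.0, 0.0])],
                 1: [np.array([0.0, 0.0]), np.array([0.5, 0.0])]}

local2global = build_local_to_global_facet_map(local_facets, global_facets)
global2local = {g: l for l, g in local2global.items()}

# round-trip sanity check in the spirit of the new debug output
for loc_ind, glob_ind in local2global.items():
    assert facets_coincide(local_facets[loc_ind], global_facets[glob_ind])
    assert global2local[glob_ind] == loc_ind
print(local2global)   # {0: 8, 1: 7}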
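
Note (not part of the patch): the patch replaces slice assignment into Function.vector() with get_local()/set_local() when assembling the capillary pressure p_c = p_nw - p_w. The sketch below shows that pattern in isolation against the legacy FEniCS/dolfin API; it requires a working dolfin installation, and the mesh, expressions and function names are illustrative, not taken from the project.

import dolfin as df

mesh = df.UnitSquareMesh(8, 8)
V = df.FunctionSpace(mesh, "P", 1)

# illustrative wetting / nonwetting pressure fields on the same space
p_w = df.interpolate(df.Expression("-3.0 - x[0]*x[0]", degree=2), V)
p_nw = df.interpolate(df.Expression("-1.0 - x[1]", degree=2), V)

pc = df.Function(V)
# get_local()/set_local() copies the dof values explicitly instead of relying on
# slice assignment into the temporary returned by vector(), which is what the
# patch switches to in LDDsimulation.py and domainPatch.py.
pc_vals = p_nw.vector().get_local() - p_w.vector().get_local()
pc.vector().set_local(pc_vals)
pc.vector().apply("insert")   # finalise the assignment (relevant in parallel)
pc.rename("pc_num", "pc_num")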