diff --git a/LDDsimulation/LDDsimulation.py b/LDDsimulation/LDDsimulation.py
index e250eeb30fc9fc34f09d81eb1a77e241375de1af..268c558c0479a503c2e26e8bf29c29e6cfa75839 100644
--- a/LDDsimulation/LDDsimulation.py
+++ b/LDDsimulation/LDDsimulation.py
@@ -15,7 +15,7 @@ import helpers as hlp
 import pandas as pd
 import sys
 import copy
-# import multiprocessing as mp
+import multiprocessing as mp
 # import errno
 import h5py
 from termcolor import colored
@@ -422,9 +422,8 @@ class LDDsimulation(object):
                     print(f"{errortype}: {error}\n")
         return self.output
 
-
-    def LDDsolver(self, time: float = None, #
-                  debug: bool = False, #
+    def LDDsolver(self,
+                  debug: bool = False,
                   analyse_timestep: bool = False,
                   analyse_condition: bool = False):
         """ calulate the solution on all patches for the next timestep
@@ -435,155 +434,200 @@ class LDDsimulation(object):
 
         PARAMATERS
         ---------------------
-        time    float, optional   time at which to run the solver. This is internally
-                                  set to self.t if no time is given. This variable
-                                  is there for development purposes.
-        debug   bool, optional    debug flag used to toggle the amount of output
-                                  being displayed.
+        debug   bool, optional    debug flag used to toggle the amount of
+                                  output being displayed.
         analyse_timestep   bool, optional,  flag to toggle wether or not to run
-                                  routines designed to output data within one timestep
-                                  iteration.
+                                  routines designed to output data within one
+                                  timestep iteration.
         """
-        # in case no time is given, use the time of the class.
-        if time is None:
-            time = self.t
-
-        # numpy array holding for each subdomain the maximum of the relative error
-        # of subsequent iterations of both phases on the subdomain.
-        # max(subsequent_error) is used as a measure if the solver is done or not.
-        max_subsequent_error = np.zeros(self._number_of_subdomains, dtype=float)
-        error_criterion = self.LDDsolver_tol#min(1E-9, subdomain.mesh.hmax()**2*subdomain.timestep_size)
-        # in case analyse_timestep is None, solution_over_iteration_within_timestep is None
-        solution_over_iteration_within_timestep = self.prepare_LDDsolver(analyse_timestep = analyse_timestep)
-        ### actual iteration starts here
+        time = self.t
+
+        # numpy array holding, for each subdomain, the maximum over both
+        # phases of the relative error between subsequent iterations on that
+        # subdomain. max(subsequent_error) is used as a measure of whether
+        # the solver is done.
+        max_subsequent_error = np.zeros(
+            self._number_of_subdomains, dtype=float
+            )
+        # in case analyse_timestep is False,
+        # solution_over_iteration_within_timestep is None
+        solution_over_iteration_within_timestep = self.prepare_LDDsolver(
+                analyse_timestep=analyse_timestep
+            )
+        # actual iteration starts here
         iteration = 0
         # gobal stopping criterion for the iteration.
         all_subdomains_are_done = False
         while iteration < self._max_iter_num and not all_subdomains_are_done:
             iteration += 1
-            # we need to loop over the subdomains and solve an L-scheme type step
-            # on each subdomain. here it should be possible to parallelise in
+            # we need to loop over the subdomains and solve an L-scheme type
+            # step on each subdomain.
+            # Here it should be possible to parallelise in
             # the future.
-            for sd_index, subdomain in self.subdomain.items():
+            for sd_index in self.subdomain.keys():
                 # set subdomain iteration to current iteration
-                subdomain.iteration_number = iteration
-                # solve the problem on subdomain
-                condition_nums = self.Lsolver_step(
-                                  subdomain_index=sd_index,#
-                                  debug=debug,
-                                  analyse_condition=analyse_condition
-                                  )
-                # bypass subsequent error calculation for the first iteration, becauses
-                # it is zero anyways.
-                #if iteration >= 1:
-                subsequent_iter_error = dict()
-                if debug:
-                    print("LDD simulation use case: {}".format(self.use_case))
-                for phase in subdomain.has_phases:
-                    error = df.Function(subdomain.function_space["pressure"][phase])
-                    error.assign(subdomain.pressure[phase] - subdomain.pressure_prev_iter[phase])
-                    error_calculated = df.norm(error, 'L2')
-                    subsequent_iter_error.update(
-                        {phase: error_calculated}
-                        )
-                    if debug:
-                        debugstring = "time = {} (at timestep".format(time) +\
-                            "{}): subsequent ".formal(self.timestep_num) +\
-                            "error on subdomain {} ".format(sd_index) +\
-                            "for {} phase in ".format(phase) +\
-                            "iteration {} = ".format(iteration)
-                        print(debugstring, subsequent_iter_error[phase])
-
-                # calculate the maximum over phase of subsequent errors for the
-                # stopping criterion.
-                max_subsequent_error[sd_index-1] = max(subsequent_iter_error.values())
-
-                if analyse_timestep:
-                    # save the calculated L2 errors to csv file for plotting
-                    # with pgfplots
-                    if self.write2file['subsequent_errors']:
-                        subsequent_error_filename = self.output_dir\
-                            +self.output_filename_parameter_part[sd_index]\
-                            +"subsequent_iteration_errors" +"_at_timestep"+\
-                            "{number}".format(number=self.timestep_num) +".csv"  #"{number:.{digits}f}".format(number=time, digits=4)
-                        self.write_subsequent_errors_to_csv(
-                            filename = subsequent_error_filename, #
-                            subdomain_index = sd_index,
-                            errors = subsequent_iter_error
-                            )
-                    if analyse_condition and self.write2file['condition_numbers']:
-                        # save the calculated condition numbers of the assembled
-                        # matrices a separate file for monitoring
-                        condition_number_filename = self.output_dir\
-                            +self.output_filename_parameter_part[sd_index]\
-                            +"condition_numbers" +"_at_timestep"+\
-                            "{number}".format(number=self.timestep_num) +".csv"
-                        self.write_condition_numbers_to_csv(
-                            filename = condition_number_filename, #
-                            subdomain_index = sd_index,
-                            condition_number = condition_nums
+                Lsolver_step_proc = mp.Process(
+                            target=self.run_Lsolver_step,
+                            args=(
+                                iteration,
+                                sd_index,
+                                max_subsequent_error,
+                                solution_over_iteration_within_timestep,
+                                debug,
+                                analyse_timestep,
+                                analyse_condition,
                             )
-
-                # prepare next iteration
-                # write the newly calculated solution to the interface dictionaries
-                # of the subdomain for communication.
-                subdomain.write_pressure_to_interfaces()
-
-                if analyse_timestep and self.write2file['L_iterations_per_timestep']:
-                    subdomain.write_solution_to_xdmf(#
-                        file = solution_over_iteration_within_timestep[sd_index], #
-                        # time has been set to the target timestep by the solver.
-                        # the timeste pat wich we are currently is one timestep_size
-                        # less than that.
-                        time = time-self.timestep_size,#
-                        write_iter_for_fixed_time = True,
                         )
-
-                for phase in subdomain.has_phases:
-                    subdomain.pressure_prev_iter[phase].assign(
-                        subdomain.pressure[phase]
-                    )
+                Lsolver_step_proc.start()
+                Lsolver_step_proc.join()
+                # self.encapsulated_Lsolver_step(
+                #     analyse_timestep=analyse_timestep,
+                #     analyse_condition=analyse_condition,
+                #     debug=debug,
+                #     iteration=iteration,
+                #     subdomain_index=sd_index,
+                #     subdomain=subdomain,
+                #     max_subsequent_error=max_subsequent_error,
+                #     sol_over_iter_file=solution_over_iteration_within_timestep,
+                # )
             # end loop over subdomains
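+            # NOTE (sketch, not part of the computation): start() followed
+            # immediately by join() still runs the subdomain steps one after
+            # another, and each child process only gets a copy of
+            # max_subsequent_error, so the assignment made inside
+            # run_Lsolver_step is not visible to the stopping criterion
+            # below; the same caveat applies to any other state the child
+            # mutates. One possible (untested) variant, assuming the rest of
+            # the subdomain state is communicated separately and the 'fork'
+            # start method is used so that self is available in the child,
+            # would launch all processes first and share the error array:
+            #
+            #     shared_error = mp.Array('d', self._number_of_subdomains)
+            #     processes = []
+            #     for sd_index in self.subdomain.keys():
+            #         proc = mp.Process(
+            #             target=self.run_Lsolver_step,
+            #             args=(iteration, sd_index, shared_error,
+            #                   solution_over_iteration_within_timestep,
+            #                   debug, analyse_timestep, analyse_condition),
+            #         )
+            #         processes.append(proc)
+            #         proc.start()
+            #     for proc in processes:
+            #         proc.join()
+            #     max_subsequent_error = np.frombuffer(shared_error.get_obj())
+            #
+            # mp.Array supports item assignment, so run_Lsolver_step could
+            # write into shared_error[sd_index - 1] unchanged.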
-            ##### stopping criterion for the solver.
+            # stopping criterion for the solver.
             total_subsequent_error = np.amax(max_subsequent_error)
             if debug:
-                print(f"the error criterion is tol = {error_criterion}")
-                print(f"time = {time} (at timestep {self.timestep_num}):: max subsequent error of both subdomains in iteration {iteration} is: ", total_subsequent_error)
-            if total_subsequent_error < error_criterion:
+                print(f"the error criterion is tol = {self.LDDsolver_tol}")
+                debugstring = f"time = {time} (at timestep " + \
+                    f"{self.timestep_num}):: max subsequent error of both " + \
+                    f"subdomains in iteration {iteration} is: "
+                print(debugstring, total_subsequent_error)
+            if total_subsequent_error < self.LDDsolver_tol:
                 # if debug:
-                print(f"all phases on all subdomains reached a subsequent error = {total_subsequent_error} < tol = {error_criterion} after {iteration} iterations.")
+                total_error_string = "all phases on all subdomains reached " + \
+                        f"a subsequent error = {total_subsequent_error} < " + \
+                        f"tol = {self.LDDsolver_tol} after " + \
+                        f"{iteration} iterations."
+                print(total_error_string)
                 all_subdomains_are_done = True
         # end iteration while loop.
 
+    def run_Lsolver_step(
+        self,
+        iteration: int,
+        subdomain_index: int,
+        max_subsequent_error: np.ndarray,
+        sol_over_iter_file: tp.Dict[int, tp.Type[SolutionFile]],
+        debug: bool = False,
+        analyse_timestep: bool = False,
+        analyse_condition: bool = False
+    ):
+        """Encapsulate Lsolver_step.
+
+        This wraps code that was originally part of self.LDDsolver above,
+        so that it can be run as a separate process via multiprocessing.
+        """
+        time = self.t
+        subdomain = self.subdomain[subdomain_index]
+        subdomain.iteration_number = iteration
+        sd_index = subdomain_index
+        # solve the problem on subdomain
+        condition_nums = self.Lsolver_step(
+                          subdomain_index=sd_index,
+                          debug=debug,
+                          analyse_condition=analyse_condition
+                          )
+        # bypass the subsequent error calculation for the first iteration,
+        # because it is zero anyway.
+        # if iteration >= 1:
+        subsequent_iter_error = dict()
+        if debug:
+            print("LDD simulation use case: {}".format(self.use_case))
+        for phase in subdomain.has_phases:
+            error = df.Function(subdomain.function_space["pressure"][phase])
+            error.assign(
+                subdomain.pressure[phase] - subdomain.pressure_prev_iter[phase]
+                )
+            error_calculated = df.norm(error, 'L2')
+            subsequent_iter_error.update(
+                {phase: error_calculated}
+                )
+            if debug:
+                debugstring = "time = {} (at timestep ".format(time) +\
+                    "{}): subsequent ".format(self.timestep_num) +\
+                    "error on subdomain {} ".format(sd_index) +\
+                    "for {} phase in ".format(phase) +\
+                    "iteration {} = ".format(iteration)
+                print(debugstring, subsequent_iter_error[phase])
 
-    def prepare_LDDsolver(self, time: float = None, #
-                  debug: bool = False, #
-                  analyse_timestep: bool = False) -> tp.Dict[int, tp.Type[SolutionFile]]:
-        """ initialise LDD iteration for current time time.
+        # calculate the maximum over the phases of the subsequent errors
+        # for the stopping criterion.
+        max_subsequent_error[sd_index-1] = max(subsequent_iter_error.values())
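+        # NOTE: when this method is executed in a separate process (as in
+        # LDDsolver above), this assignment only changes the child's copy of
+        # max_subsequent_error unless a shared array (e.g. mp.Array) is
+        # passed in instead of a plain numpy array.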
 
-        This method executes a few prelininary steps before actually starting
-        the L-iteration. Namely,
-            - create solution files for analysing time step if analyse_timestep == True
-            - Dirichlet boundary condition terms are evaluated to the current time
-            - all subdomain iteration numbers  on interfaces are set to 0.
-            - all subdomain iteration numbers subdomain.iteration_number are set
-              to 0.
-            - source terms are evaluated to the current time.
-            - the calculated solution pressure from the previous timestep
-              is assigned to pressure_prev for each subdomain.
-            - gl0 terms are calculated.
-            - write pressure to previous iteration dictionaries on all interfaces.
+        if analyse_timestep:
+            # save the calculated L2 errors to csv file for plotting
+            # with pgfplots
+            if self.write2file['subsequent_errors']:
+                subsequent_error_filename = self.output_dir\
+                    + self.output_filename_parameter_part[sd_index]\
+                    + "subsequent_iteration_errors" + "_at_timestep"\
+                    + "{number}".format(number=self.timestep_num) + ".csv"
+                # "{number:.{digits}f}".format(number=time, digits=4)
+                self.write_subsequent_errors_to_csv(
+                    filename=subsequent_error_filename,
+                    subdomain_index=sd_index,
+                    errors=subsequent_iter_error
+                    )
+            if analyse_condition and self.write2file['condition_numbers']:
+                # save the calculated condition numbers of the assembled
+                # matrices to a separate file for monitoring.
+                condition_number_filename = self.output_dir\
+                    + self.output_filename_parameter_part[sd_index]\
+                    + "condition_numbers" + "_at_timestep" \
+                    + "{number}".format(number=self.timestep_num) + ".csv"
+                self.write_condition_numbers_to_csv(
+                    filename=condition_number_filename,
+                    subdomain_index=sd_index,
+                    condition_number=condition_nums
+                    )
+
+        # prepare next iteration
+        # write the newly calculated solution to the interface dictionaries
+        # of the subdomain for communication.
+        subdomain.write_pressure_to_interfaces()
+
+        if analyse_timestep and self.write2file['L_iterations_per_timestep']:
+            subdomain.write_solution_to_xdmf(
+                file=sol_over_iter_file[sd_index],
+                # time has been set to the target timestep by the solver.
+                # the timestep at which we currently are is one timestep_size
+                # less than that.
+                time=time-self.timestep_size,
+                write_iter_for_fixed_time=True,
+                )
+
+        for phase in subdomain.has_phases:
+            subdomain.pressure_prev_iter[phase].assign(
+                subdomain.pressure[phase]
+            )
+
+    def prepare_LDDsolver(
+            self,
+            time: float = None,
+            debug: bool = False,
+            analyse_timestep: bool = False) -> tp.Dict[int, tp.Type[SolutionFile]]:
+        """Initialise LDD iteration for current time time.
+
+        This method runs for each subdomain self.prepare_subdomain
 
         PARAMETERS
         ------------------------------------
-        time    float, optional   time at which to run the solver. This is internally
-                                  set to self.t if no time is given. This variable
-                                  is there for development purposes.
-        debug   bool, optional    debug flag used to toggle the amount of output
-                                  being displayed.
+        time    float, optional   time at which to run the solver. This is
+                                  internally set to self.t if no time is given.
+                                  This variable is there for development
+                                  purposes.
+        debug   bool, optional    debug flag used to toggle the amount of
+                                  output being displayed.
         analyse_timestep   bool, optional,  flag to toggle wether or not to run
-                                  routines designed to output data within one timestep
-                                  iteration.
+                                  routines designed to output data within one
+                                  timestep iteration.
 
         OUTPUT
         ------------------------------------
@@ -605,56 +649,140 @@ class LDDsimulation(object):
         else:
             solution_over_iteration_within_timestep = None
         # before the iteration gets started all iteration numbers must be nulled
-        for ind, subdomain in self.subdomain.items():
-            # create xdmf files for writing out iterations if analyse_timestep is
-            # given.
-            if analyse_timestep:
-                filename = self.output_dir+self.output_filename_parameter_part[ind]\
-                + "solution_iterations_at_timestep"\
-                + "{number}".format(number=self.timestep_num) +".xdmf"
-                solution_over_iteration_within_timestep.update( #
-                    {ind: SolutionFile(self._mpi_communicator, filename)}
+        for subdomain_index in self.subdomain.keys():
+            prepare_subdomain_proc = mp.Process(
+                        target=self.prepare_subdomain,
+                        args=(
+                            subdomain_index,
+                            solution_over_iteration_within_timestep,
+                            debug,
+                            analyse_timestep,
+                        )
                     )
+            prepare_subdomain_proc.start()
+            prepare_subdomain_proc.join()
+            # self.prepare_subdomain(
+            #     subdomain_index=ind,
+            #     sol_over_iter_file=solution_over_iteration_within_timestep,
+            #     debug=debug,
+            #     analyse_timestep=analyse_timestep,
+            # )
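+            # NOTE (sketch, not part of the computation): prepare_subdomain
+            # runs in a child process here, so the SolutionFile it adds to
+            # solution_over_iteration_within_timestep only lands in the
+            # child's copy of the dictionary, and the dictionary returned
+            # below stays empty even when analyse_timestep is True. A
+            # possible (untested) workaround is a managed proxy dict, e.g.
+            #
+            #     manager = mp.Manager()
+            #     solution_over_iteration_within_timestep = manager.dict()
+            #
+            # created before this loop and passed to prepare_subdomain as
+            # before; this requires the stored SolutionFile objects to be
+            # picklable.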
 
-            # reset all interface[has_interface].current_iteration[ind] = 0, for
-            # index has_interface in subdomain.has_interface
-            if self.interface_def_points is not None:
-                subdomain.null_all_interface_iteration_numbers()
+        return solution_over_iteration_within_timestep
 
-            if subdomain.outer_boundary is not None:
-                self.update_DirichletBC_dictionary(time = time, #
-                                                   subdomain_index = ind,
-                                                   )
-            # this is the beginning of the solver, so iteration must be set
-            # to 0.
-            subdomain.iteration_number = 0
-            for phase in subdomain.has_phases:
-                # evaluate the sources to the current time.
-                subdomain.source[phase].t = time
-                # set the solution of the previous timestep as new prev_timestep pressure
-                if debug:
-                    print(colored(f"id(subdomain{subdomain.subdomain_index}.pressure_prev_timestep[{phase}])", "red"), " =\n", f"{id(subdomain.pressure_prev_timestep[phase])}")
-                    print(f"subdomain{subdomain.subdomain_index}.pressure_prev_timestep[{phase}].vector()[:]", " =\n", f"{subdomain.pressure_prev_timestep[phase].vector()[:]}")
-                    print(f"subdomain{subdomain.subdomain_index}.pressure[{phase}].vector()[:]", " =\n", f"{subdomain.pressure[phase].vector()[:]}")
-
-                subdomain.pressure_prev_timestep[phase].assign(
-                    subdomain.pressure[phase]
+    def prepare_subdomain(
+            self,
+            subdomain_index: int,
+            sol_over_iter_file,
+            debug: bool = False,
+            analyse_timestep: bool = False) -> None:
+        """Initialise LDD iteration for current time time and subdomain.
+
+        This method executes a few prelininary steps on the subdomain
+        with index subdomain_index before actually starting
+        the L-iteration. Namely,
+        - create solution files for analysing time step if
+        analyse_timestep == True
+        - Dirichlet boundary condition terms are evaluated to the current time
+        - all subdomain iteration numbers  on interfaces are set to 0.
+        - all subdomain iteration numbers subdomain.iteration_number are set
+          to 0.
+        - source terms are evaluated to the current time.
+        - the calculated solution pressure from the previous timestep
+          is assigned to pressure_prev for each subdomain.
+        - gl0 terms are calculated.
+        - write pressure to previous iteration dictionaries on all interfaces.
+
+        PARAMETERS
+        ------------------------------------
+        subdomain_index     int     Index of the subdomain that is prepared by
+                                    the method.
+        sol_over_iter_file  tp.Dict[int, SolutionFile] or None,  either None
+                                    or an (initially empty) dictionary in
+                                    which to store the file objects for
+                                    saving iterations within a timestep.
+        debug   bool, optional    debug flag used to toggle the amount of
+                                  output being displayed.
+        analyse_timestep   bool, optional,  flag to toggle whether or not to
+                                  run routines designed to output data within
+                                  one timestep iteration.
+
+        OUTPUT
+        ------------------------------------
+        None    If analyse_timestep == True, a SolutionFile for this
+                                  subdomain is added to sol_over_iter_file;
+                                  otherwise sol_over_iter_file is left
+                                  untouched.
+        """
+
+        # remember: self.t has been set by the solver to the target timestep,
+        # i.e. the timestep we are calculating the solution for.
+        time = self.t
+        subdomain = self.subdomain[subdomain_index]
+        solution_over_iteration_within_timestep = sol_over_iter_file
+
+        # create xdmf files for writing out iterations if analyse_timestep
+        # is given.
+        if analyse_timestep:
+            filename = self.output_dir + \
+                self.output_filename_parameter_part[subdomain_index] \
+                + "solution_iterations_at_timestep" \
+                + "{number}".format(number=self.timestep_num) + ".xdmf"
+            solution_over_iteration_within_timestep.update(
+                {subdomain_index: SolutionFile(
+                    self._mpi_communicator, filename
+                    )}
                 )
 
-                if debug:
-                    print(colored(f"id(subdomain{subdomain.subdomain_index}.pressure[{phase}])", "red"), " =\n", f"{id(subdomain.pressure[phase])}")
-                    print(f"subdomain{subdomain.subdomain_index}.pressure_prev_timestep[{phase}].vector()[:]", " =\n", f"{subdomain.pressure_prev_timestep[phase].vector()[:]}")
-            # we need to update the previous pressure dictionaries of all interfaces
-            # of the subdomain with the current solution, so that the subdomain.calc_gli_term
-            # method works properly. The current pressure dictionaries should already
-            # be populated with the current solution, sinces the solver writes the pressure
-            # to interfaces after each iteration.
-            if self.interface_def_points is not None:
-                subdomain.write_pressure_to_interfaces()
-                subdomain.write_pressure_to_interfaces(previous_iter=True)
-                subdomain.calc_gl0_term()
+        # reset all interface[has_interface].current_iteration[ind] = 0,
+        # for index has_interface in subdomain.has_interface
+        if self.interface_def_points is not None:
+            subdomain.null_all_interface_iteration_numbers()
+
+        if subdomain.outer_boundary is not None:
+            self.update_DirichletBC_dictionary(time=time,
+                                               subdomain_index=subdomain_index,
+                                               )
+        # this is the beginning of the solver, so iteration must be set
+        # to 0.
+        subdomain.iteration_number = 0
+        for phase in subdomain.has_phases:
+            # evaluate the sources to the current time.
+            subdomain.source[phase].t = time
+            # set the solution of the previous timestep as new prev_timestep
+            # pressure
+            if debug:
+                debugstr1 = f"subdomain{subdomain.subdomain_index}." + \
+                    f"pressure_prev_timestep[{phase}].vector()[:]" + " =\n" + \
+                    f"{subdomain.pressure_prev_timestep[phase].vector()[:]}"
+                print(debugstr1)
+                debugstr2 = f"subdomain{subdomain.subdomain_index}." + \
+                    f"pressure[{phase}].vector()[:]" + " =\n" + \
+                    f"{subdomain.pressure[phase].vector()[:]}"
+                print(debugstr2)
+
+            subdomain.pressure_prev_timestep[phase].assign(
+                subdomain.pressure[phase]
+            )
 
-        return solution_over_iteration_within_timestep
+            if debug:
+                debugstr3 = f"id(subdomain{subdomain.subdomain_index}." + \
+                    f"pressure[{phase}])"
+                debugstr4 = " =\n" + f"{id(subdomain.pressure[phase])}"
+                print(colored(debugstr3, "red"), debugstr4)
+                debugstr5 = f"subdomain{subdomain.subdomain_index}." + \
+                    f"pressure_prev_timestep[{phase}].vector()[:]" + " =\n" + \
+                    f"{subdomain.pressure_prev_timestep[phase].vector()[:]}"
+                print(debugstr5)
+        # we need to update the previous pressure dictionaries of all
+        # interfaces of the subdomain with the current solution, so that the
+        # subdomain.calc_gli_term method works properly.
+        # The current pressure dictionaries should already be populated with
+        # the current solution, since the solver writes the pressure
+        # to interfaces after each iteration.
+        if self.interface_def_points is not None:
+            subdomain.write_pressure_to_interfaces()
+            subdomain.write_pressure_to_interfaces(previous_iter=True)
+            subdomain.calc_gl0_term()
 
 
     def Lsolver_step(self, subdomain_index: int,#
diff --git a/Two-phase-Richards/multi-patch/layered_soil_with_inner_patch/TP-R-layered_soil_with_inner_patch-realistic.py b/Two-phase-Richards/multi-patch/layered_soil_with_inner_patch/TP-R-layered_soil_with_inner_patch-realistic.py
index fb77415e1106e64f7b4456a745984e499f1307dd..4e2fbf9c7f8fb1adbfcd0c764aba9ec76e7c4ebf 100755
--- a/Two-phase-Richards/multi-patch/layered_soil_with_inner_patch/TP-R-layered_soil_with_inner_patch-realistic.py
+++ b/Two-phase-Richards/multi-patch/layered_soil_with_inner_patch/TP-R-layered_soil_with_inner_patch-realistic.py
@@ -39,13 +39,13 @@ thisfile = "TP-R-layered_soil_with_inner_patch-realistic.py"
 
 # GENERAL SOLVER CONFIG  ######################################################
 # maximal iteration per timestep
-max_iter_num = 1
+max_iter_num = 500
 FEM_Lagrange_degree = 1
 
 # GRID AND MESH STUDY SPECIFICATIONS  #########################################
 mesh_study = True
 resolutions = {
-                1: 2e-6,  # h=2
+                # 1: 2e-6,  # h=2
                 # 2: 2e-6,  # h=1.1180
                 # 4: 2e-6,  # h=0.5590
                 8: 2e-6,  # h=0.2814
@@ -111,7 +111,7 @@ lambda56_nw = 4
 
 include_gravity = True
 debugflag = True
-analyse_condition = False
+analyse_condition = True
 
 # I/O CONFIG  #################################################################
 # when number_of_timesteps is high, it might take a long time to write all