diff --git a/gext/descriptors.py b/gext/descriptors.py
index e7813e40626fb3e6dffc7dc05e2a666a68f4b93e..145ba61f5fdc01951da635434b0f1e7bd3f62597 100644
--- a/gext/descriptors.py
+++ b/gext/descriptors.py
@@ -31,3 +31,14 @@ class Coulomb(Distance):
     def compute(self, coords: np.ndarray) -> np.ndarray:
         """Compute the Coulomb matrix as a descriptor."""
         return 1.0/super().compute(coords)
+
+class Flatten:
+    """Descriptor that simply flattens the input matrix to a 1-D vector."""
+    def __init__(self, **kwargs):
+        pass
+
+    def set_options(self, **kwargs):
+        pass
+
+    def compute(self, matrix: np.ndarray) -> np.ndarray:
+        return matrix.flatten()
diff --git a/gext/fitting.py b/gext/fitting.py
index 375328b0a9faa6b3d02cdd71092bd504eff469ec..958f9fdef73cef26bf15e0bb6443713b664bc197 100644
--- a/gext/fitting.py
+++ b/gext/fitting.py
@@ -28,27 +28,35 @@ class AbstractFitting(abc.ABC):
                 self.options[option] = default_value
 
     @abc.abstractmethod
-    def fit(self, vectors: List[np.ndarray], target:np.ndarray):
+    def train(self, descriptors: List[np.ndarray], gammas: List[np.ndarray]):
         """Base method for computing new fitting coefficients."""
         return np.zeros(0)
 
-    def linear_combination(self, vectors: List[np.ndarray],
-            coefficients: np. ndarray) -> np.ndarray:
-        """Given a set of vectors (or matrices) and the corresponding
-        coefficients, build their linear combination."""
-        result = np.zeros(vectors[0].shape, dtype=np.float64)
-        for coeff, vector in zip(coefficients, vectors):
-            result += vector*coeff
-        return result
+    @abc.abstractmethod
+    def extrapolate(self, descriptor: np.ndarray):
+        """Extrapolate to a new descriptor:"""
+        return np.zeros(0)
 
 class DiffFitting(AbstractFitting):
 
-    """least square minimization fitting of the diffvectors"""
+    """Least square minimization fitting with a reference."""
 
     supported_options = {
-        "regularization": 0.0,
+        "normalize": False,
+        "regularization": 1e-4,
+        "ref": -1
     }
 
+    def __init__(self, **kwargs):
+
+        super().__init__(**kwargs)
+
+        self.gammas = []
+        self.ref = np.zeros(0)
+        self.A = np.zeros(0)
+        self.DT = np.zeros(0)
+        self.trained = False
+
     def set_options(self, **kwargs):
         """Set options for least square minimization"""
         super().set_options(**kwargs)
@@ -57,41 +65,66 @@ class DiffFitting(AbstractFitting):
                 or self.options["regularization"] > 100:
             raise ValueError("Unsupported value for regularization")
 
-    def fit(self, vectors: List[np.ndarray], target: np.ndarray):
-        """Given a set of vectors and a target return the fitting
+    def train(self, descriptors: List[np.ndarray], gammas: List[np.ndarray]):
+        """Given a set of descriptors and a target return the fitting
         coefficients."""
-        if len(vectors) == 1:
+        if len(descriptors) == 1:
             raise ValueError("DiffFitting does not work for one vector")
 
-        ref = len(vectors) - 1
-        target = target - vectors[ref]
+        ref = len(descriptors) - 1
 
-        diff_vectors = []
-        for i in range(0, len(vectors)):
-            if i != ref:
-                diff_vectors.append(vectors[i] - vectors[ref])
+        self.ref = descriptors[ref]
 
-        matrix = np.array(diff_vectors).T
-        a = matrix.T @ matrix
-        b = matrix.T @ target
+        diff_gammas = []
+        diff_descriptors = []
+        for i in range(0, len(descriptors)):
+            if i != ref:
+                diff_descriptors.append(descriptors[i] - self.ref)
+                diff_gammas.append(gammas[i])  # the ref point is assumed to be the tangent point (zero gamma)
+        self.gammas = diff_gammas
+
+        D = np.array(diff_descriptors).T
+        A = D.T @ D
+        norm = 1.0
+        if self.options["normalize"]:
+            norm = np.linalg.norm(D)
         if self.options["regularization"] > 0.0:
-            a += np.identity(len(b))*self.options["regularization"]**2
-        coefficients = np.linalg.solve(a, b)
-
-        # Convert diff coefficients to normal coefficients (like in
-        # least square fitting)
-        coefficients = np.insert(coefficients, ref, 1.0 - np.sum(coefficients))
-
-        return coefficients
+            A += np.identity(len(A))*self.options["regularization"]**2*norm**2
+
+        self.A = A
+        self.DT = D.T
+        self.trained = True
+
+    def extrapolate(self, target: np.ndarray, vectors=None):
+        if not self.trained:
+            raise ValueError("The model is not trained")
+        b = self.DT @ (target - self.ref)
+        coefficients = np.linalg.solve(self.A, b)
+        if vectors is None:
+            vectors = self.gammas
+        result = np.zeros(vectors[0].shape)
+        for c, vector in zip(coefficients, vectors):
+            result += vector * c
+        return result
 
 class LeastSquare(AbstractFitting):
 
     """Simple least square minimization fitting."""
 
     supported_options = {
-        "regularization": 1e-3,
+        "normalize": False,
+        "regularization": 1e-4,
     }
 
+    def __init__(self, **kwargs):
+
+        super().__init__(**kwargs)
+
+        self.gammas = []
+        self.A = np.zeros(0)
+        self.DT = np.zeros(0)
+        self.trained = False
+
     def set_options(self, **kwargs):
         """Set options for least square minimization"""
         super().set_options(**kwargs)
@@ -100,25 +133,54 @@ class LeastSquare(AbstractFitting):
                 or self.options["regularization"] > 100:
             raise ValueError("Unsupported value for regularization")
 
-    def fit(self, vectors: List[np.ndarray], target: np.ndarray):
-        """Given a set of vectors and a target return the fitting
-        coefficients."""
-        matrix = np.array(vectors).T
-        a = matrix.T @ matrix
-        b = matrix.T @ target
+    def train(self, descriptors: List[np.ndarray], gammas: List[np.ndarray]):
+        self.gammas = gammas
+
+        D = np.array(descriptors).T
+        A = D.T @ D
+        norm = 1.0
+        if self.options["normalize"]:
+            norm = np.linalg.norm(D)
         if self.options["regularization"] > 0.0:
-            a += np.identity(len(b))*self.options["regularization"]**2
-        coefficients = np.linalg.solve(a, b)
-        return np.array(coefficients, dtype=np.float64)
+            A += np.identity(len(A))*self.options["regularization"]**2*norm**2
+
+        self.A = A
+        self.DT = D.T
+        self.trained = True
+
+    def extrapolate(self, target: np.ndarray, vectors=None):
+        if not self.trained:
+            raise ValueError("The model is not trained")
+        b = self.DT @ target
+        coefficients = np.linalg.solve(self.A, b)
+        if vectors is None:
+            vectors = self.gammas
+        result = np.zeros(vectors[0].shape)
+        for c, vector in zip(coefficients, vectors):
+            result += vector * c
+        return result
 
 class QuasiTimeReversible(AbstractFitting):
 
     """Quasi time reversible fitting scheme."""
 
     supported_options = {
-        "regularization": 1e-3,
+        "normalize": False,
+        "regularization": 1e-4,
+        "full_second_order": False
     }
 
+    def __init__(self, **kwargs):
+
+        super().__init__(**kwargs)
+
+        self.gammas = []
+        self.past_target = np.zeros(0)
+        self.A = np.zeros(0)
+        self.DT = np.zeros(0)
+        self.q = 0
+        self.trained = False
+
     def set_options(self, **kwargs):
         """Set options for quasi time reversible fitting"""
         super().set_options(**kwargs)
@@ -127,44 +189,65 @@ class QuasiTimeReversible(AbstractFitting):
                 or self.options["regularization"] > 100:
             raise ValueError("Unsupported value for regularization")
 
-    def fit(self, vectors: List[np.ndarray], target: np.ndarray):
-        """Given a set of vectors and a target return the fitting
+    def train(self, descriptors: List[np.ndarray], gammas: List[np.ndarray]):
+        """Given a set of descriptors and a target return the fitting
         coefficients in a quasi time reversible scheme."""
 
-        past_target = vectors[0]
-        matrix = np.array(vectors[1:]).T
+        self.past_target = descriptors[0]
+        self.gammas = gammas
 
-        q = matrix.shape[1]
-        if q == 1:
-            time_reversible_matrix = matrix
-        elif q%2 == 0:
-            time_reversible_matrix = matrix[:, :q//2] + matrix[:, :q//2-1:-1]
+        D = np.array(descriptors[1:]).T
+
+        self.q = D.shape[1]
+        if self.q == 1:
+            time_reversible_D = D
+        elif self.q%2 == 0:
+            time_reversible_D = D[:, :self.q//2] + D[:, :self.q//2-1:-1]
         else:
-            time_reversible_matrix = matrix[:, :q//2+1] + matrix[:, :q//2-1:-1]
+            time_reversible_D = D[:, :self.q//2+1] + D[:, :self.q//2-1:-1]
+
+        norm = 1.0
+        if self.options["normalize"]:
+            norm = np.linalg.norm(time_reversible_D)
 
-        a = time_reversible_matrix.T @ time_reversible_matrix
-        b = time_reversible_matrix.T @ (target + past_target)
+        A = time_reversible_D.T @ time_reversible_D
 
         if self.options["regularization"] > 0.0:
-            a += np.identity(len(b))*self.options["regularization"]**2
-        coefficients = np.linalg.solve(a, b)
+            A += np.identity(len(A))*self.options["regularization"]**2*norm**2
+
+        self.A = A
+        self.DT = time_reversible_D.T
+        self.trained = True
+
+    def extrapolate(self, target: np.ndarray, vectors=None):
 
-        if q == 1:
+        b = self.DT @ (target + self.past_target)
+        coefficients = np.linalg.solve(self.A, b)
+
+        if self.q == 1:
             full_coefficients = np.concatenate(([-1.0], coefficients))
-        elif q%2 == 0:
+        elif self.q%2 == 0:
             full_coefficients = np.concatenate(([-1.0], coefficients,
                 coefficients[::-1]))
         else:
             full_coefficients = np.concatenate(([-1.0], coefficients[:-1],
                 2.0*coefficients[-1:], coefficients[-2::-1]))
-        return np.array(full_coefficients, dtype=np.float64)
+
+        if vectors is None:
+            vectors = self.gammas
+
+        result = np.zeros(vectors[0].shape)
+        for vector, c in zip(vectors, full_coefficients):
+            result += vector * c
+        return result
 
 class PolynomialRegression(AbstractFitting):
 
     """Polynomial regression."""
 
     supported_options = {
-        "regularization": 1e-3,
+        "normalize": False,
+        "regularization": 1e-4,
         "ref": -1,
         "order": 1}
 
@@ -178,8 +261,8 @@ class PolynomialRegression(AbstractFitting):
         self.matrix = np.zeros(0, dtype=np.float64)
         self.gamma_shape = None
 
-    def fit(self, descriptor_list: List[np.ndarray], gamma_list: List[np.ndarray]):
-        """Given a set of vectors and a set of gammas, construct the
+    def train(self, descriptor_list: List[np.ndarray], gamma_list: List[np.ndarray]):
+        """Given a set of descriptors and a set of gammas, construct the
         transformation matrix."""
 
         if self.gamma_shape is None:
@@ -214,13 +297,24 @@ class PolynomialRegression(AbstractFitting):
             gammas = np.reshape(gamma_list,
                 (len(gamma_list), self.gamma_shape[0]*self.gamma_shape[1])).T
 
+        norm = 1.0
+        if self.options["normalize"]:
+            norm = np.linalg.norm(d)
+
         if self.options["order"] >= 2:
-            d = np.vstack((d, d**2))
+            if self.options["full_second_order"]:
+                outer = np.outer(d, d)
+                d = np.vstack((d, outer.flatten()))
+            else:
+                d = np.vstack((d, d**2))
+
+        A = d.T @ d
+        if self.options["regularization"] > 0.0:
+            A += np.identity(len(A))*self.options["regularization"]**2*norm**2
 
-        a = d @ np.linalg.inv(d.T @ d + np.identity(d.shape[1])*self.options["regularization"]**2)
-        self.matrix = a @ gammas.T
+        self.matrix = d @ np.linalg.inv(A) @ gammas.T
 
-    def apply(self, descriptor):
+    def extrapolate(self, descriptor):
         """Apply the matrix to the current descriptor."""
         if self.options["ref"]:
             descriptor -= self.descriptor_ref
diff --git a/gext/main.py b/gext/main.py
index ab0f781d86bc240df97263617437bd146f6ea99e..e442371353849704139368cf22f754622e095174 100644
--- a/gext/main.py
+++ b/gext/main.py
@@ -5,7 +5,7 @@ import numpy as np
 
 from . import grassmann
 from .fitting import LeastSquare, QuasiTimeReversible, PolynomialRegression, DiffFitting
-from .descriptors import Distance, Coulomb
+from .descriptors import Distance, Coulomb, Flatten
 from .buffer import CircularBuffer
 
 class Extrapolator:
@@ -78,6 +78,8 @@ class Extrapolator:
             self.descriptor_calculator = Distance()
         elif self.options["descriptor"] == "coulomb":
             self.descriptor_calculator = Coulomb()
+        elif self.options["descriptor"] == "flatten":
+            self.descriptor_calculator = Flatten()
         else:
             raise ValueError("Unsupported descriptor")
         self.descriptor_calculator.set_options(**descriptor_options)
@@ -108,6 +110,9 @@ class Extrapolator:
         if self.options["store_overlap"]:
             self.overlaps.push(overlap)
 
+        if self.tangent is None:
+            self._set_tangent(coeff)
+
     def guess(self, coords: np.ndarray, overlap=None) -> np.ndarray:
         """Get a new electronic density matrix to be used as a guess."""
         c_guess = self.guess_coefficients(coords, overlap)
@@ -139,20 +144,14 @@ class Extrapolator:
         descriptor = self._compute_descriptor(coords)
 
         # use the descriptors to find the fitting coefficients
-        if self.options["fitting"] == "polynomialregression":
-            self.fitting_calculator.fit(prev_descriptors, gammas)
-            gamma = self.fitting_calculator.apply(descriptor)
-        else:
-            fit_coefficients = self._fit(prev_descriptors, descriptor)
-            # use the fitting coefficients and the previous gammas to
-            # extrapolate a new gamma
-            gamma = self.fitting_calculator.linear_combination(gammas, fit_coefficients)
+        self.fitting_calculator.train(prev_descriptors, gammas)
+        gamma = self.fitting_calculator.extrapolate(descriptor)
 
-            if self.options["verbose"]:
-                fit_descriptor = self.fitting_calculator.linear_combination(
-                    prev_descriptors, fit_coefficients)
-                print("error on descriptor:", \
-                    np.linalg.norm(fit_descriptor - descriptor, ord=np.inf))
+        #if self.options["fitting"] in ["leastsquare"]:
+        #    fit_descriptor = self.fitting_calculator.extrapolate(descriptor,
+        #        vectors=prev_descriptors)
+        #    print("error on descriptor:", \
+        #        np.linalg.norm(fit_descriptor - descriptor, ord=np.inf))
 
         # if the overlap is not given, use the coefficients to fit
         # a new overlap
@@ -160,7 +159,7 @@ class Extrapolator:
             if self.options["fitting"] == "polynomialregression":
                 raise ValueError("The option polynomial regression needs the overlap")
             overlaps = self.overlaps.get(n)
-            overlap = self.fitting_calculator.linear_combination(overlaps, fit_coefficients)
+            overlap = self.fitting_calculator.extrapolate(descriptor, vectors=overlaps)
             inverse_sqrt_overlap = self._inverse_sqrt_overlap(overlap)
         else:
             inverse_sqrt_overlap = self._inverse_sqrt_overlap(overlap)
@@ -174,11 +173,11 @@ class Extrapolator:
         count = self.descriptors.count
         n = min(self.options["nsteps"], count)
         if "ref" in self.fitting_calculator.options:
-            ref = self.fitting_calculator.options["ref"]
+            index = self.fitting_calculator.options["ref"]
+            coefficients = self.coefficients.get(n)
+            return coefficients[index]
         else:
-            ref = 0
-        coefficients = self.coefficients.get(n)
-        return coefficients[ref]
+            return self.tangent
 
     def _crop_coeff(self, coeff) -> np.ndarray:
         """Crop the coefficient matrix to remove the virtual orbitals."""
@@ -218,8 +217,3 @@ class Extrapolator:
     def _compute_descriptor(self, coords) -> np.ndarray:
         """Given a set of coordinates compute the corresponding descriptor."""
         return self.descriptor_calculator.compute(coords)
-
-    def _fit(self, prev_descriptors, descriptor) -> np.ndarray:
-        """Fit the current descriptor using previous descriptors and
-        the specified fitting scheme."""
-        return self.fitting_calculator.fit(prev_descriptors, descriptor)