diff --git a/CHANGELOG.md b/CHANGELOG.md
index f985f260..5276cdeb 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -2,6 +2,17 @@
 
 This document describes the changes made to the project.
 
+## 1.2.0 (August 14, 2024):
+
+- Added the resetAll option to Set_Iter() so that internal state (e.g. the phase-field history) is rebuilt when an iteration is re-activated.
+- Clarified the phase-field functions, on both the simulation and the material side.
+- Improved the display options for geometric objects (line width and line style).
+- Improved the display functions (e.g. user-defined color limits in Plot_Result).
+- Clarified the mesh and group-element functions.
+- Updated the Gmsh interface.
+- Improved the phase-field examples.
+- Enhanced the Digital Image Correlation (DIC) analysis module.
+
 ## 1.1.0 (June 29, 2024):
 
 - Updated gmsh interface functions.
diff --git a/EasyFEA/Geoms.py b/EasyFEA/Geoms.py
index 19d09369..49d752c8 100644
--- a/EasyFEA/Geoms.py
+++ b/EasyFEA/Geoms.py
@@ -293,7 +293,7 @@ def Symmetry(self, point=(0,0,0), n=(1,0,0)) -> None:
         dec = newCoord - oldCoord
         [point.Translate(*dec[p]) for p, point in enumerate(self.points)]
 
-    def Plot(self, ax: plt.Axes=None, color:str="", name:str="", plotPoints=True) -> plt.Axes:
+    def Plot(self, ax: plt.Axes=None, color:str="", name:str="", lw=None, ls=None, plotPoints=True) -> plt.Axes:
 
         from EasyFEA import Display
 
@@ -304,9 +304,9 @@ def Plot(self, ax: plt.Axes=None, color:str="", name:str="", plotPoints=True) ->
 
         lines, points = self.Get_coord_for_plot()
         if color != "":
-            ax.plot(*lines[:,:inDim].T, color=color, label=self.name)
+            ax.plot(*lines[:,:inDim].T, color=color, label=self.name, lw=lw, ls=ls)
         else:
-            ax.plot(*lines[:,:inDim].T, label=self.name)
+            ax.plot(*lines[:,:inDim].T, label=self.name, lw=lw, ls=ls)
 
         if plotPoints:
             ax.plot(*points[:,:inDim].T, ls='', marker='.',c='black')
diff --git a/EasyFEA/fem/_gmsh_interface.py b/EasyFEA/fem/_gmsh_interface.py
index 3a9c09d1..bef3b226 100644
--- a/EasyFEA/fem/_gmsh_interface.py
+++ b/EasyFEA/fem/_gmsh_interface.py
@@ -1035,10 +1035,14 @@ def Mesh_2D(self, contour: _Geom, inclusions: list[_Geom]=[], elemType=ElemType.
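The `lw` / `ls` keywords added to the geometry `Plot` method above are passed straight through to matplotlib's `ax.plot`, so existing calls keep their previous behaviour (both default to `None`). A minimal usage sketch, with the import paths and sizes assumed from the examples further down in this diff:

```python
from EasyFEA import Display                       # Init_Axes, as used in the examples
from EasyFEA.Geoms import Point, Domain, Circle   # import path assumed

domain = Domain(Point(0, 0), Point(1, 1), 0.1)            # outer rectangle, mesh size 0.1
hole = Circle(Point(0.5, 0.5), 0.3, 0.1, isHollow=True)   # circular hole

ax = Display.Init_Axes()
domain.Plot(ax, color='k', lw=2, plotPoints=False)    # thicker solid outline
hole.Plot(ax, color='k', ls='--', plotPoints=False)   # dashed hole boundary
```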
# Crack creation crackLines, crackSurfaces, openPoints, openLines = self._Cracks_SetPhysicalGroups(cracks, entities2D) - if (len(cracks) > 0 and 'QUAD' in elemType) or len(additionalSurfaces) > 0: - # dont delete - surfaceTags = [s[1] for s in gmsh.model.getEntities(2)] - self._OrganiseSurfaces(surfaceTags, elemType, isOrganised) + # get created surfaces + surfaces = [entity[1] for entity in factory.getEntities(2)] + self._OrganiseSurfaces(surfaces, elemType, isOrganised) + # if (len(cracks) > 0 and 'QUAD' in elemType) or len(additionalSurfaces) > 0: + # # dont delete + # self._Synchronize() + # surfaceTags = [s[1] for s in gmsh.model.getEntities(2)] + # self._OrganiseSurfaces(surfaceTags, elemType, isOrganised) self._Refine_Mesh(refineGeoms, contour.meshSize) @@ -1721,6 +1725,9 @@ def types(elemType: str): dict_results: dict[str, list[np.ndarray]] = {result: [] for result in results} + # activates the first iteration + simu.Set_Iter(0, resetAll=True) + for i in range(simu.Niter): simu.Set_Iter(i) [dict_results[result].append(reshape(simu.Result(result))) for result in results] diff --git a/EasyFEA/fem/_group_elems.py b/EasyFEA/fem/_group_elems.py index ed814c2a..3c17a8cd 100644 --- a/EasyFEA/fem/_group_elems.py +++ b/EasyFEA/fem/_group_elems.py @@ -152,7 +152,7 @@ def coordGlob(self) -> np.ndarray: @coordGlob.setter def coordGlob(self, coord: np.ndarray) -> None: if coord.shape == self.__coordGlob.shape: - self.__coordGlob = coord + self.__coordGlob = coord self._InitMatrix() @property @@ -308,7 +308,7 @@ def Get_dN_e_pg(self, matrixType: MatrixType) -> np.ndarray: dN_pg = self.Get_dN_pg(matrixType) # Derivation of shape functions in the real base - dN_e_pg: np.ndarray = np.einsum('epik,pkj->epij', invF_e_pg, dN_pg, optimize='optimal') + dN_e_pg: np.ndarray = np.einsum('epdk,pkn->epdn', invF_e_pg, dN_pg, optimize='optimal') self.__dict_dN_e_pg[matrixType] = dN_e_pg return self.__dict_dN_e_pg[matrixType].copy() @@ -329,7 +329,7 @@ def Get_ddN_e_pg(self, matrixType: MatrixType) -> np.ndarray: ddN_pg = self.Get_ddN_pg(matrixType) - ddN_e_pg = np.array(np.einsum('epik,pkj->epij', invF_e_pg, ddN_pg, optimize='optimal')) + ddN_e_pg = np.array(np.einsum('epdk,pkn->epdn', invF_e_pg, ddN_pg, optimize='optimal')) self.__dict_ddN_e_pg[matrixType] = ddN_e_pg return self.__dict_ddN_e_pg[matrixType].copy() @@ -345,11 +345,7 @@ def Get_Nv_e_pg(self) -> np.ndarray: invF_e_pg = self.Get_invF_e_pg(matrixType) Nv_pg = self.Get_Nv_pg(matrixType) - - Ne = self.Ne nPe = self.nPe - jacobian_e_pg = self.Get_jacobian_e_pg(matrixType) - pg = self.Get_gauss(matrixType) Nv_e_pg = invF_e_pg @ Nv_pg @@ -372,11 +368,7 @@ def Get_dNv_e_pg(self) -> np.ndarray: invF_e_pg = self.Get_invF_e_pg(matrixType) dNv_pg = self.Get_dNv_pg(matrixType) - - Ne = self.Ne nPe = self.nPe - jacobian_e_pg = self.Get_jacobian_e_pg(matrixType) - pg = self.Get_gauss(matrixType) dNv_e_pg = invF_e_pg @ dNv_pg @@ -398,12 +390,8 @@ def Get_ddNv_e_pg(self) -> np.ndarray: matrixType = MatrixType.beam invF_e_pg = self.Get_invF_e_pg(matrixType) - ddNv_pg = self.Get_ddNv_pg(matrixType) - - Ne = self.Ne + ddNv_pg = self.Get_ddNv_pg(matrixType) nPe = self.nPe - jacobian_e_pg = self.Get_jacobian_e_pg(matrixType) - pg = self.Get_gauss(matrixType) ddNv_e_pg = invF_e_pg @ invF_e_pg @ ddNv_pg @@ -490,7 +478,7 @@ def Get_leftDispPart(self, matrixType: MatrixType) -> np.ndarray: def Get_ReactionPart_e_pg(self, matrixType: MatrixType) -> np.ndarray: """Returns the part that builds the reaction term (scalar). 
- ReactionPart_e_pg = jacobian_e_pg * weight_pg * r_e_pg * N_pg' * N_pg\n + ReactionPart_e_pg = r_e_pg * jacobian_e_pg * weight_pg * N_pg' * N_pg\n Returns -> jacobian_e_pg * weight_pg * N_pg' * N_pg """ @@ -509,24 +497,22 @@ def Get_ReactionPart_e_pg(self, matrixType: MatrixType) -> np.ndarray: return self.__dict_phaseField_ReactionPart_e_pg[matrixType].copy() - def Get_DiffusePart_e_pg(self, matrixType: MatrixType, A: np.ndarray) -> np.ndarray: + def Get_DiffusePart_e_pg(self, matrixType: MatrixType) -> np.ndarray: """Returns the part that builds the diffusion term (scalar). - DiffusePart_e_pg = jacobian_e_pg * weight_pg * k * dN_e_pg' * A * dN_e_pg\n + DiffusePart_e_pg = k_e_pg * jacobian_e_pg * weight_pg * dN_e_pg' * A * dN_e_pg\n - Returns -> jacobian_e_pg * weight_pg * dN_e_pg' * A * dN_e_pg + Returns -> jacobian_e_pg * weight_pg * dN_e_pg' """ assert matrixType in MatrixType.Get_types() if matrixType not in self.__dict_DiffusePart_e_pg.keys(): - assert len(A.shape) == 2, "A must be a 2D array." - jacobien_e_pg = self.Get_jacobian_e_pg(matrixType) weight_pg = self.Get_gauss(matrixType).weights dN_e_pg = self.Get_dN_e_pg(matrixType) - DiffusePart_e_pg = np.einsum('ep,p,epki,kl,eplj->epij', jacobien_e_pg, weight_pg, dN_e_pg, A, dN_e_pg, optimize='optimal') + DiffusePart_e_pg = np.einsum('ep,p,epij->epji', jacobien_e_pg, weight_pg, dN_e_pg, optimize='optimal') self.__dict_DiffusePart_e_pg[matrixType] = DiffusePart_e_pg @@ -534,7 +520,7 @@ def Get_DiffusePart_e_pg(self, matrixType: MatrixType, A: np.ndarray) -> np.ndar def Get_SourcePart_e_pg(self, matrixType: MatrixType) -> np.ndarray: """Returns the part that builds the source term (scalar). - SourcePart_e_pg = jacobian_e_pg, weight_pg, f_e_pg, N_pg'\n + SourcePart_e_pg = f_e_pg * jacobian_e_pg, weight_pg, N_pg'\n Returns -> jacobian_e_pg, weight_pg, N_pg' """ @@ -1232,7 +1218,7 @@ def Get_Nodes_Line(self, line: Line) -> np.ndarray: def Get_Nodes_Domain(self, domain: Domain) -> np.ndarray: """Returns nodes in the domain.""" - assert isinstance(circle, Domain) + assert isinstance(domain, Domain) coordo = self.coord @@ -1442,7 +1428,7 @@ def Get_pointsInElem(self, coordinates: np.ndarray, elem: int) -> np.ndarray: indexReord = np.append(np.arange(1, nPe), 0) # Vectors i for edge segments vect_i_b = coordo[connectMesh[indexReord]] - coordo[connectMesh] - # vect_i_b = np.einsum("ni,n->ni", vect_i_b, 1/np.linalg.norm(vect_i_b, axis=1), optimize="optimal") + vect_i_b = np.einsum("ni,n->ni", vect_i_b, 1/np.linalg.norm(vect_i_b, axis=1), optimize="optimal") # normal vector to element face vect_n = np.cross(vect_i_b[0], -vect_i_b[-1]) @@ -1564,20 +1550,19 @@ def __Get_Mapping(self, coordinates_n: np.ndarray, elements_e: np.ndarray, needC # get groupElem datas inDim = self.inDim sysCoord_e = self.sysCoord_e # base change matrix for each element - matrixType = MatrixType.rigi + matrixType = MatrixType.mass jacobian_e_pg = self.Get_jacobian_e_pg(matrixType, absoluteValues=False) invF_e_pg = self.Get_invF_e_pg(matrixType) dN_tild = self._dNtild() - nPg = invF_e_pg.shape[1] - gaussCoord_e_pg = self.Get_GaussCoordinates_e_p(matrixType) xiOrigin = self.origin # origin of the reference element (xi, eta) - useIterative = False - # # Check whether iterative resolution is required - # # calculates the ratio between jacob min and max to detect if the element is distorted - # diff_e = jacobian_e_pg.max(1) * 1/jacobian_e_pg.min(1) - # error_e = 1 - diff_e # a perfect element has an error max <= 1e-12 - # # a distorted element has a max error greater than 0 + 
# useIterative = False + # Check whether iterative resolution is required + # calculates the ratio between jacob min and max to detect if the element is distorted + diff_e = jacobian_e_pg.max(1) * 1/jacobian_e_pg.min(1) + error_e = np.abs(1 - diff_e) # a perfect element has an error max <= 1e-12 + # a distorted element has a max error greater than 0 + useIterative_e = error_e > 1e-12 # useIterative = np.max(error_e) > 1e-12 else: coordInElem_n = None @@ -1622,36 +1607,27 @@ def ResearchFunction(e: int): x0 = coordoElemBase[0,:dim] # orign of the real element (x,y) xPs = coordinatesBase_n[:,:dim] # points coordinates (x,y) - - # the fastest way, but may lead to shape functions that give values outside [0,1]. - xiP: np.ndarray = xiOrigin + (xPs - x0) @ np.mean(invF_e_pg[e], 0) - - # if not useIterative: - # if nPg == 1: - # # invF_e_pg is constant in the element - # xiP: np.ndarray = xiOrigin + (xPs - x0) @ invF_e_pg[e,0] - # else: - # # If the element have more than 1 integration point, it is necessary to choose the closest integration points. Because invF_e_pg is not constant in the element - # # for each node detected, we'll calculate its distance from all integration points and see where it's closest - # dist = np.zeros((xPs.shape[0], nPg)) - # for p in range(nPg): - # dist[:,p] = np.linalg.norm(xPs - gaussCoord_e_pg[e, p, :dim], axis=1) - # invMin = invF_e_pg[e, np.argmin(dist, axis=1)] - # xiP: np.ndarray = xiOrigin + (xPs - x0) @ invMin - # else: - # # Here we need to construct the Jacobian matrices. This is the longest method here - # def Eval(xi: np.ndarray, xP): - # dN = _GroupElem._Evaluates_Functions(dN_tild, xi.reshape(1, -1)) - # F = dN[0] @ coordoElemBase[:,:dim] # jacobian matrix - # J = x0 - xP + (xi - xiOrigin) @ F # cost function - # return J - - # xiP = [] - # for xP in xPs: - # res = least_squares(Eval, 0*xP, args=(xP,)) - # xiP.append(res.x) - - # xiP = np.array(xiP) + + if not useIterative_e[e]: + # the fastest way + # available only for undistorted meshes ! + # always valid for TRI3 and structured meshes. + xiP: np.ndarray = xiOrigin + (xPs - x0) @ invF_e_pg[e,0] + + else: + # Here we need to construct the Jacobian matrices. This is the longest method here + def Eval(xi: np.ndarray, xP): + dN = _GroupElem._Evaluates_Functions(dN_tild, xi.reshape(1, -1)) + F = dN[0] @ coordoElemBase[:,:dim] # jacobian matrix + J = x0 + (xi - xiOrigin) @ F - xP # cost function + return J + + xiP = [] + for xP in xPs: + res = least_squares(Eval, 0*xP, args=(xP,)) + xiP.append(res.x) + + xiP = np.array(xiP) coordInElem_n[nodesInElement,:] = xiP.copy() diff --git a/EasyFEA/fem/_mesh.py b/EasyFEA/fem/_mesh.py index e0d9f1fc..5e746258 100644 --- a/EasyFEA/fem/_mesh.py +++ b/EasyFEA/fem/_mesh.py @@ -157,7 +157,7 @@ def Translate(self, dx: float=0.0, dy: float=0.0, dz: float=0.0) -> None: oldCoord = self.coordGlob newCoord = oldCoord + np.array([dx, dy, dz]) for grp in self.dict_groupElem.values(): - grp.coordGlob = newCoord + grp.coordGlob = newCoord self._Notify('The mesh has been modified') @@ -429,23 +429,23 @@ def Get_leftDispPart(self, matrixType: MatrixType) -> np.ndarray: def Get_ReactionPart_e_pg(self, matrixType: MatrixType) -> np.ndarray: """Returns the part that builds the reaction term (scalar). 
- ReactionPart_e_pg = jacobian_e_pg * weight_pg * r_e_pg * N_pg' * N_pg\n + ReactionPart_e_pg = r_e_pg * jacobian_e_pg * weight_pg * N_pg' * N_pg\n Returns -> jacobian_e_pg * weight_pg * N_pg' * N_pg """ return self.groupElem.Get_ReactionPart_e_pg(matrixType) - def Get_DiffusePart_e_pg(self, matrixType: MatrixType, A: np.ndarray) -> np.ndarray: + def Get_DiffusePart_e_pg(self, matrixType: MatrixType) -> np.ndarray: """Returns the part that builds the diffusion term (scalar). - DiffusePart_e_pg = jacobian_e_pg * weight_pg * k * dN_e_pg' * A * dN_e_pg\n + DiffusePart_e_pg = k_e_pg * jacobian_e_pg * weight_pg * dN_e_pg' * A * dN_e_pg\n - Returns -> jacobian_e_pg * weight_pg * dN_e_pg' * A * dN_e_pg + Returns -> jacobian_e_pg * weight_pg * dN_e_pg' """ - return self.groupElem.Get_DiffusePart_e_pg(matrixType, A) + return self.groupElem.Get_DiffusePart_e_pg(matrixType) def Get_SourcePart_e_pg(self, matrixType: MatrixType) -> np.ndarray: """Returns the part that builds the source term (scalar). - SourcePart_e_pg = jacobian_e_pg, weight_pg, f_e_pg, N_pg'\n + SourcePart_e_pg = f_e_pg * jacobian_e_pg, weight_pg, N_pg'\n Returns -> jacobian_e_pg, weight_pg, N_pg' """ @@ -875,6 +875,10 @@ def Calc_projector(oldMesh: Mesh, newMesh: Mesh) -> sp.csr_matrix: assert oldMesh.dim == newMesh.dim, "Mesh dimensions must be the same." tic = Tic() + + distoredMesh = np.max(np.abs(1 - oldMesh.Get_Quality('jacobian'))) > 1e-12 + if distoredMesh: + Display.MyPrintError("Warning: distorted elements have been detected in the mesh.\nThey may lead to projection errors!") detectedNodes, detectedElements_e, connect_e_n, coordo_n = oldMesh.groupElem.Get_Mapping(newMesh.coord) # detectedNodes (size(connect_e_n)) are the nodes detected in detectedElements_e diff --git a/EasyFEA/materials/_elastic.py b/EasyFEA/materials/_elastic.py index cd23451b..0f4ce12a 100644 --- a/EasyFEA/materials/_elastic.py +++ b/EasyFEA/materials/_elastic.py @@ -560,8 +560,9 @@ def _Behavior(self, dim: int=None, P: np.ndarray=None): material_cM = Heterogeneous_Array(material_cM) - # # Verify that C = S^-1# - # assert np.linalg.norm(material_sM - np.linalg.inv(material_cM)) < 1e-10 + # # Verify that S = C^-1 + # assert np.linalg.norm(material_sM - np.linalg.inv(material_cM)) < 1e-10 + # # Verify that C = S^-1 # assert np.linalg.norm(material_cM - np.linalg.inv(material_sM)) < 1e-10 # Performs a base change to orient the material in space diff --git a/EasyFEA/materials/_phasefield.py b/EasyFEA/materials/_phasefield.py index 98c53d34..5f65c680 100644 --- a/EasyFEA/materials/_phasefield.py +++ b/EasyFEA/materials/_phasefield.py @@ -435,13 +435,13 @@ def __Split_Amor(self, Epsilon_e_pg: np.ndarray): dim = material.dim if dim == 2: - Ivect = np.array([1,1,0]).reshape((3,1)) + I = np.array([1,1,0]).reshape((3,1)) size = 3 else: - Ivect = np.array([1,1,1,0,0,0]).reshape((6,1)) + I = np.array([1,1,1,0,0,0]).reshape((6,1)) size = 6 - IxI = np.array(Ivect.dot(Ivect.T)) + IxI = I @ I.T spherP_e_pg = np.einsum('ep,ij->epij', Rp_e_pg, IxI, optimize='optimal') spherM_e_pg = np.einsum('ep,ij->epij', Rm_e_pg, IxI, optimize='optimal') @@ -514,7 +514,7 @@ def __Split_Miehe(self, Epsilon_e_pg: np.ndarray, verif=False): I = np.array([1,1,0]).reshape((3,1)) elif dim == 3: I = np.array([1,1,1,0,0,0]).reshape((6,1)) - IxI = I.dot(I.T) + IxI = I @ I.T # Calculation of spherical part spherP_e_pg = np.einsum('ep,ij->epij', Rp_e_pg, IxI, optimize='optimal') @@ -739,11 +739,12 @@ def __Split_He(self, Epsilon_e_pg: np.ndarray, verif=False): assert not material.isHeterogeneous, 
"He decomposition has not been implemented for heterogeneous materials" # for heterogeneous materials how to make sqrtm ? + # TODO obtain the eigenvalues and eigenvectors of the 2D case with the 3D spectral decomposition? sqrtC = sqrtm(C) if verif : # Verif C^1/2 * C^1/2 = C - testC = np.dot(sqrtC, sqrtC) - C + testC = sqrtC @ sqrtC - C assert np.linalg.norm(testC)/np.linalg.norm(C) < 1e-12 inv_sqrtC = np.linalg.inv(sqrtC) @@ -769,8 +770,14 @@ def __Split_He(self, Epsilon_e_pg: np.ndarray, verif=False): # cM_e_pg = np.einsum('epji,jk,epkl->epil', projM_e_pg, C, projM_e_pg, optimize='optimal') # faster - cP_e_pg = np.transpose(projP_e_pg, (0,1,3,2)) @ C @ projP_e_pg - cM_e_pg = np.transpose(projM_e_pg, (0,1,3,2)) @ C @ projM_e_pg + # cP_e_pg = np.transpose(projP_e_pg, (0,1,3,2)) @ C @ projP_e_pg + # cM_e_pg = np.transpose(projM_e_pg, (0,1,3,2)) @ C @ projM_e_pg + + cP_e_pg = C @ projP_e_pg + cM_e_pg = C @ projM_e_pg + + # cP_e_pg = projP_e_pg @ C + # cM_e_pg = projM_e_pg @ C tic.Tac("Split",f"cP_e_pg and cM_e_pg", False) @@ -1269,18 +1276,20 @@ def __Construction_Gij(Ma, Mb): # Decomposition vector_e_pg = vectorP_e_pg + vectorM_e_pg decomp = vector_e_pg-(vectorP + vectorM) - if np.linalg.norm(vector_e_pg) > 0: - verifDecomp = np.linalg.norm(decomp)/np.linalg.norm(vector_e_pg) - assert verifDecomp <= 1e-12, "vector_e_pg != vectorP_e_pg + vectorM_e_pg" + if np.linalg.norm(vector_e_pg) > 0: + # verif_decomp = np.linalg.norm(decomp,axis=-1)/np.linalg.norm(vector_e_pg,axis=-1) + # assert np.max(verif_decomp) <= 1e-11, "vector_e_pg != vectorP_e_pg + vectorM_e_pg" + verif_decomp = np.linalg.norm(decomp)/np.linalg.norm(vector_e_pg) + assert verif_decomp <= 1e-12, "vector_e_pg != vectorP_e_pg + vectorM_e_pg" # Orthogonality - ortho_vP_vM = np.abs(np.einsum('epi,epi->ep',vectorP, vectorM, optimize='optimal')) - ortho_vM_vP = np.abs(np.einsum('epi,epi->ep',vectorM, vectorP, optimize='optimal')) + ortho_vP_vM = np.abs(np.einsum('epi,epi->ep', vectorP, vectorM, optimize='optimal')) + ortho_vM_vP = np.abs(np.einsum('epi,epi->ep', vectorM, vectorP, optimize='optimal')) ortho_v_v = np.abs(np.einsum('epi,epi->ep', vector_e_pg, vector_e_pg, optimize='optimal')) if ortho_v_v.min() > 0: - vertifOrthoEpsPM = np.max(ortho_vP_vM/ortho_v_v) - assert vertifOrthoEpsPM <= 1e-12 - vertifOrthoEpsMP = np.max(ortho_vM_vP/ortho_v_v) - assert vertifOrthoEpsMP <= 1e-12 + verif_PM = np.max(ortho_vP_vM/ortho_v_v) + assert verif_PM <= 1e-12 + verif_MP = np.max(ortho_vM_vP/ortho_v_v) + assert verif_MP <= 1e-12 return projP, projM \ No newline at end of file diff --git a/EasyFEA/simulations/Solvers.py b/EasyFEA/simulations/Solvers.py index 9070e17e..db6cf6a9 100644 --- a/EasyFEA/simulations/Solvers.py +++ b/EasyFEA/simulations/Solvers.py @@ -408,6 +408,9 @@ def _PETSc(A: sparse.csr_matrix, b: sparse.csr_matrix, x0: np.ndarray, kspType=' # __comm = MPI.COMM_WORLD # nprocs = __comm.Get_size() # rank = __comm.Get_rank() + + # TODO add bound constrain + # https://petsc.org/release/petsc4py/reference/petsc4py.PETSc.SNES.html ? 
__comm = None petsc4py.init(sys.argv, comm=__comm) diff --git a/EasyFEA/simulations/_beam.py b/EasyFEA/simulations/_beam.py index b1053cff..eeeed038 100644 --- a/EasyFEA/simulations/_beam.py +++ b/EasyFEA/simulations/_beam.py @@ -542,7 +542,7 @@ def Save_Iter(self): self._results.append(iter) - def Set_Iter(self, iter=-1) -> list[dict]: + def Set_Iter(self, iter: int=-1, resetAll=False) -> dict: results = super().Set_Iter(iter) diff --git a/EasyFEA/simulations/_dic.py b/EasyFEA/simulations/_dic.py index 8a2f3c94..03b5bfeb 100644 --- a/EasyFEA/simulations/_dic.py +++ b/EasyFEA/simulations/_dic.py @@ -11,96 +11,160 @@ import cv2 # need opencv-python library # utilities -from ..utilities import Tic, Folder +from ..utilities import Tic, Folder, Display +from ..utilities._observers import Observable, _IObserver # fem from ..fem import Mesh, BoundaryCondition -class DIC: +class DIC(_IObserver): - def __init__(self, mesh: Mesh, idxImgRef: int, imgRef: np.ndarray, - forces: np.ndarray=None, displacements: np.ndarray=None, lr=0.0, verbosity=False): + def __init__(self, mesh: Mesh, idxImgRef: int, imgRef: np.ndarray, lr=0.0, + forces: np.ndarray=None, displacements: np.ndarray=None, verbosity=False): """Creates a DIC analysis. Parameters ---------- mesh : Mesh - ROI mesh + Mesh used to construct the roi idxImgRef : int index of reference image in forces imgRef : np.ndarray reference image + lr : float, optional + regularization length, by default 0.0 forces : np.ndarray, optional force vectors, by default None displacements : np.ndarray, optional displacement vectors, by default None - lr : float, optional - regularization length, by default 0.0 verbosity : bool, optional analysis can write to console, by default False Returns ------- - AnalyseDiC - Object for image correlation + DIC + DIC object """ + # mesh + assert mesh.dim == 2, "Must be a 2D mesh." + mesh._Add_observer(self) + self.__mesh = mesh + + # images + self.__idxImgRef: int = idxImgRef + assert isinstance(imgRef, np.ndarray), "Must be a numpy array." + self.__imgRef = imgRef + + # solutions self._forces = forces """forces measured during the tests.""" - self._displacements = displacements """displacements measured during the tests.""" + + # results + self.__list_idx: list[int] = [] + self.__list_u: list[np.ndarray] = [] + self.__list_img: list[np.ndarray] = [] + + self._verbosity: bool = verbosity - self._mesh: Mesh = mesh - """pixel-based mesh used for image correlation.""" - assert mesh.dim == 2, "Must be a 2D mesh." 
- - self._meshCoef = None - """scaled mesh.""" - self._coef = 1.0 - """scaling coef (image scale [mm/px]).""" - - self.__Nn: int = mesh.Nn - self.__dim: int = mesh.dim - self.__nDof: int = self.__Nn * self.__dim - self.__ldic: float = self.__Get_ldic() + # initialize ROI and shape functions and shape function derivatives + self.__init__roi() + self.__init__Phi_opLap() + + # regul + self._lr = lr + # Updating self._lr will automatically update the matrices + # That's why we can comment on the following line + # self.Compute_L_M(imgRef) - self._idxImgRef: int = idxImgRef - """Reference image index in _loads.""" + # mesh properties - self._imgRef: np.ndarray = imgRef + @property + def mesh(self) -> Mesh: + """pixel-based mesh used for image correlation.""" + return self.__mesh + + @property + def ldic(self) -> float: + """8 * mean(meshSize)""" + return self.Get_ldic() + + def Get_scaled_mesh(self, imgScale=1.0) -> Mesh: + assert imgScale != 0.0 + meshC = self.__mesh.copy() + [meshC._Remove_observer(observer) for observer in meshC.observers.copy()] + meshC.coordGlob = meshC.coordGlob * imgScale + return meshC + + # image properties + @property + def idxImgRef(self) -> int: + """Reference image index in _loads (or in the folder).""" + return self.__idxImgRef + + @property + def imgRef(self) -> np.ndarray: """Image used as reference.""" + return self.__imgRef.copy() + + @property + def shape(self) -> tuple[int, int]: + """Image shapes required""" + return self.__imgRef.shape - self.__shapeImages: tuple[int, int] = imgRef.shape - """Shape of images to be used for analysis.""" - - self._list_u_exp: list[np.ndarray] = [] - """List containing calculated displacement fields.""" - - self._list_idx_exp: list[int] = [] - """List containing indexes for which the displacement field has been calculated.""" - - self._list_img_exp: list[np.ndarray] = [] - """List containing images for which the displacement field has been calculated.""" + # regularisation properties - self.__lr: float = lr + @property + def _lr(self) -> float: """regulation length.""" + return self.__lr + + @_lr.setter + def _lr(self, value: float) -> None: + """# Warning!\n + Changing this parameter will automatically update the matrices with the Compute_L_M function!""" + assert value >= 0.0, "lr must be >= 0.0" + self.__lr = value + self._Compute_L_M(self.__imgRef) + + @property + def alpha(self) -> float: + if self._lr == 0.0: + return 0 + else: + return (self.ldic/self._lr)**2 - self._verbosity: bool = verbosity - - # initialize ROI and shape functions and shape function derivatives - self.__init__roi() - - self.__init__Phi_opLap() - - self.Compute_L_M(imgRef) + # solution properties + + @property + def list_idx(self) -> list[int]: + """Copy of the list containing indexes for which the displacement field has been calculated.""" + return self.__list_idx.copy() + + @property + def list_u(self) -> list[np.ndarray]: + """Copy of the list containing the calculated displacement fields.""" + return self.__list_u.copy() + + @property + def list_img(self) -> list[np.ndarray]: + """Copy of the list containing images for which the displacement field has been calculated.""" + return self.__list_img.copy() + + def _Update(self, observable: Observable, event: str) -> None: + if isinstance(observable, Mesh): + raise Exception("The current implementation does not allow you to make any modifications to the mesh.") + else: + Display.MyPrintError("Notification not yet implemented") def __init__roi(self) -> None: """ROI initialization.""" tic = Tic() - 
imgRef = self._imgRef - mesh = self._mesh + imgRef = self.__imgRef + mesh = self.__mesh # recovery of pixel coordinates coordPx = np.arange(imgRef.shape[1]).reshape((1,-1)).repeat(imgRef.shape[0], 0).ravel() @@ -109,6 +173,7 @@ def __init__roi(self) -> None: # recovery of pixels used in elements with their coordinates pixels, __, connectPixel, coordPixelInElem = mesh.groupElem.Get_Mapping(coordPixel) + # mean_pixels = np.mean([connectPixel[e].size for e in range(mesh.Ne)])s self.__connectPixel: np.ndarray = connectPixel """connectivity matrix which links the pixels used for each element.""" @@ -116,47 +181,56 @@ def __init__roi(self) -> None: """pixel coordinates in the reference element.""" # ROI creation - self._roi: np.ndarray = np.zeros(coordPx.shape[0]) - self._roi[pixels] = 1 - self._roi = np.asarray(self._roi == 1, dtype=bool) - """vector filter for accessing the pixels contained in the mesh.""" - self._ROI: np.ndarray = self._roi.reshape(self.__shapeImages) - """matrix filter for accessing the pixels contained in the mesh.""" + roi: np.ndarray = np.zeros(coordPx.shape[0]) + roi[pixels] = 1 + self.__roi = np.asarray(roi == 1, dtype=bool) tic.Tac("DIC", "ROI", self._verbosity) + @property + def roi(self) -> np.ndarray[bool]: + """roi as a vector.""" + return self.__roi.copy() + + @property + def ROI(self) -> np.ndarray[bool]: + """roi as a matrix.""" + return self.roi.reshape(self.shape) + def __init__Phi_opLap(self) -> None: """Initializing shape functions and the Laplacian operator.""" - mesh = self._mesh - dim = self.__dim - nDof = self.__nDof + mesh = self.__mesh + dim = 2 + Ndof = self.__mesh.Nn * dim connectPixel = self.__connectPixel coordInElem = self.__coordPixelInElem # Initializing shape functions and the Laplacian operator - matrixType="mass" + matrixType = "mass" Ntild = mesh.groupElem._Ntild() - dN_pg = mesh.groupElem.Get_dN_pg(matrixType) - invF_e_pg = mesh.groupElem.Get_invF_e_pg(matrixType) - jacobien_e_pg = mesh.Get_jacobian_e_pg(matrixType) - poid_pg = mesh.Get_weight_pg(matrixType) + jacobian_e_pg = mesh.Get_jacobian_e_pg(matrixType) # (e, p) + weight_pg = mesh.Get_weight_pg(matrixType) # (p) + dN_e_pg = mesh.groupElem.Get_dN_e_pg(matrixType) # (e, p, dim, nPe) # ---------------------------------------------- - # Construction of shape function matrix for pixels + # Construction of shape function matrix for pixels (N) # ---------------------------------------------- lines_x = [] lines_y = [] columns_Phi = [] values_phi = [] - # Evaluation of shape functions for the pixels used - phi_n_pixels = np.array([np.reshape([Ntild[n,0](coordInElem[:,0], coordInElem[:,1])], -1) for n in range(mesh.nPe)]) + # Evaluating shape functions for the pixels used + x_p, y_p = coordInElem[:,0], coordInElem[:,1] + phi_n_pixels = np.array([np.reshape([Ntild[n,0](x_p, y_p)], -1) for n in range(mesh.nPe)]) tic = Tic() - - # TODO possible without the loop? + + # Possible without the loop? + # No, it is not possible without the loop because connectPixel doesn't have the same number of columns in each row. + # In addition, if you remove it, you'll have to make several list comprehension. 
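+        # Each pass of the loop below collects, for one element, the dof rows, the pixel columns
+        # and the shape-function values already evaluated at those pixels; the accumulated
+        # triplets then assemble the sparse operators N_x and N_y of size (Ndof, nPixels).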
for e in range(mesh.Ne): # Retrieve element nodes and pixels @@ -167,176 +241,156 @@ def __init__Phi_opLap(self) -> None: # line construction linesX = BoundaryCondition.Get_dofs_nodes(["x","y"], nodes, ["x"]).reshape(-1,1).repeat(pixels.size) - linesY = BoundaryCondition.Get_dofs_nodes(["x","y"], nodes, ["y"]).reshape(-1,1).repeat(pixels.size) + # linesY = BoundaryCondition.Get_dofs_nodes(["x","y"], nodes, ["y"]).reshape(-1,1).repeat(pixels.size) + # same as + linesY = linesX + 1 # construction of columns in which to place values - colonnes = pixels.reshape(1,-1).repeat(mesh.nPe, 0).ravel() + colonnes = pixels.reshape(1,-1).repeat(mesh.nPe, 0).ravel() lines_x.extend(linesX) lines_y.extend(linesY) columns_Phi.extend(colonnes) - values_phi.extend(np.reshape(phi, -1)) + values_phi.extend(np.reshape(phi, -1)) + + self._N_x = sparse.csr_matrix((values_phi, (lines_x, columns_Phi)), (Ndof, coordInElem.shape[0])) + """Shape function matrix x (Ndof, nPixels)""" + self._N_y = sparse.csr_matrix((values_phi, (lines_y, columns_Phi)), (Ndof, coordInElem.shape[0])) + """Shape function matrix y (Ndof, nPixels)""" - self._Phi_x = sparse.csr_matrix((values_phi, (lines_x, columns_Phi)), (nDof, coordInElem.shape[0])) - """Shape function matrix x (nDof, nPixels)""" - self._Phi_y = sparse.csr_matrix((values_phi, (lines_y, columns_Phi)), (nDof, coordInElem.shape[0])) - """Shape function matrix y (nDof, nPixels)""" + Op: sparse.csr_matrix = self._N_x @ self._N_x.T + self._N_y @ self._N_y.T - Op = self._Phi_x @ self._Phi_x.T + self._Phi_y @ self._Phi_y.T self.__Op_LU = splu(Op.tocsc()) tic.Tac("DIC", "Phi_x and Phi_y", self._verbosity) # ---------------------------------------------- - # Construction of the Laplacian operator + # Construction of the Laplacian operator (R) # ---------------------------------------------- - - dN_e_pg = np.array(np.einsum('epki,pkj->epij', invF_e_pg, dN_pg, optimize='optimal')) - - dNxdx = dN_e_pg[:,:,0] - dNydy = dN_e_pg[:,:,1] + dNdx = dN_e_pg[:,:,0] + dNdy = dN_e_pg[:,:,1] ind_x = np.arange(0, mesh.nPe*dim, dim) - ind_y = ind_x + 1 + ind_y = ind_x + 1 - dN_vector = np.zeros((dN_e_pg.shape[0], dN_e_pg.shape[1], 3, mesh.nPe*dim)) - dN_vector[:,:,0,ind_x] = dNxdx - dN_vector[:,:,1,ind_y] = dNydy - dN_vector[:,:,2,ind_x] = dNydy; dN_vector[:,:,2,ind_y] = dNxdx + dN_x = np.zeros((mesh.Ne, weight_pg.size, 2, 2*mesh.nPe)) + dN_y = np.zeros_like(dN_x) - B_e = np.einsum('ep,p,epji,epjk->eik', jacobien_e_pg, poid_pg, dN_vector, dN_vector, optimize='optimal') - - # Retrieve rows and columns or apply 0s - lignes0 = np.arange(mesh.nPe*dim).repeat(mesh.nPe) - ddlsX = np.arange(0, mesh.nPe*dim, dim) - colonnes0 = np.concatenate((ddlsX+1, ddlsX)).reshape(1,-1).repeat(mesh.nPe, axis=0).ravel() + dN_x[:,:,0,ind_x] = dNdx + dN_x[:,:,1,ind_y] = dNdx + Bx_e = np.einsum('ep,p,epdi,epdj->eij', jacobian_e_pg, weight_pg, dN_x, dN_x, optimize='optimal') - B_e[:,lignes0, colonnes0] = 0 + dN_y[:,:,0,ind_x] = dNdy + dN_y[:,:,1,ind_y] = dNdy + By_e = np.einsum('ep,p,epdi,epdj->eij', jacobian_e_pg, weight_pg, dN_y, dN_y, optimize='optimal') - linesB = mesh.linesVector_e - columnsB = mesh.columnsVector_e + B_e = Bx_e + By_e - self._opLap = sparse.csr_matrix((B_e.ravel(), (linesB.ravel(), columnsB.ravel())), (nDof, nDof)) - """Laplacian operator""" + lines = mesh.linesVector_e.ravel() + columns = mesh.columnsVector_e.ravel() - tic.Tac("DIC", "Laplacian operator", self._verbosity) + self._R = sparse.csr_matrix((B_e.ravel(), (lines, columns)), (Ndof, Ndof)) + """Laplacian operator""" - def __Get_ldic(self) -> 
float: - """Calculation ldic the characteristic length of the mesh, i.e. 8 x the average length of the edges of the elements.""" + tic.Tac("DIC", "Laplacian operator", self._verbosity) - indexReord = np.append(np.arange(1, self._mesh.nPe), 0) - coord = self._mesh.coord - connect = self._mesh.connect + def Get_ldic(self, coef=8) -> float: + """Get the characteristic length of the mesh, i.e. 8 x the average length of the edges of the elements.""" + assert coef > 0 # Calculation of average element size - bords_e_b_c = coord[connect[:,indexReord]] - coord[connect] # edge vectors - h_e_b = np.sqrt(np.sum(bords_e_b_c**2, 2)) # edge lengths - ldic = 8 * np.mean(h_e_b) - + ldic = 8 * self.__mesh.Get_meshSize(False).mean() + return ldic - def __Get_v(self) -> np.ndarray: + def Get_w(self) -> np.ndarray: """Returns characteristic sinusoidal displacement corresponding to element size.""" - ldic = self.__ldic + ldic = self.ldic - coordX = self._mesh.coord[:,0] - coordY = self._mesh.coord[:,1] + x_n = self.__mesh.coord[:,0] + y_n = self.__mesh.coord[:,1] - v = np.cos(2*np.pi*coordX/ldic) * np.sin(2*np.pi*coordY/ldic) + w = np.cos(2*np.pi*x_n/ldic) * np.sin(2*np.pi*y_n/ldic) - v = v.repeat(2) + w = w.repeat(2) - return v + return w - def Compute_L_M(self, img: np.ndarray, lr=None) -> None: - """Updating matrix to produce for DIC with TIKONOV.""" + def _Compute_L_M(self, img: np.ndarray) -> None: + """Updating DIC matrices.""" tic = Tic() - - if lr is None: - lr = self.__lr - else: - assert lr >= 0.0, "lr must be >= 0" - self.__lr = lr # Recover image gradient grid_Gradfy, grid_Gradfx = np.gradient(img) gradY = grid_Gradfy.ravel() gradX = grid_Gradfx.ravel() - roi = self._roi + roi = self.roi - self.L = self._Phi_x @ sparse.diags(gradX) + self._Phi_y @ sparse.diags(gradY) + self.L = self._N_x @ sparse.diags(gradX) + self._N_y @ sparse.diags(gradY) - self.M_Dic = self.L[:,roi] @ self.L[:,roi].T + self._M_dic: sparse.csr_matrix = self.L[:,roi] @ self.L[:,roi].T - v = self.__Get_v() # plane wave - - coef_M_Dic = v.T @ self.M_Dic @ v - coef_Op = v.T @ self._opLap @ v - - self.__coef_M_Dic = coef_M_Dic - self.__coef_opLap = coef_Op - - if lr == 0.0: - self.__alpha = 0 - else: - self.__alpha = (self.__ldic/lr)**2 - - self._M = self.M_Dic / coef_M_Dic + self.__alpha * self._opLap / coef_Op + w = self.Get_w() + # coefs + w_M = w.T @ self._M_dic @ w + w_R = w.T @ self._R @ w + self.__w_M = w_M + self.__w_R = w_R + + self._M_reg: sparse.csr_matrix = 1/w_M * self._M_dic + self.alpha/w_R * self._R - # self._M_LU = splu(self._M.tocsc(), permc_spec="MMD_AT_PLUS_A") - self._M_LU = splu(self._M.tocsc()) + # self._M_reg_LU = splu(self._M.tocsc(), permc_spec="MMD_AT_PLUS_A") + self._M_reg_LU = splu(self._M_reg.tocsc()) tic.Tac("DIC", "Construct L and M", self._verbosity) - def __Get_u_from_images(self, img1: np.ndarray, img2: np.ndarray) -> np.ndarray: + def _Get_u_from_images(self, img1: np.ndarray, img2: np.ndarray) -> np.ndarray: """Use open cv to calculate displacements between images.""" - DIS = cv2.DISOpticalFlow_create() + DIS = cv2.DISOpticalFlow_create() IMG1_uint8 = np.uint8(img1*2**(8-round(np.log2(img1.max())))) IMG2_uint8 = np.uint8(img2*2**(8-round(np.log2(img1.max())))) Flow = DIS.calc(IMG1_uint8,IMG2_uint8,None) - # Project these displacements onto the mesh nodes - mapx = Flow[:,:,0] - mapy = Flow[:,:,1] - - Phix = self._Phi_x - Phiy = self._Phi_y - - Op_LU = self.__Op_LU + # Project these displacements onto the pixels + ux_p = Flow[:,:,0] + uy_p = Flow[:,:,1] - b = Phix @ mapx.ravel() + Phiy @ 
mapy.ravel() + b = self._N_x @ ux_p.ravel() + self._N_y @ uy_p.ravel() - DofValues = Op_LU.solve(b) + u0 = self.__Op_LU.solve(b) - return DofValues + return u0 def __Test_img(self, img: np.ndarray) -> None: """Function to test whether the image is the right size.""" - assert img.shape == self.__shapeImages, f"The image entered is the wrong size. Must be {self.__shapeImages}" + assert img.shape == self.shape, f"Wrong shape, must be {self.shape}" def __Get_imgRef(self, imgRef) -> np.ndarray: """Function that returns the reference image or checks whether the image entered is the correct size.""" if imgRef is None: - imgRef = self._imgRef + imgRef = self.__imgRef else: - assert isinstance(imgRef, np.ndarray), "The reference image must be an numpy array." - assert imgRef.size == self._roi.size, f"The reference image entered is the wrong size. Must be {self.__shapeImages}" + assert isinstance(imgRef, np.ndarray), "Must be an numpy array." + assert imgRef.size == self.roi.size, f"Wrong shape, must be {self.shape}" return imgRef - def Solve(self, img: np.ndarray, iterMax=1000, tolConv=1e-6, imgRef=None, verbosity=True) -> np.ndarray: + def Solve(self, img: np.ndarray, u0: np.ndarray=None, iterMax=1000, tolConv=1e-6, imgRef=None, verbosity=True) -> np.ndarray: """Displacement field between the img and the imgRef. Parameters ---------- img : np.ndarray image used for calculation + u0 : np.ndarray, optional + initial displacement field, by default None\n + If u0 == None, the field is initialized with _Get_u_from_images(imgRef, img) iterMax : int, optional maximum number of iterations, by default 1000 tolConv : float, optional @@ -354,54 +408,63 @@ def Solve(self, img: np.ndarray, iterMax=1000, tolConv=1e-6, imgRef=None, verbos self.__Test_img(img) imgRef = self.__Get_imgRef(imgRef) - # initalization of displacement vector - u = self.__Get_u_from_images(imgRef, img) + + # initial displacement vector + if u0 is None: + u0 = self._Get_u_from_images(imgRef, img) + else: + assert u0.size == self.__mesh.Nn * 2, "u0 must be a vector of dimension (Nn*2, 1)" + u = u0.copy() # Recovery of image pixel coordinates gridX, gridY = np.meshgrid(np.arange(imgRef.shape[1]),np.arange(imgRef.shape[0])) coordX, coordY = gridX.ravel(), gridY.ravel() img_fct = interpolate.RectBivariateSpline(np.arange(img.shape[0]),np.arange(img.shape[1]),img) - roi = self._roi + roi = self.roi f = imgRef.ravel()[roi] # reference image as a vector and retrieving pixels in the roi # Here the small displacement hypothesis is used # The gradient of the two images is assumed to be identical # For large displacements, the matrices would have to be recalculated using Compute_L_M - opLapReg = self.__alpha * self._opLap / self.__coef_opLap # operator laplacian regularized - Lcoef = self.L[:,roi] / self.__coef_M_Dic + R_reg = self.alpha * self._R / self.__w_R + Lcoef = self.L[:,roi] / self.__w_M for iter in range(iterMax): - ux_p, uy_p = self.__Calc_pixelDisplacement(u) + ux_p, uy_p = self.Calc_pixelDisplacement(u) g = img_fct.ev((coordY + uy_p)[roi], (coordX + ux_p)[roi]) r = f - g - b = Lcoef @ r - opLapReg @ u - du = self._M_LU.solve(b) + b = Lcoef @ r - R_reg @ u + du = self._M_reg_LU.solve(b) u += du norm_b = np.linalg.norm(b) if verbosity: - print(f"Iter {iter+1:2d} ||b|| {norm_b:.3} ", end='\r') + print(f"Iter {iter+1:2d} ||b|| {norm_b:.1e} ", end='\r') - if norm_b <= tolConv: + # if iter == 0: + # b0 = norm_b.copy() + # if norm_b < b0*tolConv: + if norm_b < tolConv: break - if iter+1 > iterMax: + if iter+1 == iterMax: raise Exception("Image 
correlation analysis did not converge.") return u - def Residual(self, u: np.ndarray, img: np.ndarray, imgRef=None) -> np.ndarray: - """Residual calculation between images (f-g). + def Calc_r_dic(self, u: np.ndarray, img: np.ndarray, imgRef=None) -> np.ndarray: + """Residual correlation between images\n + f(x) - g(x + u) Parameters ---------- u : np.ndarray - displacement field + displacement field (Ndof) img : np.ndarray image used for calculation imgRef : np.ndarray, optional @@ -425,35 +488,22 @@ def Residual(self, u: np.ndarray, img: np.ndarray, imgRef=None) -> np.ndarray: f = imgRef.ravel() # reference image as a vector and retrieving pixels in the roi - ux_p, uy_p = self.__Calc_pixelDisplacement(u) + ux_p, uy_p = self.Calc_pixelDisplacement(u) g = img_fct.ev((coordY + uy_p), (coordX + ux_p)) r = f - g - r_dic = np.reshape(r, self.__shapeImages) + r_dic = np.reshape(r, self.shape) return r_dic - def Set_meshCoef_coef(self, mesh: Mesh, imgScale: float) -> None: - """Set mesh size and scaling factor - - Parameters - ---------- - mesh : Mesh - mesh - imgScale : float - scaling coefficient [mm/px] - """ - assert isinstance(mesh, Mesh) and mesh.dim == 2, "Must be a 2D mesh." - self._meshCoef = mesh - self._coef = imgScale - - - def __Calc_pixelDisplacement(self, u: np.ndarray) -> tuple[np.ndarray, np.ndarray]: - """Calculates pixel displacement from mesh node displacement using shape functions.""" + def Calc_pixelDisplacement(self, u: np.ndarray) -> tuple[np.ndarray, np.ndarray]: + """Calculates pixel displacement from mesh node displacement using shape functions.""" + + assert u.size == self.__mesh.Nn * 2, f"The displacement vector field is the wrong dimension. Must be of dimension {self.__mesh.Nn * 2}" - ux_p = u @ self._Phi_x - uy_p = u @ self._Phi_y + ux_p = u @ self._N_x + uy_p = u @ self._N_y return ux_p, uy_p @@ -469,23 +519,26 @@ def Add_Result(self, idx: int, u_exp: np.ndarray, img: np.ndarray) -> None: img : np.ndarray image used """ - if idx not in self._list_idx_exp: + Ndof = self.__mesh.Nn * 2 + + if idx not in self.list_idx: self.__Test_img(img) - if u_exp.size != self.__nDof: - print(f"The displacement vector field is the wrong dimension. Must be of dimension {self.__nDof}") + if u_exp.size != Ndof: + print(f"The displacement vector field is the wrong dimension. 
Must be of dimension {Ndof}") return - self._list_idx_exp.append(idx) - self._list_u_exp.append(u_exp) - self._list_img_exp.append(img) + self.__list_idx.append(idx) + self.__list_u.append(u_exp) + self.__list_img.append(img) def Save(self, folder: str, filename: str="dic") -> None: """Saves the dic analysis as 'filename.pickle'.""" path_dic = Folder.New_File(f"{filename}.pickle", folder) with open(path_dic, 'wb') as file: + # don't remove self.__Op_LU = None - self._M_LU = None + self._M_reg_LU = None pickle.dump(self, file) # ---------------------------------------------- diff --git a/EasyFEA/simulations/_elastic.py b/EasyFEA/simulations/_elastic.py index 0d069322..a9599fbe 100644 --- a/EasyFEA/simulations/_elastic.py +++ b/EasyFEA/simulations/_elastic.py @@ -103,7 +103,7 @@ def __Construct_Local_Matrix(self) -> tuple[np.ndarray, np.ndarray]: weight_pg = mesh.Get_weight_pg(matrixType) nPg = weight_pg.size - N_vecteur_pg = mesh.Get_N_vector_pg(matrixType) + N_pg = mesh.Get_N_vector_pg(matrixType) rho = self.rho B_dep_e_pg = mesh.Get_B_e_pg(matrixType) @@ -121,7 +121,7 @@ def __Construct_Local_Matrix(self) -> tuple[np.ndarray, np.ndarray]: # Mass rho_e_pg = Reshape_variable(rho, Ne, nPg) - Mu_e = np.einsum(f'ep,p,pki,ep,pkj->eij', jacobian_e_pg, weight_pg, N_vecteur_pg, rho_e_pg, N_vecteur_pg, optimize="optimal") + Mu_e = np.einsum(f'ep,p,pdi,ep,pdj->eij', jacobian_e_pg, weight_pg, N_pg, rho_e_pg, N_pg, optimize="optimal") if self.dim == 2: thickness = self.material.thickness @@ -200,7 +200,7 @@ def Save_Iter(self): self._results.append(iter) - def Set_Iter(self, iter= -1) -> list[dict]: + def Set_Iter(self, iter: int=-1, resetAll=False) -> dict: results = super().Set_Iter(iter) diff --git a/EasyFEA/simulations/_phasefield.py b/EasyFEA/simulations/_phasefield.py index 08eec56f..22f9d1d9 100644 --- a/EasyFEA/simulations/_phasefield.py +++ b/EasyFEA/simulations/_phasefield.py @@ -439,42 +439,44 @@ def __Calc_psiPlus_e_pg(self): def __Construct_Damage_Matrix(self) -> tuple[np.ndarray, np.ndarray]: """Construct the elementary matrices for the damage problem.""" - phaseFieldModel = self.phaseFieldModel + pfm = self.phaseFieldModel # Data - k = phaseFieldModel.k + k = pfm.k + A = pfm.A PsiP_e_pg = self.__Calc_psiPlus_e_pg() - r_e_pg = phaseFieldModel.get_r_e_pg(PsiP_e_pg) - f_e_pg = phaseFieldModel.get_f_e_pg(PsiP_e_pg) + r_e_pg = pfm.get_r_e_pg(PsiP_e_pg) + f_e_pg = pfm.get_f_e_pg(PsiP_e_pg) - matrixType=MatrixType.mass + matrixType = MatrixType.mass mesh = self.mesh Ne = mesh.Ne nPg = r_e_pg.shape[1] + dN_e_pg = mesh.Get_dN_e_pg(matrixType) # K * Laplacien(d) + r * d = F - ReactionPart_e_pg = mesh.Get_ReactionPart_e_pg(matrixType) # -> jacobian_e_pg * weight_pg * Nd_pg' * Nd_pg - DiffusePart_e_pg = mesh.Get_DiffusePart_e_pg(matrixType, phaseFieldModel.A) # -> jacobian_e_pg, weight_pg, Bd_e_pg', A, Bd_e_pg - SourcePart_e_pg = mesh.Get_SourcePart_e_pg(matrixType) # -> jacobian_e_pg, weight_pg, Nd_pg' + ReactionPart_e_pg = mesh.Get_ReactionPart_e_pg(matrixType) # -> jacobian_e_pg * weight_pg * N_pg' * N_pg + DiffusePart_e_pg = mesh.Get_DiffusePart_e_pg(matrixType) # -> jacobian_e_pg * weight_pg * dN_e_pg' + SourcePart_e_pg = mesh.Get_SourcePart_e_pg(matrixType) # -> jacobian_e_pg, weight_pg, N_pg' tic = Tic() - # Part that involves the reaction term r -> jacobian_e_pg * weight_pg * r_e_pg * Nd_pg' * Nd_pg + # Part that involves the reaction term r -> r_e_pg * jacobian_e_pg * weight_pg * N_pg' * N_pg K_r_e = np.einsum('ep,epij->eij', r_e_pg, ReactionPart_e_pg, optimize='optimal') - # The part that 
involves diffusion K -> jacobian_e_pg, weight_pg, k, Bd_e_pg', Bd_e_pg + # The part that involves diffusion K -> k_e_pg * jacobian_e_pg * weight_pg * dN_e_pg' * A * dN_e_pg k_e_pg = Reshape_variable(k, Ne, nPg) - K_K_e = np.einsum('ep,epij->eij', k_e_pg, DiffusePart_e_pg, optimize='optimal') + K_K_e = np.einsum('ep,epij,jk,epkl->eil', k_e_pg, DiffusePart_e_pg, A, dN_e_pg, optimize='optimal') - # Source part Fd_e -> jacobian_e_pg, weight_pg, f_e_pg, Nd_pg' + # Source part Fd_e -> f_e_pg * jacobian_e_pg, weight_pg, N_pg' Fd_e = np.einsum('ep,epij->eij', f_e_pg, SourcePart_e_pg, optimize='optimal') Kd_e = K_r_e + K_K_e if self.dim == 2: # THICKNESS not used in femobject ! - thickness = phaseFieldModel.thickness + thickness = pfm.thickness Kd_e *= thickness Fd_e *= thickness @@ -539,7 +541,7 @@ def Save_Iter(self): self._results.append(iter) - def Set_Iter(self, iter=-1) -> list[dict]: + def Set_Iter(self, iter: int=-1, resetAll=False) -> dict: results = super().Set_Iter(iter) @@ -555,7 +557,7 @@ def Set_Iter(self, iter=-1) -> list[dict]: self.__updatedDamage = False self.__updatedDisplacement = False - if self.phaseFieldModel.solver == self.phaseFieldModel.SolverType.History: + if resetAll and self.phaseFieldModel.solver == self.phaseFieldModel.SolverType.History: # It's really useful to do this otherwise when we calculate psiP there will be a problem self.__old_psiP_e_pg = [] self.__old_psiP_e_pg = self.__Calc_psiPlus_e_pg() # update psi+ with the current state diff --git a/EasyFEA/simulations/_simu.py b/EasyFEA/simulations/_simu.py index 27d04d35..58465390 100644 --- a/EasyFEA/simulations/_simu.py +++ b/EasyFEA/simulations/_simu.py @@ -130,8 +130,10 @@ def Save_Iter(self) -> None: return iter @abstractmethod - def Set_Iter(self, iter: int=-1) -> list[dict]: - """Sets the simulation to the specified iteration (usually the last one) and returns the list of dictionary.""" + def Set_Iter(self, iter: int=-1, resetAll=False) -> dict: + """Sets the simulation to the specified iteration (usually the last one) and then reset the required variables if necessary (resetAll).\n + Returns the simulation results dictionary.""" + iter = int(iter) assert isinstance(iter, int), "Must provide an integer." 
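With the new signature, loops that replay a saved simulation from its first iteration are expected to activate iteration 0 with `resetAll=True`, so that internal state such as the phase-field history is rebuilt before stepping forward; the export and plotting helpers further down in this diff now do exactly that. A rough sketch of the pattern, assuming an existing `simu` object with a "damage" result:

```python
# Replay a saved simulation iteration by iteration (sketch; `simu` is assumed to exist).
simu.Set_Iter(0, resetAll=True)      # rebuild history variables before the first replayed step

for i in range(simu.Niter):
    results = simu.Set_Iter(i)       # returns the results dictionary of iteration i
    damage = simu.Result("damage")   # any available result name works here
    # ... plot / export the damage field for this iteration
```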
diff --git a/EasyFEA/simulations/_thermal.py b/EasyFEA/simulations/_thermal.py index a32fa79c..db5cf187 100644 --- a/EasyFEA/simulations/_thermal.py +++ b/EasyFEA/simulations/_thermal.py @@ -163,7 +163,7 @@ def Save_Iter(self): self._results.append(iter) - def Set_Iter(self, iter=-1) -> list[dict]: + def Set_Iter(self, iter: int=-1, resetAll=False) -> dict: results = super().Set_Iter(iter) diff --git a/EasyFEA/utilities/Display.py b/EasyFEA/utilities/Display.py index 8844c3c9..7d3c7d2e 100644 --- a/EasyFEA/utilities/Display.py +++ b/EasyFEA/utilities/Display.py @@ -116,24 +116,28 @@ def Plot_Result(obj, result: Union[str,np.ndarray], deformFactor=0.0, coef=1.0, # Builds boundary markers for the colorbar min, max = clim - if isinstance(result, str) and result == "damage": - min = values.min()-1e-12 - max = np.max([values.max()+1e-12, 1]) - ticks = np.linspace(0,1,11) # ticks colorbar + if min == None and max == None: + if isinstance(result, str) and result == "damage": + min = values.min()-1e-12 + max = np.max([values.max()+1e-12, 1]) + ticks = np.linspace(min,max,11) + # ticks = np.linspace(0,1,11) # ticks colorbar + else: + max = np.max(values)+1e-12 if max == None else max + min = np.min(values)-1e-12 if min == None else min + ticks = np.linspace(min,max,11) + levels = np.linspace(min, max, ncolors) else: - max = np.max(values)+1e-12 if max == None else max - min = np.min(values)-1e-12 if min == None else min - ticks = np.linspace(min,max,11) - levels = np.linspace(min, max, ncolors) + ticks = np.linspace(min, max, 11) + levels = np.linspace(min, max, ncolors) + if ncolors != 256: norm = colors.BoundaryNorm(boundaries=np.linspace(min, max, ncolors), ncolors=256) else: norm = None if ax is not None: - [collection.colorbar.remove() - for collection in ax.collections - if collection.colorbar is not None] + _Remove_colorbar(ax) ax.clear() fig = ax.figure # change the plot dimentsion if the given axes is in 3d @@ -959,6 +963,9 @@ def Plot_Energy(simu, load=np.array([]), displacement=np.array([]), plotSolMax=T times = [] if plotSolMax : listSolMax = [] + # activates the first iteration + simu.Set_Iter(0, resetAll=True) + for i, iteration in enumerate(iterations): # Update simulation at iteration i @@ -1116,6 +1123,9 @@ def Movie_Simu(simu, result: str, folder: str, filename='video.gif', N:int=200, ax = Init_Axes(simu.mesh.inDim) fig = ax.figure + # activates the first iteration + simu.Set_Iter(0, resetAll=True) + def DoAnim(fig: plt.Figure, i): simu.Set_Iter(iterations[i]) ax = fig.axes[0] @@ -1274,6 +1284,12 @@ def _Get_list_faces(mesh: Mesh, dimElem:int) -> list[list[int]]: return list_faces +def _Remove_colorbar(ax: plt.Axes) -> None: + [collection.colorbar.remove() + for collection in ax.collections + if collection.colorbar is not None] + + def Init_Axes(dim: int=2, elev=105, azim=-90) -> Union[plt.Axes, Axes3D]: if dim == 1 or dim == 2: ax = plt.subplots()[1] diff --git a/EasyFEA/utilities/Paraview_Interface.py b/EasyFEA/utilities/Paraview_Interface.py index 1d0aa871..3c20934c 100644 --- a/EasyFEA/utilities/Paraview_Interface.py +++ b/EasyFEA/utilities/Paraview_Interface.py @@ -63,6 +63,9 @@ def Make_Paraview(simu, folder: str, N=200, details=False, nodesField=[], elemen if len(nodesField) == 0 and len(elementsField) == 0: Display.MyPrintError("The simulation has no solution fields to display in paraview.") + # activates the first iteration + simu.Set_Iter(0, resetAll=True) + for i, iter in enumerate(iterations): f = Folder.Join(folder,f'solution_{iter}.vtu') diff --git 
a/EasyFEA/utilities/PyVista_Interface.py b/EasyFEA/utilities/PyVista_Interface.py index 93525cf4..9580db93 100644 --- a/EasyFEA/utilities/PyVista_Interface.py +++ b/EasyFEA/utilities/PyVista_Interface.py @@ -539,6 +539,9 @@ def Movie_simu(simu, result: str, folder: str, filename='video.gif', N:int=200, step = np.max([1, Niter//N]) iterations: np.ndarray = np.arange(0, Niter, step) + # activates the first iteration + simu.Set_Iter(0, resetAll=True) + def DoAnim(plotter, i): simu.Set_Iter(iterations[i]) Plot(simu, result, deformFactor, coef, nodeValues, plotter=plotter, **kwargs) @@ -625,10 +628,14 @@ def _Plotter(off_screen=False, add_axes=True, shape=(1,1), linkViews=True): def _setCameraPosition(plotter: pv.Plotter, inDim: int): plotter.camera_position = 'xy' - if inDim == 3: + if inDim == 3: plotter.camera.elevation += 25 plotter.camera.azimuth += 10 plotter.camera.reset_clipping_range() + # if inDim == 3: + # plotter.camera.elevation += 15 + # plotter.camera.azimuth += 5 + # plotter.camera.reset_clipping_range() def _pvGrid(obj, result: Union[str, np.ndarray]=None, deformFactor=0.0, nodeValues=True) -> pv.UnstructuredGrid: """Construct the pyvista mesh from obj (_Simu, Mesh, _GroupElem and _Geoms object)""" diff --git a/examples/Phasefield/PlateWithHole.py b/examples/Phasefield/PlateWithHole.py index 1fcb5c36..f3efb8aa 100644 --- a/examples/Phasefield/PlateWithHole.py +++ b/examples/Phasefield/PlateWithHole.py @@ -35,7 +35,7 @@ showFig = True saveParaview = False -makeMovie = False +makeMovie = True # Material materialType = "Elas_Isot" # ["Elas_Isot", "Elas_IsotTrans"] @@ -48,7 +48,7 @@ # splits = ["Bourdin","Amor","Miehe","Stress","He","AnisotStrain","AnisotStress","Zhang"] # splits = ["Zhang"] # splits = ["AnisotStrain","AnisotStress","Zhang"] -splits = ["AnisotStress"] +splits = ["Miehe"] regus = ["AT1"] # ["AT1", "AT2"] # regus = ["AT1", "AT2"] @@ -76,11 +76,25 @@ def DoMesh(L: float, h: float, diam: float, thickness: float, l0: float, split: domain = Domain(point, Point(L, h), clD) circle = Circle(Point(L/2, h/2), diam, clD, isHollow=True) + folder = Folder.New_File("",results=True) + ax = Display.Init_Axes() + domain.Plot(ax, color='k', plotPoints=False) + circle.Plot(ax, color='k', plotPoints=False) + # if refineGeom != None: + # refineGeom.Plot(ax, color='k', plotPoints=False) + # ax.scatter(((L+diam)/2, L/2), (h/2, (h+diam)/2), c='k') + ax.axis('off') + Display.Save_fig(folder,"sample",True) + if dim == 2: mesh = Mesher().Mesh_2D(domain, [circle], ElemType.TRI3, refineGeoms=[refineGeom]) elif dim == 3: mesh = Mesher().Mesh_Extrude(domain, [circle], [0,0,thickness], [4], ElemType.HEXA8,refineGeoms=[refineGeom]) + ax = Display.Plot_Mesh(mesh, lw=0.3, facecolors='white') + ax.axis('off'); ax.set_title("") + Display.Save_fig(folder, "mesh", transparent=True) + return mesh # ---------------------------------------------- @@ -308,8 +322,47 @@ def Loading(ud: float): if saveParaview: Paraview_Interface.Make_Paraview(simu, folder) - if makeMovie: - Display.Movie_Simu(simu, "damage", folder, "damage.mp4", N=200, plotMesh=False, deformFactor=1.5) + if makeMovie: + + from EasyFEA import PyVista_Interface as pvi + + iterations = np.arange(0, len(simu.results)-30, 1) + + simu.Set_Iter(-1) + nodes_upper = simu.mesh.Nodes_Conditions(lambda x,y,z: y==h) + depMax = simu.Result('displacement_norm')[nodes_upper].max() + deformFactor = L*.05/depMax + + K = simu.Get_K_C_M_F()[-1] + + nodes = list(set(simu.mesh.nodes) - set(simu.mesh.Nodes_Cylinder(Circle(Point(L/2,h/2), diam)))) + + def 
Func(plotter, n): + + simu.Set_Iter(iterations[n]) + + grid = pvi._pvGrid(simu, 'damage', deformFactor) + + tresh = grid.threshold((0,0.8)) + + # pvi.Plot_Elements(simu.mesh, nodes, dimElem=1, color='k', plotter=plotter, line_width=2) + pvi.Plot(tresh, 'damage', deformFactor, show_edges=False, plotter=plotter, clim=(0,1)) + + pvi.Plot_BoundaryConditions() + + + # _, _, coord, _ = pvi._Init_obj(simu, deformFactor) + # posY = coord[nodes_upper, 1].mean() + # b = thickness/2 + # cyl1 = pvi.pv.Cylinder((L/2, -b/2, thickness/2), (0,-1,0), h/2, b) + # cyl2 = pvi.pv.Cylinder((L/2, posY+b/2, thickness/2), (0,1,0), h/2, b) + # pvi.Plot(cyl1, opacity=0.5, show_edges=True, color='grey', plotter=plotter) + # pvi.Plot(cyl2, opacity=0.5, show_edges=True, color='grey', plotter=plotter) + + pvi.Movie_func(Func, iterations.size, folder, 'damage.mp4') + + + # Display.Movie_Simu(simu, "damage", folder, "damage.mp4", N=200, plotMesh=False, deformFactor=1.5) if doSimu: Tic.Plot_History(folder, details=False) diff --git a/examples/Phasefield/PostProcess.py b/examples/Phasefield/PostProcess.py index 79a66f2c..0babb6cf 100644 --- a/examples/Phasefield/PostProcess.py +++ b/examples/Phasefield/PostProcess.py @@ -14,44 +14,46 @@ # Configuration # ---------------------------------------------- # "PlateWithHole_Benchmark", "PlateWithHole_FCBA", "Shear_Benchmark", "Tension_Benchmark" "L_Shape_Benchmark" - simulation = "PlateWithHole_Benchmark" + # simulation = "Shear_Benchmark" + simulation = "Shear_Benchmark" meshTest = False loadSimu = True - plotDamage = True - savefig = False + savefig = True - folder = Folder.New_File(simulation, results=True) + folder_results = Folder.New_File(simulation, results=True) if savefig: if meshTest: - folder_save = Folder.Join(folder, "Test", "_Post processing") + folder_save = Folder.Join(folder_results, "Test", "_Post processing") else: - folder_save = Folder.Join(folder, "_Post processing") + folder_save = Folder.Join(folder_results, "_Post processing") else: folder_save="" list_mat = ["Elas_Isot"] # ["Elas_Isot", "Elas_IsotTrans", "Elas_Anisot"] - # list_regu = ["AT1", "AT2"] # ["AT1", "AT2"] - list_regu = ["AT2"] # ["AT1", "AT2"] + list_regu = ["AT2", "AT1"] # ["AT1", "AT2"] + # list_regu = ["AT2"] # ["AT1", "AT2"] list_simpli2D = ["DP"] # ["CP","DP"] list_solver = ["History"] # list_split = ["Bourdin","Amor","Miehe","He","Zhang"] - # list_split = ["Bourdin","Amor","Miehe","He","Stress","AnisotStrain","AnisotStress","Zhang"] + # list_split = ["Bourdin","Amor","Miehe","He","AnisotStrain","AnisotStress","Zhang"] # list_split = ["Bourdin","He","AnisotStrain","AnisotStress","Zhang"] - # list_split = ["He","AnisotStrain","AnisotStress", "Zhang"] - list_split = ["He"] + list_split = ["AnisotStrain","AnisotStress", "He", "Zhang"] + # list_split = ["AnisotStress", "He"] + # list_split = ["Bourdin","Amor","Miehe"] + # list_split = ["AnisotStress"] - # listOptimMesh=[True, False] # [True, False] - listOptimMesh=[True] # [True, False] + # listOptimMesh=[False, True] # [True, False] + listOptimMesh=[False] # [True, False] - listTol = [1e-0, 1e-1, 1e-2] # [1e-0, 1e-1, 1e-2, 1e-3, 1e-4] + # listTol = [1e-0, 1e-1, 1e-2, 1e-3, 1e-4, 1e-5] # [1e-0, 1e-1, 1e-2, 1e-3, 1e-4] # listTol = [1e-0, 1e-2] - # listTol = [1e-0] + listTol = [1e-0] # listnL = [100] # [100] [100, 120, 140, 180, 200] listnL = [0] @@ -59,12 +61,15 @@ listTheta = [0] # listTheta = [-0, -10, -20, -30, -45, -60, -70, -80, -90] - # snapshot = [18.5, 24.6, 25, 28, 35] - # snapshot = [24.6] + # snapshots = [18.5, 24.6, 25, 28, 35] + # 
+    # snapshots = [18.6,24.6,24.8]
+    # snapshots = [24.6, 25]
     snapshots = []
 
-    # depMax = 20e-5
-    depMax = 2.5e-5
+    depMax = 20e-5
+    # depMax = 2.5e-5
+    # depMax = 3.5e-5
 
     # Génération des configurations
     listConfig = []
@@ -103,7 +108,7 @@
 
         tic = Tic()
 
-        foldername = Folder.PhaseField_Folder(folder, material=comp, split=split, regu=regu, simpli2D=simpli2D, tolConv=tolConv, solver=solveur, test=meshTest, optimMesh=optimMesh, closeCrack=False, nL=nL, theta=theta)
+        foldername = Folder.PhaseField_Folder(folder_results, material=comp, split=split, regu=regu, simpli2D=simpli2D, tolConv=tolConv, solver=solveur, test=meshTest, optimMesh=optimMesh, closeCrack=False, nL=nL, theta=theta)
 
         fileForceDep = Folder.Join(foldername, "force-displacement.pickle")
         fileSimu = Folder.Join(foldername, "simulation.pickle")
@@ -111,23 +116,15 @@
         nomSimu = foldername.split(comp+'_')[-1]
 
         # text = nomSimu
-        text = split
-        text = foldername.replace(Folder.Get_Path(foldername), "")[1:]
-
-        # Loads force and displacement
-        if Folder.Exists(fileForceDep):
-            load, displacement = Simulations.Load_Force_Displacement(foldername)
-
-            if depMax == 0:
-                depMax = displacement[-1]
-
-            indexLim = np.where(displacement <= depMax)[0]
-
-            ax_load.plot(displacement[indexLim], np.abs(load[indexLim]), label=text)
+        # text = split
+        # text = f"{split}_{regu}_tol{tolConv:1.0e}"
+        # text = f"{tolConv:1.0e}"
+        # text = f"{split}_{regu}"
+        # text = foldername.replace(Folder.Get_Path(foldername), "")[1:]
 
-        else:
-            if nomSimu not in missingSimulations: missingSimulations.append(nomSimu)
-            Display.MyPrintError("Data are not available:\n"+fileForceDep)
+        text = f"{split} {regu}"
+        # if optimMesh:
+        #     text += f" optim"
 
         if (loadSimu or plotDamage) and Folder.Exists(fileSimu):
             # Load simulation
@@ -141,6 +138,35 @@
         else:
             if nomSimu not in missingSimulations: missingSimulations.append(nomSimu)
             Display.MyPrintError("Simu is not available:\n"+fileForceDep)
+
+
+        # Loads force and displacement
+        if Folder.Exists(fileForceDep):
+            force, displacement = Simulations.Load_Force_Displacement(foldername)
+
+            if depMax == 0:
+                depMax = displacement[-1]
+
+            indexLim = np.where(displacement <= depMax)[0]
+
+            # text += f" ({temps_str:.3} {unite})"
+
+            ls = None
+            c = None
+
+            # ls = '--' if optimMesh else None
+            # c = ax_load.get_lines()[-1].get_color() if optimMesh else None
+
+            ls = '--' if regu == "AT1" else None
+            c = ax_load.get_lines()[-1].get_color() if regu == "AT1" else None
+
+            ax_load.plot(displacement[indexLim]*1e3, np.abs(force[indexLim])*1e-6, c=c, label=text, ls=ls)
+
+
+        else:
+            if nomSimu not in missingSimulations: missingSimulations.append(nomSimu)
+            Display.MyPrintError("Data are not available:\n"+fileForceDep)
+
         if plotDamage and Folder.Exists(fileSimu):
@@ -152,33 +178,62 @@
             colorBarIsClose = False
 
             # Displays last damage
-            Display.Plot_Result(simu, "damage", nodeValues=True, colorbarIsClose=colorBarIsClose,
-                                folder=folder_save, filename=f"{split} tol{tolConv} last", plotMesh=False,
-                                title=f"{split}_{regu}_tol{tolConv}")
+            filename = foldername.replace(Folder.Get_Path(foldername), "")[1:]
 
-            # Recover snapshot iterations
-            for dep in snapshots:
-
-                i = np.where(displacement*1e6>=dep)[0][0]
-
-                simu.Set_Iter(i)
+            ax = Display.Plot_Result(simu, "damage", ncolors=21, colorbarIsClose=False)
+            ax.axis('off'); ax.set_title("")
+            Display.Save_fig(folder_save, f"{filename} last")
 
-                filenameDamage = f"{nomSimu}, ud = {np.round(displacement[i]*1e6,2)}"
-                # titleDamage = filenameDamage
+            # Display.Plot_Result(simu, "damage", ncolors=21, nodeValues=True, colorbarIsClose=colorBarIsClose,
+            #                     folder=folder_save, filename=f"{split} tol{tolConv} last", plotMesh=False,
+            #                     title=f"{split}_{regu}_tol{tolConv}")
 
-                titleDamage = split
-                # filenameDamage = f"PlateBench {comp}_{split}_{regu}_{simpli2D}"
+            # Recover snapshot iterations
 
-                Display.Plot_Result(simu, "damage", nodeValues=True, colorbarIsClose=colorBarIsClose, folder=folder_save, filename=filenameDamage,title=titleDamage)
+            if len(snapshots) > 0:
+
+                # ax_load_2 = Display.Init_Axes()
+                # ax_load_2.plot(displacement[indexLim]*1e3, np.abs(force[indexLim])*1e-6, label=text)
+                # ax_load_2.set_xlabel("Déplacement [mm]")
+                # ax_load_2.set_ylabel("Force [kN/mm]")
+
+                for d, dep in enumerate(snapshots):
+
+                    try:
+                        i = np.where(displacement*1e6>=dep)[0][0]
+                    except IndexError:
+                        continue
+
+                    simu.Set_Iter(i)
+                    filenameDamage = f"{nomSimu}, ud = {np.round(displacement[i]*1e6,2)}"
+                    # titleDamage = filenameDamage
+
+                    # titleDamage = split
+                    # filenameDamage = f"PlateBench {comp}_{split}_{regu}_{simpli2D}"
+
+                    # Display.Plot_Result(simu, "damage", nodeValues=True, colorbarIsClose=colorBarIsClose, folder=folder_save, filename=filenameDamage,title=titleDamage)
+
+                    ax = Display.Plot_Result(simu, "damage", ncolors=21, colorbarIsClose=True)
+                    ax.axis('off'); ax.set_title("")
+                    # Display._Remove_colorbar(ax)
+                    filename = foldername.replace(Folder.Get_Path(foldername), "")[1:]
+                    Display.Save_fig(folder_save, f"{filename} snap={dep}", True) # , "png", 200
+
+                    # ax_load_2.scatter(displacement[i]*1e3, force[i]*1e-6, c='k', marker='+', zorder=2)
+
+                # plt.figure(ax_load_2.figure)
+                # Display.Save_fig(folder_save, f"load displacement {filename}")
+
         # text = text.replace("AnisotStrain","Spectral")
 
         tic.Tac("PostProcessing", split, False)
 
+    ax_load.set_xlabel("Déplacement [mm]")
+    ax_load.set_ylabel("Force [kN/mm]")
     # ax_load.set_xlabel("displacement [µm]")
     # ax_load.set_ylabel("load [kN/mm]")
-    ax_load.set_xlabel("displacement")
-    ax_load.set_ylabel("load")
+    # ax_load.set_xlabel("displacement")
+    # ax_load.set_ylabel("load")
     ax_load.grid()
     ax_load.legend()
     plt.figure(ax_load.figure)
diff --git a/examples/Phasefield/Shear.py b/examples/Phasefield/Shear.py
index f763dc1d..0f59c4d2 100644
--- a/examples/Phasefield/Shear.py
+++ b/examples/Phasefield/Shear.py
@@ -23,7 +23,7 @@
 # ----------------------------------------------
 dim = 2
-doSimu = False
+doSimu = True
 
 # Mesh
 meshTest = True
 openCrack = True
@@ -37,14 +37,14 @@
 # splits = ["Bourdin","Amor","Miehe","Stress"] # Splits Isotropes
 # splits = ["He","AnisotStrain","AnisotStress","Zhang"] # Splits Anisotropes sans bourdin
 # splits = ["Bourdin","Amor","Miehe","Stress","He","AnisotStrain","AnisotStress","Zhang"]
-splits = ["Amor","Miehe"]
+splits = ["Bourdin","Miehe"]
 
 # regus = ["AT1", "AT2"]
-regus = ["AT1"] # "AT1", "AT2"
+regus = ["AT2"] # "AT1", "AT2"
 
 # PostProcessing
-plotResult = False
-showResult = False
+plotResult = True
+showResult = True
 plotMesh = False
 plotEnergy = False
 saveParaview = False; Nparaview=400
@@ -93,7 +93,16 @@ def DoMesh(split: str) -> Mesh:
         l3 = Line(ptC3, ptC4, meshSize, openCrack)
         l4 = Line(ptC4, ptC1, meshSize, openCrack)
         cracks = [Contour([l1, l2, l3, l4], isOpen=openCrack)]
-
+
+    # folder = Folder.New_File("",results=True)
+    # ax = Display.Init_Axes()
+    # contour.Get_Contour().Plot(ax, color='k', plotPoints=False)
+    # cracks[0].Plot(ax, color='k', plotPoints=False)
+    # # if refineDomain != None:
+    # #     refineDomain.Plot(ax, color='k', plotPoints=False)
+    # ax.axis('off')
+    # Display.Save_fig(folder,"sample",True)
+
     if dim == 2:
         mesh = Mesher().Mesh_2D(contour, [], ElemType.TRI3, cracks, [refineDomain])
     elif dim == 3:
@@ -224,12 +233,24 @@ def Loading(dep):
     Display.Plot_Iter_Summary(simu, folder, None, None)
     Display.Plot_BoundaryConditions(simu)
     Display.Plot_Force_Displacement(force*1e-6, displacement*1e6, 'ud [µm]', 'f [kN/mm]', folder)
-    Display.Plot_Result(simu, "damage", nodeValues=True, plotMesh=False, folder=folder, filename="damage")
+    ax = Display.Plot_Result(simu, "damage", nodeValues=True, plotMesh=False, folder=folder, filename="damage", ncolors=25, clim=(0,1))
+
+
+
+    # ax = Display.Plot_Result(simu, "damage", 1.5, ncolors=21, clim=(0,0.9))
+    # ax.axis('off'); ax.set_title("")
+    # Display.Save_fig(folder, "deform damage")
+
+
+    pass
 
 if plotMesh:
     # pvi.Plot_Mesh(simu.mesh).show()
     # pvi.Plot(simu, "ux", show_edges=True).show()
-    Display.Plot_Mesh(simu.mesh)
+    ax = Display.Plot_Mesh(simu.mesh, lw=0.3, facecolors='white')
+    ax.axis('off'); ax.set_title("")
+    Display.Save_fig(folder, "mesh", transparent=True)
+
 if saveParaview:
     Paraview_Interface.Make_Paraview(simu, folder, Nparaview)
diff --git a/pyproject.toml b/pyproject.toml
index 65ab3e58..1ca0dd13 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"
 
 [project]
 name = "EasyFEA"
-version = "1.1.0"
+version = "1.2.0"
 description = "User-friendly Python library that simplifies finite element analysis."
 authors = [
     {name = "Matthieu Noel", email = "matthieu.noel7@gmail.com"}
diff --git a/tests/Materials_test.py b/tests/Materials_test.py
index a9d4f1be..8fc5d51e 100644
--- a/tests/Materials_test.py
+++ b/tests/Materials_test.py
@@ -67,7 +67,7 @@ def setUp(self):
 
         # phasefieldModel
         self.splits = PhaseField.Get_splits()
         self.regularizations = PhaseField.Get_regularisations()
-        self.phaseFieldModels = []
+        self.phaseFieldModels: list[PhaseField] = []
 
         splits_Isot = [PhaseField.SplitType.Amor, PhaseField.SplitType.Miehe, PhaseField.SplitType.Stress]
@@ -338,47 +338,40 @@ def test_split_phaseField(self):
 
         for pfm in self.phaseFieldModels:
 
-            assert isinstance(pfm, PhaseField)
-
-            comportement = pfm.material
-
-            if isinstance(comportement, _Elas):
-                c = comportement.C
+            mat: _Elas = pfm.material
+            c = mat.C
 
-            print(f"{type(comportement).__name__} {comportement.simplification} {pfm.split} {pfm.regularization}")
+            print(f"{type(mat).__name__} {mat.simplification} {pfm.split} {pfm.regularization}")
 
-            if comportement.dim == 2:
+            if mat.dim == 2:
                 Epsilon_e_pg = Epsilon2D_e_pg
-            elif comportement.dim == 3:
+            elif mat.dim == 3:
                 Epsilon_e_pg = Epsilon3D_e_pg
 
             cP_e_pg, cM_e_pg = pfm.Calc_C(Epsilon_e_pg.copy(), verif=True)
 
             # Test that cP + cM = c
-            cpm = cP_e_pg+cM_e_pg
-            decompC = c-cpm
-            verifC = np.linalg.norm(decompC)/np.linalg.norm(c)
-            if pfm.split != "He":
-                self.assertTrue(np.abs(verifC) <= tol)
+            cpm = cP_e_pg + cM_e_pg
+            decompC = c - cpm
+            verifC = np.linalg.norm(decompC, axis=(-2,-1))/np.linalg.norm(c, axis=(-2,-1))
+            self.assertTrue(np.max(verifC) <= tol)
 
             # Test that SigP + SigM = Sig
             Sig_e_pg = np.einsum('ij,epj->epi', c, Epsilon_e_pg, optimize='optimal')
-
             SigP = np.einsum('epij,epj->epi', cP_e_pg, Epsilon_e_pg, optimize='optimal')
             SigM = np.einsum('epij,epj->epi', cM_e_pg, Epsilon_e_pg, optimize='optimal')
             decompSig = Sig_e_pg-(SigP+SigM)
-            verifSig = np.linalg.norm(decompSig)/np.linalg.norm(Sig_e_pg)
+            verifSig = np.linalg.norm(decompSig, axis=(-2,-1))/np.linalg.norm(Sig_e_pg, axis=(-2,-1))
             if np.linalg.norm(Sig_e_pg)>0:
-                self.assertTrue(np.abs(verifSig) <= tol)
-
-
+                self.assertTrue(np.max(verifSig) <= tol)
+
             # Test that Eps:C:Eps = Eps:(cP+cM):Eps
-            energiec = np.einsum('epj,ij,epi->ep', Epsilon_e_pg, c, Epsilon_e_pg, optimize='optimal')
-            energiecP = np.einsum('epj,epij,epi->ep', Epsilon_e_pg, cP_e_pg, Epsilon_e_pg, optimize='optimal')
-            energiecM = np.einsum('epj,epij,epi->ep', Epsilon_e_pg, cM_e_pg, Epsilon_e_pg, optimize='optimal')
-            verifEnergie = np.linalg.norm(energiec-(energiecP+energiecM))/np.linalg.norm(energiec)
-            if np.linalg.norm(energiec)>0:
-                self.assertTrue(np.abs(verifEnergie) <= tol)
+            energy_c = np.einsum('epj,ij,epi->ep', Epsilon_e_pg, c, Epsilon_e_pg, optimize='optimal')
+            energy_cP = np.einsum('epj,epij,epi->ep', Epsilon_e_pg, cP_e_pg, Epsilon_e_pg, optimize='optimal')
+            energy_cM = np.einsum('epj,epij,epi->ep', Epsilon_e_pg, cM_e_pg, Epsilon_e_pg, optimize='optimal')
+            verifEnergy = np.linalg.norm(energy_c-(energy_cP+energy_cM))/np.linalg.norm(energy_c)
+            if np.linalg.norm(energy_c)>0:
+                self.assertTrue(np.max(verifEnergy) <= tol)
 
 if __name__ == '__main__':
     unittest.main(verbosity=2)
\ No newline at end of file
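
As an illustration of the reworked verification in tests/Materials_test.py above: the decomposition error is now reduced per element and per Gauss point with np.linalg.norm(..., axis=(-2,-1)) and bounded through np.max, instead of a single global norm. The following is a minimal standalone sketch of that idea using synthetic NumPy arrays; the array names (Ne, nPg, n) and the random stand-ins for cP_e_pg / cM_e_pg are assumptions for illustration only, not EasyFEA phase-field results.

    import numpy as np

    rng = np.random.default_rng(0)
    Ne, nPg, n = 4, 3, 6                      # elements, Gauss points, Voigt size (assumed)

    c = rng.random((n, n))                    # reference stiffness matrix (synthetic)
    cP_e_pg = rng.random((Ne, nPg, n, n))     # stand-in for the "positive" split part
    cM_e_pg = c - cP_e_pg                     # stand-in for the "negative" part, so cP + cM = c

    # relative decomposition error, one value per (element, Gauss point) sample
    decompC = c - (cP_e_pg + cM_e_pg)
    verifC = np.linalg.norm(decompC, axis=(-2, -1)) / np.linalg.norm(c, axis=(-2, -1))

    tol = 1e-12
    assert np.max(verifC) <= tol              # the worst sample must satisfy the tolerance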