diff --git a/Solver/src/AcousticSolver/SpatialDiscretization.f90 b/Solver/src/AcousticSolver/SpatialDiscretization.f90 index 8ee80b702b..8e23497670 100644 --- a/Solver/src/AcousticSolver/SpatialDiscretization.f90 +++ b/Solver/src/AcousticSolver/SpatialDiscretization.f90 @@ -111,7 +111,7 @@ end subroutine Finalize_SpaceAndTimeMethods ! !//////////////////////////////////////////////////////////////////////// ! - SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask, Level) IMPLICIT NONE ! ! --------- @@ -124,6 +124,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables @@ -167,7 +168,7 @@ END SUBROUTINE ComputeTimeDerivative ! This routine computes the time derivative element by element, without considering the Riemann Solvers ! This is useful for estimating the isolated truncation error ! - SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask, Level) IMPLICIT NONE ! ! --------- @@ -179,7 +180,8 @@ SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elemen REAL(KIND=RP) :: time integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements - logical, intent(in), optional :: element_mask(:) + logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! 
Local variables diff --git a/Solver/src/CahnHilliardSolver/SpatialDiscretization.f90 b/Solver/src/CahnHilliardSolver/SpatialDiscretization.f90 index dd2d89b2d2..d1aff5d3e9 100644 --- a/Solver/src/CahnHilliardSolver/SpatialDiscretization.f90 +++ b/Solver/src/CahnHilliardSolver/SpatialDiscretization.f90 @@ -118,7 +118,7 @@ end subroutine Initialize_SpaceAndTimeMethods ! !//////////////////////////////////////////////////////////////////////// ! - subroutine ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask) + subroutine ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask, Level) IMPLICIT NONE ! ! --------- @@ -131,6 +131,7 @@ subroutine ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables @@ -372,7 +373,7 @@ subroutine ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem end subroutine ComputeTimeDerivative - subroutine ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask) + subroutine ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask, Level) IMPLICIT NONE ! ! 
--------- @@ -384,7 +385,8 @@ subroutine ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elemen REAL(KIND=RP) :: time integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements - logical, intent(in), optional :: element_mask(:) + logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level error stop 'ComputeTimeDerivativeIsolated not implemented for Cahn-Hilliard' end subroutine ComputeTimeDerivativeIsolated diff --git a/Solver/src/IncompressibleNSSolver/SpatialDiscretization.f90 b/Solver/src/IncompressibleNSSolver/SpatialDiscretization.f90 index c4d01b7d77..a11b876b77 100644 --- a/Solver/src/IncompressibleNSSolver/SpatialDiscretization.f90 +++ b/Solver/src/IncompressibleNSSolver/SpatialDiscretization.f90 @@ -173,7 +173,7 @@ end subroutine Finalize_SpaceAndTimeMethods ! !//////////////////////////////////////////////////////////////////////// ! - SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask, Level) IMPLICIT NONE ! ! --------- @@ -186,6 +186,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables @@ -263,7 +264,7 @@ END SUBROUTINE ComputeTimeDerivative ! This routine computes the time derivative element by element, without considering the Riemann Solvers ! This is useful for estimating the isolated truncation error ! - SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask, Level) use EllipticDiscretizationClass IMPLICIT NONE ! 
@@ -277,6 +278,7 @@ SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elemen integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables diff --git a/Solver/src/MultiphaseSolver/SpatialDiscretization.f90 b/Solver/src/MultiphaseSolver/SpatialDiscretization.f90 index 4d861a24b2..2de5ef3bd5 100644 --- a/Solver/src/MultiphaseSolver/SpatialDiscretization.f90 +++ b/Solver/src/MultiphaseSolver/SpatialDiscretization.f90 @@ -215,7 +215,7 @@ end subroutine Finalize_SpaceAndTimeMethods ! !//////////////////////////////////////////////////////////////////////// ! - SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask, Level) IMPLICIT NONE ! ! --------- @@ -228,23 +228,33 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) - logical, allocatable :: face_mask(:) - real(kind=RP) :: mu_smag, delta + integer, intent(in), optional :: Level ! ! --------------- ! Local variables ! --------------- ! 
- INTEGER :: k, eID, fID, i, j + INTEGER :: k, eID, fID, i, j, ierr, locLevel, lID real(kind=RP) :: sqrtRho, invMa2 class(Element), pointer :: e logical :: compute_element + logical, allocatable :: face_mask(:) + real(kind=RP) :: mu_smag, delta + + if (present(Level)) then + locLevel = Level + else + locLevel = 1 + end if + + associate ( MLIter_eID => mesh % MLRK % MLIter_eID, MLIter => mesh % MLRK % MLIter , MLIter_fID => mesh % MLRK % MLIter_fID) + if (present(element_mask)) then call CreateFaceMask(mesh, element_mask, face_mask) endif -!$omp parallel shared(mesh, time) private(compute_element) +!$omp parallel shared(mesh, time, locLevel) private(compute_element) ! !/////////////////////////////////////////////////// ! 1st step: Get chemical potential @@ -256,8 +266,9 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! select case (mode) case (CTD_IGNORE_MODE,CTD_IMEX_EXPLICIT) -!$omp do schedule(runtime) - do eID = 1, size(mesh % elements) +!$omp do schedule(runtime) private(eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) @@ -289,7 +300,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! Prolong Cahn-Hilliard concentration to faces ! -------------------------------------------- ! - call mesh % ProlongSolutionToFaces(NCOMP, .false., element_mask) + call mesh % ProlongSolutionToFaces(NCOMP, element_mask=element_mask, Level=locLevel) ! ! ---------------- ! Update MPI Faces @@ -305,7 +316,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! Get concentration (lifted) gradients (also prolong to faces) ! ------------------------------------------------------------ ! 
- call CHDiscretization % ComputeGradient(NCOMP, NCOMP, mesh, time, chGradientVariables, .false., element_mask) + call CHDiscretization % ComputeGradient(NCOMP, NCOMP, mesh, time, chGradientVariables, element_mask=element_mask, Level = locLevel) ! ! -------------------- ! Update MPI Gradients @@ -323,12 +334,13 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! ! Get the concentration Laplacian (into QDot => cDot) - call ComputeLaplacian(mesh, time, element_mask) + call ComputeLaplacian(mesh, time, element_mask, locLevel) select case (mode) case (CTD_IGNORE_MODE, CTD_IMEX_EXPLICIT) -!$omp do schedule(runtime) - do eID = 1, size(mesh % elements) +!$omp do schedule(runtime) private(eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) @@ -345,8 +357,9 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem end do !$omp end do case (CTD_IMEX_IMPLICIT) -!$omp do schedule(runtime) - do eID = 1, size(mesh % elements) +!$omp do schedule(runtime) private(eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) @@ -373,7 +386,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem !$omp single call mesh % SetStorageToEqn(MU_BC) !$omp end single - call mesh % ProlongSolutionToFaces(NCOMP, .false., element_mask) + call mesh % ProlongSolutionToFaces(NCOMP, element_mask=element_mask, Level=locLevel) ! ! ---------------- ! Update MPI Faces @@ -398,7 +411,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! Get concentration (lifted) gradients (also prolong to faces) ! ------------------------------------------------------------ ! 
- call CHDiscretization % ComputeGradient(NCOMP, NCOMP, mesh, time, chGradientVariables, .false., element_mask) + call CHDiscretization % ComputeGradient(NCOMP, NCOMP, mesh, time, chGradientVariables, element_mask=element_mask) ! ! -------------------- ! Update MPI Gradients @@ -444,7 +457,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! Prolong solution to faces ! ------------------------- ! - call mesh % ProlongSolutionToFaces(NCONS, .false., element_mask) + call mesh % ProlongSolutionToFaces(NCONS, element_mask=element_mask, Level=locLevel) ! ! ---------------- ! Update MPI Faces @@ -461,14 +474,15 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! Get the density and invMa2 in faces and elements ! ------------------------------------- ! -!$omp do schedule(runtime) - do eID = 1, size(mesh % elements) +!$omp do schedule(runtime) private(eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) if (compute_element) then - mesh % elements(eID) % storage % rho = dimensionless % rho(2) + (dimensionless % rho(1)-dimensionless % rho(2))*mesh % elements(eID) % storage % Q(IMC,:,:,:) + mesh % elements(eID) % storage % rho = dimensionless % rho(2) + (dimensionless % rho(1)-dimensionless % rho(2)) * mesh % elements(eID) % storage % Q(IMC,:,:,:) mesh % elements(eID) % storage % rho = min(max(mesh % elements(eID) % storage % rho, dimensionless % rho_min),dimensionless % rho_max) if (use_non_constant_speed_of_sound ) then @@ -483,15 +497,16 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem end do !$omp end do nowait -!$omp do schedule(runtime) - do fID = 1, size(mesh % faces) +!$omp do schedule(runtime) private(fID) + do lID = 1, MLIter(locLevel,2) + fID = MLIter_fID(lID) compute_element = .true. 
if (present(element_mask)) compute_element = face_mask(fID) if (compute_element) then - - mesh % faces(fID) % storage(1) % rho = dimensionless % rho(2) + (dimensionless % rho(1)-dimensionless % rho(2))*mesh % faces(fID) % storage(1) % Q(IMC,:,:) - mesh % faces(fID) % storage(2) % rho = dimensionless % rho(2) + (dimensionless % rho(1)-dimensionless % rho(2))*mesh % faces(fID) % storage(2) % Q(IMC,:,:) + + mesh % faces(fID) % storage(1) % rho = dimensionless % rho(2) + (dimensionless % rho(1)-dimensionless % rho(2))* mesh % faces(fID) % storage(1) % Q(IMC,:,:) + mesh % faces(fID) % storage(2) % rho = dimensionless % rho(2) + (dimensionless % rho(1)-dimensionless % rho(2))* mesh % faces(fID) % storage(2) % Q(IMC,:,:) mesh % faces(fID) % storage(1) % rho = min(max(mesh % faces(fID) % storage(1) % rho, dimensionless % rho_min),dimensionless % rho_max) mesh % faces(fID) % storage(2) % rho = min(max(mesh % faces(fID) % storage(2) % rho, dimensionless % rho_min),dimensionless % rho_max) @@ -517,7 +532,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! Compute local entropy variables gradient ! ---------------------------------------- ! - call ViscousDiscretization % ComputeLocalGradients( NCONS, NCONS, mesh , time , mGradientVariables) + call ViscousDiscretization % ComputeLocalGradients( NCONS, NCONS, mesh , time , mGradientVariables, Level = locLevel) ! ! -------------------- ! Update MPI Gradients @@ -533,8 +548,9 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! Add the Non-Conservative term to QDot ! ------------------------------------- ! -!$omp do schedule(runtime) private(i,j,k,e,sqrtRho,invMa2) - do eID = 1, size(mesh % elements) +!$omp do schedule(runtime) private(i,j,k,e,sqrtRho,invMa2,eID) + do lID = 1, MLIter(locLevel,1) ! + eID = MLIter_eID(lID) compute_element = .true. 
if (present(element_mask)) compute_element = element_mask(eID) @@ -594,7 +610,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem call multiphase % SetStarMobility(multiphase % M0) end select - call ComputeNSTimeDerivative(mesh, time, element_mask) + call ComputeNSTimeDerivative(mesh, time, element_mask, Level = locLevel) call multiphase % SetStarMobility(multiphase % M0) @@ -672,7 +688,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem !$omp end do end select !$omp end parallel - + end associate if (present(element_mask)) deallocate(face_mask) ! END SUBROUTINE ComputeTimeDerivative @@ -684,12 +700,13 @@ END SUBROUTINE ComputeTimeDerivative ! !//////////////////////////////////////////////////////////////////////////////////// ! - subroutine ComputeNSTimeDerivative( mesh , t, element_mask ) + subroutine ComputeNSTimeDerivative( mesh , t, element_mask, Level ) use SpongeClass, only: sponge, addSourceSponge use ActuatorLine, only: farm, ForcesFarm implicit none type(HexMesh) :: mesh real(kind=RP) :: t + integer, intent(in), optional :: Level procedure(UserDefinedSourceTermNS_f) :: UserDefinedSourceTermNS logical, intent(in), optional :: element_mask(:) ! @@ -697,23 +714,67 @@ subroutine ComputeNSTimeDerivative( mesh , t, element_mask ) ! Local variables ! --------------- ! 
- integer :: eID , i, j, k, ierr, fID + integer :: eID , i, j, k, ierr, fID, iFace, iEl, locLevel,lID real(kind=RP) :: sqrtRho, invSqrtRho real(kind=RP) :: mu_smag, delta real(kind=RP), dimension(NCONS) :: Source logical :: compute_element logical, allocatable :: face_mask(:) + if (present(Level)) then + locLevel = Level + else + locLevel = 1 + end if + + associate ( MLRK => mesh % MLRK) + associate ( MLIter_eID => MLRK % MLIter_eID, & + MLIter => MLRK % MLIter, & + MLIter_eID_Seq => MLRK % MLIter_eID_Seq, & + MLIter_eID_MPI => MLRK % MLIter_eID_MPI, & + MLIter_fID => MLRK % MLIter_fID, & + MLIter_fID_Interior => MLRK % MLIter_fID_Interior, & + MLIter_fID_Boundary => MLRK % MLIter_fID_Boundary, & + MLIter_fID_MPI => MLRK % MLIter_fID_MPI ) if (present(element_mask)) then call CreateFaceMask(mesh, element_mask, face_mask) endif + + if ( LESModel % active) then +!$omp do schedule(runtime) private(i,j,k,delta,mu_smag,eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) + associate(e => mesh % elements(eID)) + delta = (e % geom % Volume / product(e % Nxyz + 1)) ** (1.0_RP / 3.0_RP) + + do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) + call LESModel % ComputeViscosity(delta, e % geom % dWall(i,j,k), e % storage % Q(:,i,j,k), & + e % storage % U_x(:,i,j,k), & + e % storage % U_y(:,i,j,k), & + e % storage % U_z(:,i,j,k), & + e % storage % mu_turb_NS(i,j,k) ) + e % storage % mu_NS(1,i,j,k) = e % storage % mu_turb_NS(i,j,k) + + end do ; end do ; end do + end associate + end do +!$omp end do + end if + +! +! Compute viscosity at interior and boundary faces +! ------------------------------------------------ + call compute_viscosity_at_faces(MLIter(locLevel,3), 2, MLIter_fID_Interior(1:MLIter(locLevel,3)), mesh) + call compute_viscosity_at_faces(MLIter(locLevel,4), 1, MLIter_fID_Boundary(1:MLIter(locLevel,4)), mesh) + ! ! **************** ! Volume integrals ! **************** ! 
-!$omp do schedule(runtime) - do eID = 1 , size(mesh % elements) +!$omp do schedule(runtime) private(eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) @@ -724,81 +785,50 @@ subroutine ComputeNSTimeDerivative( mesh , t, element_mask ) !$omp end do nowait - if ( LESModel % active) then - !!$omp do schedule(runtime) private(i,j,k,delta,mu_smag) - do eID = 1, size(mesh % elements) - compute_element = .true. - if (present(element_mask)) compute_element = element_mask(eID) - - if (compute_element) then - - associate(e => mesh % elements(eID)) - delta = (e % geom % Volume / product(e % Nxyz + 1)) ** (1.0_RP / 3.0_RP) - do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) - call LESModel % ComputeViscosity(delta, e % geom % dWall(i,j,k), e % storage % Q(:,i,j,k), & - e % storage % U_x(:,i,j,k), & - e % storage % U_y(:,i,j,k), & - e % storage % U_z(:,i,j,k), & - e % storage % mu_turb_NS(i,j,k) ) - ! mu_smag) - ! ! e % storage % mu_NS(1,i,j,k) = e % storage % mu_NS(1,i,j,k) + mu_smag - ! ! e % storage % mu_NS(2,i,j,k) = e % storage % mu_NS(2,i,j,k) + mu_smag * dimensionless % mu_to_kappa - e % storage % mu_NS(1,i,j,k) = e % storage % mu_NS(1,i,j,k) + e % storage % mu_turb_NS(i,j,k) - ! e % storage % mu_NS(2,i,j,k) = e % storage % mu_NS(2,i,j,k) + e % storage % mu_turb_NS(i,j,k) * dimensionless % mu_to_kappa - end do ; end do ; end do - end associate - endif - end do - !!$omp end do - end if - -! -! Compute viscosity at interior and boundary faces -! ------------------------------------------------ - call compute_viscosity_at_faces(size(mesh % faces_interior), 2, mesh % faces_interior, mesh) - call compute_viscosity_at_faces(size(mesh % faces_boundary), 1, mesh % faces_boundary, mesh) - - ! ! ****************************************** ! Compute Riemann solver of non-shared faces ! ****************************************** ! 
-!$omp do schedule(runtime) - do fID = 1, size(mesh % faces) - compute_element = .true. - if (present(element_mask)) compute_element = face_mask(fID) - +!$omp do schedule(runtime) private(fID) + do iFace = 1, MLIter(locLevel,3) + fID = MLIter_fID_Interior(iFace) + compute_element = .true. + if (present(element_mask)) compute_element = face_mask(fID) + if (compute_element) then + call computeElementInterfaceFlux_MU(mesh % faces(fID)) + end if + end do +!$omp end do nowait - associate( f => mesh % faces(fID)) - select case (f % faceType) - case (HMESH_INTERIOR) - CALL computeElementInterfaceFlux_MU( f ) - - case (HMESH_BOUNDARY) - CALL computeBoundaryFlux_MU(f, t) - - end select - end associate - endif - end do -!$omp end do +!$omp do schedule(runtime) private(fID) + do iFace = 1, MLIter(locLevel,4) + fID = MLIter_fID_Boundary(iFace) + compute_element = .true. + if (present(element_mask)) compute_element = face_mask(fID) + + if (compute_element) then + call computeBoundaryFlux_MU(mesh % faces(fID), t) + end if + end do +!$omp end do ! ! ************************************************************************************* ! Element without shared faces: Surface integrals, scaling of elements with Jacobian, ! sqrt(rho) ! ************************************************************************************* ! -!$omp do schedule(runtime) private(i,j,k,sqrtRho,invSqrtRho) - do eID = 1, size(mesh % elements) +!$omp do schedule(runtime) private(i,j,k,eID,sqrtRho,invSqrtRho) + do iEl = 1, MLIter(locLevel,5) + eID = MLIter_eID_Seq(iEl) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) if (compute_element) then associate(e => mesh % elements(eID)) - if ( e % hasSharedFaces ) cycle + call TimeDerivative_FacesContribution(e, t, mesh) do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) @@ -834,24 +864,20 @@ subroutine ComputeNSTimeDerivative( mesh , t, element_mask ) ! ! Compute viscosity at MPI faces ! 
------------------------------ - call compute_viscosity_at_faces(size(mesh % faces_mpi), 2, mesh % faces_mpi, mesh) + call compute_viscosity_at_faces(MLIter(locLevel,7), 2, MLIter_fID_MPI(1:MLIter(locLevel,7)), mesh) ! ! ************************************** ! Compute Riemann solver of shared faces ! ************************************** ! -!$omp do schedule(runtime) - do fID = 1, size(mesh % faces) +!$omp do schedule(runtime) private(fID) + do iFace = 1, MLIter(locLevel,7) + fID = MLIter_fID_MPI(iFace) compute_element = .true. if (present(element_mask)) compute_element = face_mask(fID) if (compute_element) then - associate( f => mesh % faces(fID)) - select case (f % faceType) - case (HMESH_MPI) - CALL computeMPIFaceFlux_MU( f ) - end select - end associate + CALL computeMPIFaceFlux_MU( mesh % faces(fID) ) endif end do !$omp end do @@ -860,14 +886,14 @@ subroutine ComputeNSTimeDerivative( mesh , t, element_mask ) ! Surface integrals and scaling of elements with shared faces ! *********************************************************** ! -!$omp do schedule(runtime) private(i,j,k, sqrtRho, invSqrtRho) - do eID = 1, size(mesh % elements) +!$omp do schedule(runtime) private(i,j,k, eID, sqrtRho, invSqrtRho) + do iEl = 1, MLIter(locLevel,6) + eID = MLIter_eID_MPI(iEl) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) if (compute_element) then associate(e => mesh % elements(eID)) - if ( .not. e % hasSharedFaces ) cycle call TimeDerivative_FacesContribution(e, t, mesh) do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) @@ -902,8 +928,9 @@ subroutine ComputeNSTimeDerivative( mesh , t, element_mask ) ! *************** ! Add source term ! *************** -!$omp do schedule(runtime) private(i,j,k, InvSqrtRho) - do eID = 1, mesh % no_of_elements +!$omp do schedule(runtime) private(i,j,k, InvSqrtRho, eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) compute_element = .true. 
if (present(element_mask)) compute_element = element_mask(eID) @@ -925,11 +952,12 @@ subroutine ComputeNSTimeDerivative( mesh , t, element_mask ) !for the sponge, loops are in the internal subroutine as values are precalculated !The scale with sqrtRho is done in the subroutines, not done againg here call addSourceSponge(sponge,mesh) - call ForcesFarm(farm, mesh, t) + call ForcesFarm(farm, mesh, t, Level=locLevel) ! Add all the source terms -!$omp do schedule(runtime) private(i,j,k) - do eID = 1, mesh % no_of_elements +!$omp do schedule(runtime) private(i,j,k, Source, eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) @@ -950,8 +978,9 @@ subroutine ComputeNSTimeDerivative( mesh , t, element_mask ) ! no wall function for MULTIPHASE if( mesh% IBM% active ) then if( .not. mesh% IBM% semiImplicit ) then -!$omp do schedule(runtime) private(i,j,k,Source) - do eID = 1, mesh % no_of_elements +!$omp do schedule(runtime) private(i,j,k,Source, eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) @@ -974,6 +1003,9 @@ subroutine ComputeNSTimeDerivative( mesh , t, element_mask ) end if end if + end associate + end associate + if (present(element_mask)) deallocate(face_mask) end subroutine ComputeNSTimeDerivative @@ -1075,6 +1107,8 @@ SUBROUTINE computeElementInterfaceFlux_MU(f) call GetmTwoFluidsViscosity(f % storage(1) % Q(IMC,i,j), muL) call GetmTwoFluidsViscosity(f % storage(2) % Q(IMC,i,j), muR) + muL = muL +f % storage(1) % mu_NS(1,i,j) ! Add subgrid viscosity + muR = muR +f % storage(2) % mu_NS(1,i,j) ! Add subgrid viscosity ! ! - Premultiply velocity gradients by the viscosity ! 
----------------------------------------------- @@ -1173,6 +1207,8 @@ SUBROUTINE computeMPIFaceFlux_MU(f) call GetmTwoFluidsViscosity(f % storage(1) % Q(IMC,i,j), muL) call GetmTwoFluidsViscosity(f % storage(2) % Q(IMC,i,j), muR) + muL = muL +f % storage(1) % mu_NS(1,i,j) ! Add subgrid viscosity + muR = muR +f % storage(2) % mu_NS(1,i,j) ! Add subgrid viscosity ! ! - Premultiply velocity gradients by the viscosity ! ----------------------------------------------- @@ -1297,6 +1333,7 @@ SUBROUTINE computeBoundaryFlux_MU(f, time) ! -------------- ! call GetmTwoFluidsViscosity(f % storage(1) % Q(IMC,i,j), mu) + mu = mu + f % storage(1) % mu_NS(1,i,j) ! Add subgrid viscosity call mViscousFlux(NCONS, NCONS, f % storage(1) % Q(:,i,j), & f % storage(1) % U_x(:,i,j), & @@ -1350,30 +1387,48 @@ END SUBROUTINE computeBoundaryFlux_MU ! !//////////////////////////////////////////////////////////////////////////////////////// ! - subroutine ComputeLaplacian( mesh , t, element_mask) + subroutine ComputeLaplacian( mesh , t, element_mask, Level) implicit none type(HexMesh) :: mesh real(kind=RP) :: t logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables ! --------------- ! - integer :: eID , i, j, k, ierr, fID + integer :: eID , i, j, k, ierr, fID, iFace, iEl, lID, locLevel logical :: compute_element logical, allocatable :: face_mask(:) if (present(element_mask)) then call CreateFaceMask(mesh, element_mask, face_mask) - endif + endif + + if (present(Level)) then + locLevel = Level + else + locLevel = 1 + end if + + associate ( MLRK => mesh % MLRK) + associate ( MLIter_eID => MLRK % MLIter_eID, & + MLIter => MLRK % MLIter, & + MLIter_eID_Seq => MLRK % MLIter_eID_Seq, & + MLIter_eID_MPI => MLRK % MLIter_eID_MPI, & + MLIter_fID => MLRK % MLIter_fID, & + MLIter_fID_Interior => MLRK % MLIter_fID_Interior, & + MLIter_fID_Boundary => MLRK % MLIter_fID_Boundary, & + MLIter_fID_MPI => MLRK % MLIter_fID_MPI ) ! ! 
**************** ! Volume integrals ! **************** ! -!$omp do schedule(runtime) - do eID = 1 , size(mesh % elements) +!$omp do schedule(runtime) private(eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) @@ -1387,51 +1442,47 @@ subroutine ComputeLaplacian( mesh , t, element_mask) ! Compute Riemann solver of non-shared faces ! ****************************************** ! -!$omp do schedule(runtime) - do fID = 1, size(mesh % faces) - compute_element = .true. +!$omp do schedule(runtime) private(fID) + do iFace = 1, MLIter(locLevel,3) + fID = MLIter_fID_Interior(iFace) + compute_element = .true. if (present(element_mask)) compute_element = face_mask(fID) if (compute_element) then + call Laplacian_computeElementInterfaceFlux(mesh % faces(fID)) + end if + end do +!$omp end do nowait - associate( f => mesh % faces(fID)) - select case (f % faceType) - case (HMESH_INTERIOR) - CALL Laplacian_computeElementInterfaceFlux( f ) - - case (HMESH_BOUNDARY) - CALL Laplacian_computeBoundaryFlux(f, t) - - case (HMESH_MPI) - - case default - print*, "Unrecognized face type" - errorMessage(STD_OUT) - error stop - - end select - end associate - endif - end do -!$omp end do +!$omp do schedule(runtime) private(fID) + do iFace = 1, MLIter(locLevel,4) + fID = MLIter_fID_Boundary(iFace) + compute_element = .true. + if (present(element_mask)) compute_element = face_mask(fID) + + if (compute_element) then + call Laplacian_computeBoundaryFlux(mesh % faces(fID), t) + end if + end do +!$omp end do ! ! *************************************************************** ! Surface integrals and scaling of elements with non-shared faces ! *************************************************************** ! 
-!$omp do schedule(runtime) private(i, j, k) - do eID = 1, size(mesh % elements) +!$omp do schedule(runtime) private(i,j,k,eID) + do iEl = 1, MLIter(locLevel,5) + eID = MLIter_eID_Seq(iEl) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) if (compute_element) then associate(e => mesh % elements(eID)) - if ( e % hasSharedFaces ) cycle call Laplacian_FacesContribution(e, t, mesh) do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) - e % storage % QDot(:,i,j,k) = e % storage % QDot(:,i,j,k) / e % geom % jacobian(i,j,k) + e % storage % QDot(:,i,j,k) = e % storage % QDot(:,i,j,k) / e % geom % jacobian(i,j,k) end do ; end do ; end do end associate endif @@ -1452,19 +1503,15 @@ subroutine ComputeLaplacian( mesh , t, element_mask) ! Compute Riemann solver of shared faces ! ************************************** ! -!$omp do schedule(runtime) - do fID = 1, size(mesh % faces) +!$omp do schedule(runtime) private(fID) + do iFace = 1, MLIter(locLevel,7) + fID = MLIter_fID_MPI(iFace) compute_element = .true. if (present(element_mask)) compute_element = face_mask(fID) if (compute_element) then - associate( f => mesh % faces(fID)) - select case (f % faceType) - case (HMESH_MPI) - CALL Laplacian_computeMPIFaceFlux( f ) - end select - end associate - endif + call Laplacian_computeMPIFaceFlux(mesh % faces(fID)) + end if end do !$omp end do ! @@ -1472,14 +1519,15 @@ subroutine ComputeLaplacian( mesh , t, element_mask) ! Surface integrals and scaling of elements with shared faces ! *********************************************************** ! -!$omp do schedule(runtime) private(i, j, k) - do eID = 1, size(mesh % elements) +!$omp do schedule(runtime) private(i,j,k,eID) + do iEl = 1, MLIter(locLevel,6) + eID = MLIter_eID_MPI(iEl) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) if (compute_element) then associate(e => mesh % elements(eID)) - if ( .not. 
e % hasSharedFaces ) cycle + call Laplacian_FacesContribution(e, t, mesh) do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) @@ -1497,7 +1545,9 @@ subroutine ComputeLaplacian( mesh , t, element_mask) !$omp end single end if #endif - + end associate + end associate + if (present(element_mask)) deallocate(face_mask) end subroutine ComputeLaplacian @@ -1583,6 +1633,7 @@ subroutine Laplacian_VolumetricContribution( e , t ) ! ! Compute contravariant flux ! -------------------------- + e % storage % mu_NS(1,:,:,:) = 0.0_RP ! Need to zero out for Laplacian ComputeInnerFluxes call CHDiscretization % ComputeInnerFluxes (NCOMP, NCOMP, CHDivergenceFlux, GetCHViscosity, e , contravariantFlux ) ! ! ************************ @@ -1631,11 +1682,12 @@ subroutine compute_viscosity_at_faces(no_of_faces, no_of_sides, face_ids, mesh) if ( LESModel % Active ) then -!!$omp do schedule(runtime) private(iFace,i,j,delta,mu_smag) +!$omp do schedule(runtime) private(i,j,delta,mu_smag) do iFace = 1, no_of_faces associate(f => mesh % faces(face_ids(iFace))) delta = sqrt(f % geom % surface / product(f % Nf + 1)) + do j = 0, f % Nf(2) ; do i = 0, f % Nf(1) do side = 1, no_of_sides call LESModel % ComputeViscosity(delta, f % geom % dWall(i,j), f % storage(side) % Q(:,i,j), & @@ -1643,13 +1695,13 @@ subroutine compute_viscosity_at_faces(no_of_faces, no_of_sides, face_ids, mesh) f % storage(side) % U_y(:,i,j), & f % storage(side) % U_z(:,i,j), & mu_smag) - f % storage(side) % mu_NS(1,i,j) = f % storage(side) % mu_NS(1,i,j) + mu_smag - !f % storage(side) % mu_NS(2,i,j) = f % storage(side) % mu_NS(2,i,j) + mu_smag * dimensionless % mu_to_kappa + f % storage(side) % mu_NS(1,i,j) = mu_smag + end do end do ; end do end associate end do -!!$omp end do +!$omp end do end if end subroutine compute_viscosity_at_faces @@ -1825,7 +1877,7 @@ subroutine Laplacian_computeBoundaryFlux(f, time) end subroutine Laplacian_computeBoundaryFlux - SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, 
time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask, Level) use EllipticDiscretizationClass IMPLICIT NONE ! @@ -1839,6 +1891,7 @@ SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elemen integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level end subroutine ComputeTimeDerivativeIsolated diff --git a/Solver/src/NavierStokesSolver/SpatialDiscretization.f90 b/Solver/src/NavierStokesSolver/SpatialDiscretization.f90 index 6535aaba53..ac44a61c29 100644 --- a/Solver/src/NavierStokesSolver/SpatialDiscretization.f90 +++ b/Solver/src/NavierStokesSolver/SpatialDiscretization.f90 @@ -224,7 +224,7 @@ end subroutine Finalize_SpaceAndTimeMethods ! !//////////////////////////////////////////////////////////////////////// ! - SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask, Level) IMPLICIT NONE ! ! --------- @@ -237,12 +237,13 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables ! --------------- ! - INTEGER :: k + INTEGER :: k, locLevel logical :: HOElements if (present(HO_Elements)) then @@ -257,9 +258,12 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! Prolongation of the solution to the faces ! ----------------------------------------- ! 
-!$omp parallel shared(mesh, time) - call mesh % ProlongSolutionToFaces(NCONS, HO_Elements) - +!$omp parallel shared(mesh, time, Level) + if (present(Level)) then + call mesh % ProlongSolutionToFaces(NCONS, HO_Elements=HO_Elements, Level=Level) + else + call mesh % ProlongSolutionToFaces(NCONS, HO_Elements) + end if ! ---------------- ! Update MPI Faces ! ---------------- @@ -275,7 +279,12 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem ! ----------------- ! if ( computeGradients ) then - call ViscousDiscretization % ComputeGradient( NCONS, NGRAD, mesh , time, GetGradients, HO_Elements) + if (present(Level)) then + call ViscousDiscretization % ComputeGradient( NCONS, NGRAD, mesh , time, GetGradients, HO_Elements=HO_Elements, Level=Level) + else + call ViscousDiscretization % ComputeGradient( NCONS, NGRAD, mesh , time, GetGradients, HO_Elements) + end if + end if #ifdef _HAS_MPI_ @@ -296,9 +305,15 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem particles = particles, & t = time) else - call TimeDerivative_ComputeQDot(mesh = mesh , & + if (present(Level)) then + call TimeDerivative_ComputeQDotMLRK(mesh = mesh , & + particles = particles, & + t = time, Level=locLevel) + else + call TimeDerivative_ComputeQDot(mesh = mesh , & particles = particles, & t = time) + end if end if !$omp end parallel ! @@ -309,7 +324,7 @@ END SUBROUTINE ComputeTimeDerivative ! This routine computes the time derivative element by element, without considering the Riemann Solvers ! This is useful for estimating the isolated truncation error ! - SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask, Level) use EllipticDiscretizationClass IMPLICIT NONE ! 
@@ -323,6 +338,7 @@ SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elemen integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables @@ -667,6 +683,350 @@ subroutine TimeDerivative_ComputeQDot( mesh , particles, t) end if end subroutine TimeDerivative_ComputeQDot +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! +! --------------- +! Compute Qdot for MLRK Scheme - This split is necessary due to problem on AnisFAS time integration +! --------------- +!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + subroutine TimeDerivative_ComputeQDotMLRK( mesh , particles, t, Level) + use WallFunctionConnectivity + use TripForceClass, only: randomTrip + use ActuatorLine, only: farm, ForcesFarm + use SpongeClass, only: sponge, addSourceSponge + implicit none + type(HexMesh) :: mesh + type(Particles_t) :: particles + real(kind=RP) :: t + integer, intent(in), optional :: Level + procedure(UserDefinedSourceTermNS_f) :: UserDefinedSourceTermNS +! +! --------------- +! Local variables +! --------------- +! + integer :: eID , i, j, k, ierr, fID, iFace, iEl, iP, STLNum, n , locLevel, lID + real(kind=RP) :: mu_smag, delta, Source(NCONS), TurbulentSource(NCONS), Q_target(NCONS) + real(kind=RP), allocatable :: Source_HO(:,:,:,:) + integer, allocatable :: i_(:), j_(:), k_(:) + + if (present(Level)) then + locLevel = Level + else + locLevel = 1 + + if (.not.allocated(mesh % MLRK % MLIter)) then + call mesh % MLRK % construct(mesh, 1) ! 
default 1 level + end if + end if + + associate ( MLRK => mesh % MLRK) + associate ( MLIter_eID => MLRK % MLIter_eID, & + MLIter => MLRK % MLIter, & + MLIter_eID_Seq => MLRK % MLIter_eID_Seq, & + MLIter_eID_MPI => MLRK % MLIter_eID_MPI, & + MLIter_fID => MLRK % MLIter_fID, & + MLIter_fID_Interior => MLRK % MLIter_fID_Interior, & + MLIter_fID_Boundary => MLRK % MLIter_fID_Boundary, & + MLIter_fID_MPI => MLRK % MLIter_fID_MPI ) +! +! *********************************************** +! Compute the viscosity at the elements and faces +! *********************************************** +! + if (flowIsNavierStokes) then +!$omp do schedule(runtime) private(i,j,k,eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) + associate(e => mesh % elements(eID)) + do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) + call get_laminar_mu_kappa(e % storage % Q(:,i,j,k), e % storage % mu_NS(1,i,j,k), e % storage % mu_NS(2,i,j,k)) + end do ; end do ; end do + end associate + end do +!$omp end do + end if + + + if ( LESModel % active) then +!$omp do schedule(runtime) private(i,j,k,delta,mu_smag,eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) + associate(e => mesh % elements(eID)) + delta = (e % geom % Volume / product(e % Nxyz + 1)) ** (1.0_RP / 3.0_RP) + do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) + call LESModel % ComputeViscosity(delta, e % geom % dWall(i,j,k), e % storage % Q(:,i,j,k), & + e % storage % U_x(:,i,j,k), & + e % storage % U_y(:,i,j,k), & + e % storage % U_z(:,i,j,k), & + e % storage % mu_turb_NS(i,j,k) ) + ! mu_smag) + ! e % storage % mu_NS(1,i,j,k) = e % storage % mu_NS(1,i,j,k) + mu_smag + ! 
e % storage % mu_NS(2,i,j,k) = e % storage % mu_NS(2,i,j,k) + mu_smag * dimensionless % mu_to_kappa + e % storage % mu_NS(1,i,j,k) = e % storage % mu_NS(1,i,j,k) + e % storage % mu_turb_NS(i,j,k) + e % storage % mu_NS(2,i,j,k) = e % storage % mu_NS(2,i,j,k) + e % storage % mu_turb_NS(i,j,k) * dimensionless % mu_to_kappa + end do ; end do ; end do + end associate + end do +!$omp end do + end if +! +! Compute viscosity at interior and boundary faces +! ------------------------------------------------ + call compute_viscosity_at_faces(MLIter(locLevel,3), 2, MLIter_fID_Interior(1:MLIter(locLevel,3)), mesh) + call compute_viscosity_at_faces(MLIter(locLevel,4), 1, MLIter_fID_Boundary(1:MLIter(locLevel,4)), mesh) +! +! **************** +! Volume integrals +! **************** +! +!$omp do schedule(runtime) private(eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) + call TimeDerivative_VolumetricContribution( mesh, mesh % elements(eID) , t) + end do +!$omp end do + +#if defined(_HAS_MPI_) +!$omp single + if (ShockCapturingDriver % isActive) then + call mesh % UpdateMPIFacesAviscflux(NCONS) + end if +!$omp end single +#endif +! +! ****************************************** +! Compute Riemann solver of non-shared faces +! ****************************************** +! +!$omp do schedule(runtime) private(fID) + do iFace = 1, MLIter(locLevel,3) + fID = MLIter_fID_Interior(iFace) + call computeElementInterfaceFlux(mesh % faces(fID)) + end do +!$omp end do nowait + +!$omp do schedule(runtime) private(fID) + do iFace = 1, MLIter(locLevel,4) + fID = MLIter_fID_Boundary(iFace) + call computeBoundaryFlux(mesh % faces(fID), t, mesh) + end do +!$omp end do +! +! *************************************************************** +! Surface integrals and scaling of elements with non-shared faces +! *************************************************************** +! 
+!$omp do schedule(runtime) private(i,j,k,eID) + do iEl = 1, MLIter(locLevel,5) + eID = MLIter_eID_Seq(iEl) + associate(e => mesh % elements(eID)) + call TimeDerivative_FacesContribution(e, t, mesh) + + do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) + e % storage % QDot(:,i,j,k) = e % storage % QDot(:,i,j,k) / e % geom % jacobian(i,j,k) + end do ; end do ; end do + end associate + end do +!$omp end do +! +! **************************** +! Wait until messages are sent +! **************************** +! +#ifdef _HAS_MPI_ + if ( MPI_Process % doMPIAction ) then +!$omp single + if ( flowIsNavierStokes ) then + call mesh % GatherMPIFacesGradients(NGRAD) + else + call mesh % GatherMPIFacesSolution(NCONS) + end if +!$omp end single +! +! Compute viscosity at MPI faces +! ------------------------------ + call compute_viscosity_at_faces(MLIter(locLevel,7), 2, MLIter_fID_MPI(1:MLIter(locLevel,7)), mesh) + +!$omp single + if ( flowIsNavierStokes ) then + if ( ShockCapturingDriver % isActive ) then + call mpi_barrier(MPI_COMM_WORLD, ierr) ! TODO: This can't be the best way :( + call mesh % GatherMPIFacesAviscflux(NCONS) + end if + end if +!$omp end single +! +! ************************************** +! Compute Riemann solver of shared faces +! ************************************** +! +!$omp do schedule(runtime) private(fID) + do iFace = 1, MLIter(locLevel,7) + fID = MLIter_fID_MPI(iFace) + call computeMPIFaceFlux(mesh % faces(fID)) + end do +!$omp end do +! +! *********************************************************** +! Surface integrals and scaling of elements with shared faces +! *********************************************************** +! 
+!$omp do schedule(runtime) private(i,j,k,eID) + do iEl = 1, MLIter(locLevel,6) + eID = MLIter_eID_MPI(iEl) + associate(e => mesh % elements(eID)) + call TimeDerivative_FacesContribution(e, t, mesh) + + do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) + e % storage % QDot(:,i,j,k) = e % storage % QDot(:,i,j,k) / e % geom % jacobian(i,j,k) + end do ; end do ; end do + end associate + end do +!$omp end do +! ! +! ! Add an MPI Barrier +! ! ------------------ +! !$omp single +! call mpi_barrier(MPI_COMM_WORLD, ierr) +! !$omp end single + end if +#endif +! +! ***************************************************************************************************************************** +! Compute contributions to source term +! ATTENTION: This is deactivated for child multigrid meshes since they have specially tailored source terms (already computed). +! If you are going to add contributions to the source term, do it adding to e % storage % S_NS inside the condition! +! ***************************************************************************************************************************** + if (.not. mesh % child) then +! +! Add physical source term +! ************************ +!$omp do schedule(runtime) private(i,j,k,eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) + associate ( e => mesh % elements(eID) ) + do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) + ! All terms are calculated indepentenly and overwritten in case one gauss point has more than one contribution + call UserDefinedSourceTermNS(e % geom % x(:,i,j,k), e % storage % Q(:,i,j,k), t, e % storage % S_NS(:,i,j,k), thermodynamics, dimensionless, refValues) + call randomTrip % getTripSource( e % geom % x(:,i,j,k), e % storage % S_NS(:,i,j,k) ) + end do ; end do ; end do + end associate + end do +!$omp end do + ! 
for the sponge, loops are in the internal subroutine as values are precalculated + call addSourceSponge(sponge,mesh) + call ForcesFarm(farm, mesh, t, Level=locLevel) +! +! Add Particles source +! ******************** + if ( particles % active ) then +!$omp do schedule(runtime) private(eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) + associate ( e => mesh % elements(eID) ) + e % storage % S_NSP = 0.0_RP + end associate + end do +!$omp end do + +!$omp do schedule(runtime) + do i = 1, particles % injection % injected + 1 + if (particles % particle(i) % active) then + + associate ( eID => particles % particle(i) % eID ) + + call particles % AddSource(i, mesh % elements( eID ), & + t, thermodynamics, dimensionless, refValues) + + ! If this is uncommented, private(j) should be added to openmp. + !this commented section permits the computation of source term in neighbor elements + !do j = 1, 6 + ! if (particles % particle(i) % mesh%elements( eID )%NumberOfConnections(j) > 0) then + ! call particles % AddSource(i, & + ! mesh % elements( particles % particle(i) % mesh%elements( eID )%Connection(j)%ElementIDs(1) ), & + ! t, thermodynamics, dimensionless, refValues) + ! else + ! ! + ! end if + !end do + end associate + endif + end do +!$omp end do +!$omp do schedule(runtime) private(eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) + associate ( e => mesh % elements(eID) ) + e % storage % S_NS = e % storage % S_NS + e % storage % S_NSP + end associate + end do +!$omp end do + end if + end if !(.not. mesh % child) +! +! *********************** +! Now add the source term +! 
*********************** +!$omp do schedule(runtime) private(i,j,k,eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) + associate ( e => mesh % elements(eID) ) + do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) + e % storage % QDot(:,i,j,k) = e % storage % QDot(:,i,j,k) + e % storage % S_NS(:,i,j,k) + end do ; end do ; end do + end associate + end do +!$omp end do +! +! ********************* +! Add IBM source term +! ********************* + if( mesh% IBM% active ) then + if( .not. mesh% IBM% semiImplicit ) then +!$omp do schedule(runtime) private(i,j,k,Source,Q_target,eID) + do lID = 1, MLIter(locLevel,1) + eID = MLIter_eID(lID) + associate ( e => mesh % elements(eID) ) + do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) + if( e% isInsideBody(i,j,k) ) then + if( mesh% IBM% stl(e% STL(i,j,k))% move ) then + Q_target = mesh% IBM% MaskVelocity( e% storage% Q(:,i,j,k), NCONS, e% STL(i,j,k), e% geom% x(:,i,j,k), t ) + call mesh% IBM% SourceTerm( eID = eID, Q = e % storage % Q(:,i,j,k), Q_target = Q_target, Source = Source, wallfunction = .false. ) + else + call mesh% IBM% SourceTerm( eID = eID, Q = e % storage % Q(:,i,j,k), Source = Source, wallfunction = .false. 
) + end if + e % storage % QDot(:,i,j,k) = e % storage % QDot(:,i,j,k) + Source + end if + end do ; end do ; end do + end associate + end do +!$omp end do + if( mesh% IBM% Wallfunction ) then +!$omp single + call mesh% IBM% GetBandRegionStates( mesh% elements ) +!$omp end single +!$omp do schedule(runtime) private(i,j,k,TurbulentSource) + do iP = 1, mesh% IBM% NumOfForcingPoints + associate( e => mesh% elements(mesh% IBM% ImagePoints(iP)% element_index), & + e_in => mesh% elements(mesh% IBM% ImagePoints(iP)% element_in) ) + i = mesh% IBM% ImagePoints(iP)% local_position(1) + j = mesh% IBM% ImagePoints(iP)% local_position(2) + k = mesh% IBM% ImagePoints(iP)% local_position(3) + call mesh % IBM % SourceTermTurbulence( mesh% IBM% ImagePoints(iP), e% storage% Q(:,i,j,k), & + e% geom% normal(:,i,j,k), e% geom% dWall(i,j,k), & + e% STL(i,j,k), TurbulentSource ) + e% storage% QDot(:,i,j,k) = e % storage % QDot(:,i,j,k) + TurbulentSource + end associate + end do +!$omp end do + end if + end if + end if + + end associate + end associate + + end subroutine TimeDerivative_ComputeQDotMLRK !******************************************************************************* ! Computes the Q-dot only for the High-Order elements (p>1) diff --git a/Solver/src/NavierStokesSolverRANS/SpatialDiscretization.f90 b/Solver/src/NavierStokesSolverRANS/SpatialDiscretization.f90 index 1a2de9af8c..1f0cd2ab6d 100644 --- a/Solver/src/NavierStokesSolverRANS/SpatialDiscretization.f90 +++ b/Solver/src/NavierStokesSolverRANS/SpatialDiscretization.f90 @@ -255,7 +255,7 @@ end subroutine Finalize_SpaceAndTimeMethods ! !//////////////////////////////////////////////////////////////////////// ! - SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, element_mask, Level) IMPLICIT NONE ! ! 
--------- @@ -268,6 +268,7 @@ SUBROUTINE ComputeTimeDerivative( mesh, particles, time, mode, HO_Elements, elem integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables @@ -327,7 +328,7 @@ END SUBROUTINE ComputeTimeDerivative ! This routine computes the time derivative element by element, without considering the Riemann Solvers ! This is useful for estimating the isolated truncation error ! - SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elements, element_mask, Level) use EllipticDiscretizationClass IMPLICIT NONE ! @@ -341,6 +342,7 @@ SUBROUTINE ComputeTimeDerivativeIsolated( mesh, particles, time, mode, HO_Elemen integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! 
Local variables diff --git a/Solver/src/libs/discretization/DGSEMClass.f90 b/Solver/src/libs/discretization/DGSEMClass.f90 index a3e09ecb7c..136738ebdb 100644 --- a/Solver/src/libs/discretization/DGSEMClass.f90 +++ b/Solver/src/libs/discretization/DGSEMClass.f90 @@ -9,6 +9,7 @@ #include "Includes.h" Module DGSEMClass use SMConstants + use Utilities , only: sortAscend USE NodalStorageClass , only: CurrentNodes, NodalStorage, NodalStorage_t use MeshTypes , only: HOPRMESH use ElementClass @@ -37,7 +38,7 @@ Module DGSEMClass private public ComputeTimeDerivative_f, DGSem, ConstructDGSem - public DestructDGSEM, MaxTimeStep, ComputeMaxResiduals + public DestructDGSEM, MaxTimeStep, ComputeMaxResiduals, DetermineCFL public hnRange TYPE DGSem @@ -70,7 +71,7 @@ Module DGSEMClass END TYPE DGSem abstract interface - SUBROUTINE ComputeTimeDerivative_f( mesh, particles, time, mode, HO_Elements, element_mask) + SUBROUTINE ComputeTimeDerivative_f( mesh, particles, time, mode, HO_Elements, element_mask, Level) use SMConstants use HexMeshClass use ParticlesClass @@ -85,6 +86,7 @@ SUBROUTINE ComputeTimeDerivative_f( mesh, particles, time, mode, HO_Elements, el integer, intent(in) :: mode logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level end subroutine ComputeTimeDerivative_f END INTERFACE @@ -442,6 +444,11 @@ SUBROUTINE ConstructDGSem( self, meshFileName_, controlVariables, & #if defined(NAVIERSTOKES) && (!(SPALARTALMARAS)) IF (flowIsNavierStokes) call self % fwh % construct(self % mesh, controlVariables) #endif +! +! ------------------------------------------------------------------ +! Construct MLRK Library with Level 1(Include all elements and faces) +! ------------------------------------------------------------------ + call self % mesh % MLRK % construct(self % mesh, 1) ! default 1 level ! #if defined(NAVIERSTOKES) ! ! 
@@ -854,6 +861,169 @@ subroutine MaxTimeStep( self, cfl, dcfl, MaxDt , MaxDtVec) end subroutine MaxTimeStep ! !//////////////////////////////////////////////////////////////////////// +! +! ------------------------------------------------------------------- +! Determine the max advective CFL at each element +! ------------------------------------------------------------------- + subroutine DetermineCFL(self, deltat, globalMax, globalMin, globalMaxCFLInterf) + use VariableConversion + use MPI_Process_Info + implicit none + !------------------------------------------------ + type(DGSem) :: self + real(kind=RP),intent(in) :: deltat + real(kind=RP),intent(out) :: globalMax + real(kind=RP),intent(out) :: globalMin + real(kind=RP),intent(out) :: globalMaxCFLInterf +#ifdef FLOW + !------------------------------------------------ + integer :: i, j, k, eID ! Coordinate and element counters + integer :: N(3) ! Polynomial order in the three reference directions + integer :: ierr, nProcs ! Error and number of MPI for MPI calls + integer :: counter(1:3), GlobalCounter(1:3) ! Local Counter + real(kind=RP) :: eValues(3) ! Advective eigenvalues + real(kind=RP) :: dcsi, deta, dzet ! Smallest mesh spacing + real(kind=RP) :: dcsi2, deta2, dzet2 ! Smallest mesh spacing squared + real(kind=RP) :: lamcsi_a, lamzet_a, lameta_a ! Advective eigenvalues in the three reference directions + real(kind=RP) :: jac ! Mapping Jacobian + real(kind=RP) :: Q(NCONS) ! The conservative variable + real(kind=RP) :: cfl ! cfl - Advective + real(kind=RP) :: maxCFL, minCFL, maxCFLInterface + real(kind=RP), allocatable :: elementCFL(:), maxCFLInterfaceID(:) + type(NodalStorage_t), pointer :: spAxi_p, spAeta_p, spAzeta_p ! Pointers to the nodal storage in every direction + external :: ComputeEigenvaluesForState ! Advective eigenvalue +#if defined(INCNS) || defined(MULTIPHASE) + logical :: flowIsNavierStokes = .true. 
+#endif +#if defined(SPALARTALMARAS) + external :: ComputeEigenvaluesForStateSA +#elif defined(ACOUSTIC) + external :: ComputeEigenvaluesForStateCAA +#endif +!-------------------------------------------------------- +! Initializations +! --------------- + allocate(elementCFL(1:SIZE(self % mesh % elements)), maxCFLInterfaceID(1:SIZE(self % mesh % elements))) + maxCFLInterfaceID(:) = 0.0_RP + +!$omp parallel shared(self,elementCFL,NodalStorage,flowIsNavierStokes, deltat, maxCFLInterfaceID) default(private) +!$omp do schedule(runtime) + + do eID = 1, SIZE(self % mesh % elements) + cfl = 0.0_RP + N = self % mesh % elements(eID) % Nxyz + spAxi_p => NodalStorage(N(1)) + spAeta_p => NodalStorage(N(2)) + spAzeta_p => NodalStorage(N(3)) + + if ( N(1) .ne. 0 ) then + dcsi = 1.0_RP / abs( spAxi_p % x(1) - spAxi_p % x (0) ) + + else + dcsi = 0.0_RP + + end if + + if ( N(2) .ne. 0 ) then + deta = 1.0_RP / abs( spAeta_p % x(1) - spAeta_p % x (0) ) + + else + deta = 0.0_RP + + end if + + if ( N(3) .ne. 0 ) then + dzet = 1.0_RP / abs( spAzeta_p % x(1) - spAzeta_p % x (0) ) + + else + dzet = 0.0_RP + + end if + + if (flowIsNavierStokes) then + dcsi2 = dcsi*dcsi + deta2 = deta*deta + dzet2 = dzet*dzet + end if + + do k = 0, N(3) ; do j = 0, N(2) ; do i = 0, N(1) +! +! ------------------------------------------------------------ +! The maximum eigenvalues for a particular state is determined +! by the physics. +! ------------------------------------------------------------ +! + Q(1:NCONS) = self % mesh % elements(eID) % storage % Q(1:NCONS,i,j,k) + +#if defined(SPALARTALMARAS) + CALL ComputeEigenvaluesForStateSA( Q , eValues ) +#elif defined(ACOUSTIC) + CALL ComputeEigenvaluesForStateCAA( Q , self % mesh % elements(eID) % storage % Qbase(:,i,j,k), eValues ) +#else + CALL ComputeEigenvaluesForState( Q , eValues ) +#endif + jac = self % mesh % elements(eID) % geom % jacobian(i,j,k) +! +! ---------------------------- +! Compute contravariant values +! ---------------------------- +! 
+ lamcsi_a =abs( self % mesh % elements(eID) % geom % jGradXi(IX,i,j,k) * eValues(IX) + & + self % mesh % elements(eID) % geom % jGradXi(IY,i,j,k) * eValues(IY) + & + self % mesh % elements(eID) % geom % jGradXi(IZ,i,j,k) * eValues(IZ) ) * dcsi + + lameta_a =abs( self % mesh % elements(eID) % geom % jGradEta(IX,i,j,k) * eValues(IX) + & + self % mesh % elements(eID) % geom % jGradEta(IY,i,j,k) * eValues(IY) + & + self % mesh % elements(eID) % geom % jGradEta(IZ,i,j,k) * eValues(IZ) ) * deta + + lamzet_a =abs( self % mesh % elements(eID) % geom % jGradZeta(IX,i,j,k) * eValues(IX) + & + self % mesh % elements(eID) % geom % jGradZeta(IY,i,j,k) * eValues(IY) + & + self % mesh % elements(eID) % geom % jGradZeta(IZ,i,j,k) * eValues(IZ) ) * dzet + + cfl = max( cfl, deltat * abs(lamcsi_a+lameta_a+lamzet_a)/abs(jac) ) + +#ifdef MULTIPHASE + if ((Q(1).ge.0.0001_RP).or.(Q(1).ge.0.9999_RP)) then + maxCFLInterfaceID (eID) = cfl + end if +#endif + + end do ; end do ; end do + + self % mesh % elements(eID) % ML_CFL = cfl + elementCFL(eID)=cfl + end do +!$omp end do +!$omp end parallel + + call sortAscend(elementCFL) + maxCFL = maxval(elementCFL) + minCFL = minval(elementCFL) + maxCFLInterface = maxval(maxCFLInterfaceID) + globalMax = maxCFL + globalMin = minCFL + globalMaxCFLInterf = maxCFLInterface + + deallocate(elementCFL) + deallocate(maxCFLInterfaceID) + +#ifdef _HAS_MPI_ + + if ( MPI_Process % doMPIAction ) then + call mpi_allreduce(maxCFL, globalMax, 1, MPI_DOUBLE, MPI_MAX, & + MPI_COMM_WORLD, ierr) + call mpi_allreduce(maxCFLInterface, globalMaxCFLInterf, 1, MPI_DOUBLE, MPI_MAX, & + MPI_COMM_WORLD, ierr) + call mpi_allreduce(minCFL, globalMin, 1, MPI_DOUBLE, MPI_MIN, & + MPI_COMM_WORLD, ierr) + call MPI_Comm_size(MPI_COMM_WORLD, nProcs, ierr) + end if +#endif + +#endif + end subroutine DetermineCFL +! +!//////////////////////////////////////////////////////////////////////// ! subroutine hnRange(mesh, hnmin, hnmax) ! 
diff --git a/Solver/src/libs/discretization/EllipticBR1.f90 b/Solver/src/libs/discretization/EllipticBR1.f90 index 6c08c7420c..2d576901ab 100644 --- a/Solver/src/libs/discretization/EllipticBR1.f90 +++ b/Solver/src/libs/discretization/EllipticBR1.f90 @@ -68,7 +68,7 @@ subroutine BR1_Describe(self) end subroutine BR1_Describe - subroutine BR1_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradients, HO_Elements, element_mask) + subroutine BR1_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradients, HO_Elements, element_mask, Level) use HexMeshClass use PhysicsStorage use Physics @@ -80,13 +80,14 @@ subroutine BR1_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradients, H procedure(GetGradientValues_f) :: GetGradients logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables ! --------------- ! integer :: i, j, k - integer :: eID , fID , dimID , eqID, fIDs(6), iFace, iEl + integer :: eID , fID , dimID , eqID, fIDs(6), iFace, iEl, locLevel logical :: set_mu logical :: HOElements logical :: compute_element @@ -126,17 +127,33 @@ subroutine BR1_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradients, H !$omp end do nowait call self % LiftGradientsHO(nEqn, nGradEqn, mesh, time, GetGradients, element_mask) else -!$omp do schedule(runtime) - do eID = 1 , size(mesh % elements) - compute_element = .true. - if (present(element_mask)) compute_element = element_mask(eID) - - if (compute_element) then - call mesh % elements(eID) % ComputeLocalGradient(nEqn, nGradEqn, GetGradients, set_mu) - endif - end do + if (present(Level)) then + locLevel=Level +!$omp do schedule(runtime) private(eID) + do iEl = 1 , mesh % MLRK % MLIter(locLevel,8) + eID = mesh % MLRK % MLIter_eIDN(iEl) + compute_element = .true. 
+ if (present(element_mask)) compute_element = element_mask(eID) + + if (compute_element) then + call mesh % elements(eID) % ComputeLocalGradient(nEqn, nGradEqn, GetGradients, set_mu) + endif + end do +!$omp end do nowait + call self % LiftGradients(nEqn, nGradEqn, mesh, time, GetGradients, element_mask, locLevel) + else +!$omp do schedule(runtime) private(eID) + do iEl = 1 , size(mesh % elements) + compute_element = .true. + if (present(element_mask)) compute_element = element_mask(iEl) + + if (compute_element) then + call mesh % elements(iEl) % ComputeLocalGradient(nEqn, nGradEqn, GetGradients, set_mu) + endif + end do !$omp end do nowait - call self % LiftGradients(nEqn, nGradEqn, mesh, time, GetGradients, element_mask) + call self % LiftGradients(nEqn, nGradEqn, mesh, time, GetGradients, element_mask) + end if end if end subroutine BR1_ComputeGradient @@ -148,7 +165,7 @@ end subroutine BR1_ComputeGradient ! !/////////////////////////////////////////////////////////////////////////////////////////////// ! - subroutine BR1_LiftGradients(self, nEqn, nGradEqn, mesh, time, GetGradients, element_mask) + subroutine BR1_LiftGradients(self, nEqn, nGradEqn, mesh, time, GetGradients, element_mask, Level) ! use HexMeshClass use PhysicsStorage @@ -160,13 +177,14 @@ subroutine BR1_LiftGradients(self, nEqn, nGradEqn, mesh, time, GetGradients, ele real(kind=RP), intent(in) :: time procedure(GetGradientValues_f) :: GetGradients logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables ! --------------- ! 
integer :: i, j, k - integer :: eID , fID , dimID , eqID, fIDs(6), iFace, iEl + integer :: eID , fID , dimID , eqID, fIDs(6), iFace, iEl, locLevel, locLevelm1 logical :: compute_element logical, allocatable :: face_mask(:) @@ -182,104 +200,203 @@ subroutine BR1_LiftGradients(self, nEqn, nGradEqn, mesh, time, GetGradients, ele end do !$omp end parallel do endif + + + if (present(Level)) then + locLevel = Level + locLevelm1 = max(locLevel-1,1) +! +! ******************************************* +! Compute Riemann solvers of non-shared faces +! ******************************************* +! +!$omp do schedule(runtime) private(fID) + do iFace = 1, mesh % MLRK % MLIter(locLevelm1,3) + fID = mesh % MLRK % MLIter_fID_Interior(iFace) + compute_element = .true. + if (present(element_mask)) compute_element = face_mask(fID) + + if (compute_element) then + call BR1_ComputeElementInterfaceAverage(self, mesh % faces(fID), nEqn, nGradEqn, GetGradients) + endif + end do +!$omp end do nowait + +!$omp do schedule(runtime) private(fID) + do iFace = 1, mesh % MLRK % MLIter(locLevelm1,4) + fID = mesh % MLRK % MLIter_fID_Boundary(iFace) + compute_element = .true. + if (present(element_mask)) compute_element = face_mask(fID) + + if (compute_element) then + call BR1_ComputeBoundaryFlux(self, mesh % faces(fID), nEqn, nGradEqn, time, GetGradients) + endif + end do +!$omp end do +! +!$omp do schedule(runtime) private(eID) + do iEl = 1, mesh % MLRK % MLIter(locLevel,9) + eID = mesh % MLRK % MLIter_eIDN_Seq(iEl) + compute_element = .true. + if (present(element_mask)) compute_element = element_mask(eID) + + if (compute_element) then + associate(e => mesh % elements(eID)) + ! + ! Add the surface integrals + ! ------------------------- + call BR1_GradientFaceLoop( self , nGradEqn, e, mesh) + ! + ! Prolong gradients + ! 
----------------- + fIDs = e % faceIDs + call e % ProlongGradientsToFaces(nGradEqn, mesh % faces(fIDs(1)),& + mesh % faces(fIDs(2)),& + mesh % faces(fIDs(3)),& + mesh % faces(fIDs(4)),& + mesh % faces(fIDs(5)),& + mesh % faces(fIDs(6)) ) + + end associate + endif + end do +!$omp end do + +#ifdef _HAS_MPI_ +!$omp single + if ( MPI_Process % doMPIAction ) then + call mesh % GatherMPIFacesSolution(nEqn) + end if +!$omp end single +!$omp do schedule(runtime) private(fID) + do iFace = 1, mesh % MLRK % MLIter(locLevelm1,7) + fID = mesh % MLRK % MLIter_fID_MPI(iFace) + call BR1_ComputeMPIFaceAverage(self, mesh % faces(fID), nEqn, nGradEqn, GetGradients) + end do +!$omp end do +! +!$omp do schedule(runtime) private(eID) + do iEl = 1, mesh % MLRK % MLIter(locLevel,10) + eID = mesh % MLRK % MLIter_eIDN_MPI(iEl) + associate(e => mesh % elements(eID)) + ! + ! Add the surface integrals + ! ------------------------- + call BR1_GradientFaceLoop(self, nGradEqn, e, mesh) + ! + ! Prolong gradients + ! ----------------- + fIDs = e % faceIDs + call e % ProlongGradientsToFaces(nGradEqn, mesh % faces(fIDs(1)),& + mesh % faces(fIDs(2)),& + mesh % faces(fIDs(3)),& + mesh % faces(fIDs(4)),& + mesh % faces(fIDs(5)),& + mesh % faces(fIDs(6)) ) + end associate + end do +!$omp end do +#endif + else ! ! ******************************************* ! Compute Riemann solvers of non-shared faces ! ******************************************* ! !$omp do schedule(runtime) private(fID) - do iFace = 1, size(mesh % faces_interior) - fID = mesh % faces_interior(iFace) - compute_element = .true. - if (present(element_mask)) compute_element = face_mask(fID) - - if (compute_element) then - call BR1_ComputeElementInterfaceAverage(self, mesh % faces(fID), nEqn, nGradEqn, GetGradients) - endif - end do + do iFace = 1, size(mesh % faces_interior) + fID = mesh % faces_interior(iFace) + compute_element = .true. 
+ if (present(element_mask)) compute_element = face_mask(fID) + + if (compute_element) then + call BR1_ComputeElementInterfaceAverage(self, mesh % faces(fID), nEqn, nGradEqn, GetGradients) + endif + end do !$omp end do nowait !$omp do schedule(runtime) private(fID) - do iFace = 1, size(mesh % faces_boundary) - fID = mesh % faces_boundary(iFace) - compute_element = .true. - if (present(element_mask)) compute_element = face_mask(fID) - - if (compute_element) then - call BR1_ComputeBoundaryFlux(self, mesh % faces(fID), nEqn, nGradEqn, time, GetGradients) - endif - end do + do iFace = 1, size(mesh % faces_boundary) + fID = mesh % faces_boundary(iFace) + compute_element = .true. + if (present(element_mask)) compute_element = face_mask(fID) + + if (compute_element) then + call BR1_ComputeBoundaryFlux(self, mesh % faces(fID), nEqn, nGradEqn, time, GetGradients) + endif + end do !$omp end do ! !$omp do schedule(runtime) private(eID) - do iEl = 1, size(mesh % elements_sequential) - eID = mesh % elements_sequential(iEl) - compute_element = .true. - if (present(element_mask)) compute_element = element_mask(eID) - - if (compute_element) then - associate(e => mesh % elements(eID)) + do iEl = 1, size(mesh % elements_sequential) + eID = mesh % elements_sequential(iEl) + compute_element = .true. + if (present(element_mask)) compute_element = element_mask(eID) + + if (compute_element) then + associate(e => mesh % elements(eID)) ! ! Add the surface integrals ! ------------------------- - call BR1_GradientFaceLoop( self , nGradEqn, e, mesh) + call BR1_GradientFaceLoop( self , nGradEqn, e, mesh) ! ! Prolong gradients ! 
----------------- - fIDs = e % faceIDs - call e % ProlongGradientsToFaces(nGradEqn, mesh % faces(fIDs(1)),& - mesh % faces(fIDs(2)),& - mesh % faces(fIDs(3)),& - mesh % faces(fIDs(4)),& - mesh % faces(fIDs(5)),& - mesh % faces(fIDs(6)) ) - - end associate - endif - end do + fIDs = e % faceIDs + call e % ProlongGradientsToFaces(nGradEqn, mesh % faces(fIDs(1)),& + mesh % faces(fIDs(2)),& + mesh % faces(fIDs(3)),& + mesh % faces(fIDs(4)),& + mesh % faces(fIDs(5)),& + mesh % faces(fIDs(6)) ) + + end associate + endif + end do !$omp end do #ifdef _HAS_MPI_ !$omp single - if ( MPI_Process % doMPIAction ) then - call mesh % GatherMPIFacesSolution(nEqn) - end if + if ( MPI_Process % doMPIAction ) then + call mesh % GatherMPIFacesSolution(nEqn) + end if !$omp end single !$omp do schedule(runtime) private(fID) - do iFace = 1, size(mesh % faces_mpi) - fID = mesh % faces_mpi(iFace) - call BR1_ComputeMPIFaceAverage(self, mesh % faces(fID), nEqn, nGradEqn, GetGradients) - end do + do iFace = 1, size(mesh % faces_mpi) + fID = mesh % faces_mpi(iFace) + call BR1_ComputeMPIFaceAverage(self, mesh % faces(fID), nEqn, nGradEqn, GetGradients) + end do !$omp end do ! !$omp do schedule(runtime) private(eID) - do iEl = 1, size(mesh % elements_mpi) - eID = mesh % elements_mpi(iEl) - associate(e => mesh % elements(eID)) -! -! Add the surface integrals -! ------------------------- - call BR1_GradientFaceLoop(self, nGradEqn, e, mesh) -! -! Prolong gradients -! ----------------- - fIDs = e % faceIDs - call e % ProlongGradientsToFaces(nGradEqn, mesh % faces(fIDs(1)),& - mesh % faces(fIDs(2)),& - mesh % faces(fIDs(3)),& - mesh % faces(fIDs(4)),& - mesh % faces(fIDs(5)),& - mesh % faces(fIDs(6)) ) - end associate - end do + do iEl = 1, size(mesh % elements_mpi) + eID = mesh % elements_mpi(iEl) + associate(e => mesh % elements(eID)) + ! + ! Add the surface integrals + ! ------------------------- + call BR1_GradientFaceLoop(self, nGradEqn, e, mesh) + ! + ! Prolong gradients + ! 
----------------- + fIDs = e % faceIDs + call e % ProlongGradientsToFaces(nGradEqn, mesh % faces(fIDs(1)),& + mesh % faces(fIDs(2)),& + mesh % faces(fIDs(3)),& + mesh % faces(fIDs(4)),& + mesh % faces(fIDs(5)),& + mesh % faces(fIDs(6)) ) + end associate + end do !$omp end do #endif + end if end subroutine BR1_LiftGradients - subroutine BR1_LiftGradientsHO(self, nEqn, nGradEqn, mesh, time, GetGradients, element_mask) + subroutine BR1_LiftGradientsHO(self, nEqn, nGradEqn, mesh, time, GetGradients, element_mask, Level) use HexMeshClass use PhysicsStorage use Physics @@ -290,6 +407,7 @@ subroutine BR1_LiftGradientsHO(self, nEqn, nGradEqn, mesh, time, GetGradients, e real(kind=RP), intent(in) :: time procedure(GetGradientValues_f) :: GetGradients logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables @@ -665,7 +783,8 @@ subroutine BR1_ComputeInnerFluxes( self , nEqn, nGradEqn, EllipticFlux, GetVisco do k = 0, e % Nxyz(3) ; do j = 0, e % Nxyz(2) ; do i = 0, e % Nxyz(1) call GetViscosity(e % storage % Q(IMC,i,j,k), mu(i,j,k)) end do ; end do ; end do - + + mu = mu + e % storage % mu_NS(1,:,:,:) ! 
Add Subgrid Viscosity kappa = 0.0_RP beta = multiphase % M0_star diff --git a/Solver/src/libs/discretization/EllipticBR2.f90 b/Solver/src/libs/discretization/EllipticBR2.f90 index 7d9d94bd9c..b821f30211 100644 --- a/Solver/src/libs/discretization/EllipticBR2.f90 +++ b/Solver/src/libs/discretization/EllipticBR2.f90 @@ -119,7 +119,7 @@ subroutine BR2_Describe(self) #endif end subroutine BR2_Describe - subroutine BR2_ComputeGradient( self , nEqn, nGradEqn, mesh , time , GetGradients, HO_Elements, element_mask) + subroutine BR2_ComputeGradient( self , nEqn, nGradEqn, mesh , time , GetGradients, HO_Elements, element_mask, Level) use HexMeshClass use PhysicsStorage use Physics @@ -133,6 +133,7 @@ subroutine BR2_ComputeGradient( self , nEqn, nGradEqn, mesh , time , GetGradient procedure(GetGradientValues_f) :: GetGradients logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables @@ -175,6 +176,7 @@ subroutine BR2_ComputeGradient( self , nEqn, nGradEqn, mesh , time , GetGradient else !$omp do schedule(runtime) do eID = 1, size(mesh % elements) + associate( e => mesh % elements(eID) ) call e % ComputeLocalGradient(nEqn, nGradEqn, GetGradients, .false.) ! diff --git a/Solver/src/libs/discretization/EllipticDiscretizationClass.f90 b/Solver/src/libs/discretization/EllipticDiscretizationClass.f90 index 1a38854ed3..76c797164e 100644 --- a/Solver/src/libs/discretization/EllipticDiscretizationClass.f90 +++ b/Solver/src/libs/discretization/EllipticDiscretizationClass.f90 @@ -119,7 +119,7 @@ subroutine BaseClass_Describe(self) end subroutine BaseClass_Describe - subroutine BaseClass_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradients, HO_Elements, element_mask) + subroutine BaseClass_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradients, HO_Elements, element_mask, Level) ! ! ***************************************************** ! 
BaseClass computes Local Gradients by default @@ -137,12 +137,13 @@ subroutine BaseClass_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradie procedure(GetGradientValues_f) :: GetGradients logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! Local variables ! --------------- ! - integer :: eID + integer :: eID, lID, locLevel logical :: set_mu logical :: compute_element @@ -157,8 +158,22 @@ subroutine BaseClass_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradie set_mu = .false. #endif + if (present(Level)) then + locLevel = Level +!$omp do schedule(runtime) private(eID) + do lID = 1 , mesh % MLRK % MLIter(locLevel,8) + eID = mesh % MLRK % MLIter_eIDN(lID) + compute_element = .true. + if (present(element_mask)) compute_element = element_mask(eID) + + if (compute_element) then + call mesh % elements(eID) % ComputeLocalGradient(nEqn, nGradEqn, GetGradients, set_mu) + endif + end do +!$omp end do nowait + else !$omp do schedule(runtime) - do eID = 1 , size(mesh % elements) + do eID = 1 , size(mesh % elements) compute_element = .true. if (present(element_mask)) compute_element = element_mask(eID) @@ -167,10 +182,11 @@ subroutine BaseClass_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradie endif end do !$omp end do nowait + end if end subroutine BaseClass_ComputeGradient - subroutine BaseClass_LiftGradients(self, nEqn, nGradEqn, mesh, time, GetGradients, element_mask) + subroutine BaseClass_LiftGradients(self, nEqn, nGradEqn, mesh, time, GetGradients, element_mask, Level) ! ! ***************************************************** ! 
Lift gradients: do nothing here @@ -186,6 +202,15 @@ subroutine BaseClass_LiftGradients(self, nEqn, nGradEqn, mesh, time, GetGradient real(kind=RP), intent(in) :: time procedure(GetGradientValues_f) :: GetGradients logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level + + integer :: locLevel + + if (present(Level)) then + locLevel = Level + else + locLevel = 1 + end if end subroutine BaseClass_LiftGradients diff --git a/Solver/src/libs/discretization/EllipticIP.f90 b/Solver/src/libs/discretization/EllipticIP.f90 index d2301a5198..3b98a3c187 100644 --- a/Solver/src/libs/discretization/EllipticIP.f90 +++ b/Solver/src/libs/discretization/EllipticIP.f90 @@ -186,7 +186,7 @@ subroutine IP_Describe(self) end subroutine IP_Describe - subroutine IP_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradients, HO_Elements, element_mask) + subroutine IP_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradients, HO_Elements, element_mask, Level) use HexMeshClass use PhysicsStorage use Physics @@ -199,6 +199,7 @@ subroutine IP_ComputeGradient(self, nEqn, nGradEqn, mesh, time, GetGradients, HO procedure(GetGradientValues_f) :: GetGradients logical, intent(in), optional :: HO_Elements logical, intent(in), optional :: element_mask(:) + integer, intent(in), optional :: Level ! ! --------------- ! 
Local variables diff --git a/Solver/src/libs/foundation/Utilities.f90 b/Solver/src/libs/foundation/Utilities.f90 index e2246c663f..f6c41ae8b3 100644 --- a/Solver/src/libs/foundation/Utilities.f90 +++ b/Solver/src/libs/foundation/Utilities.f90 @@ -19,7 +19,7 @@ module Utilities private public AlmostEqual, UnusedUnit, SolveThreeEquationLinearSystem, GreatestCommonDivisor, outer_product, AlmostEqualRelax - public toLower, Qsort, QsortWithFriend, BubblesortWithFriend, my_findloc + public toLower, Qsort, QsortWithFriend, BubblesortWithFriend, my_findloc, sortAscend, sortDescendInt, sortAscendInt public logarithmicMean, dot_product public LeastSquaresLinRegression @@ -490,6 +490,100 @@ end subroutine BubblesortWithFriend ! !/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ! +pure subroutine sortAscend(A) + implicit none + !-------------------------------------- + real(RP), intent(inout) :: A(:) + !-------------------------------------- + integer :: n, newn + integer :: i + real(RP):: temp + !-------------------------------------- + + n = size(A) + do + newn = 0 + do i = 1, n-1 + if (A(i) > A(i+1)) then + ! Swap A + temp = A(i) + A(i) = A(i+1) + A(i+1) = temp + ! Update number of unsorted elements + newn = i + end if + end do + n = newn + if (n <= 1) exit + end do + +end subroutine sortAscend + +pure subroutine sortAscendInt(A) + implicit none + !-------------------------------------- + integer, intent(inout) :: A(:) + !-------------------------------------- + integer :: n, newn + integer :: i + real(RP):: temp + !-------------------------------------- + + n = size(A) + do + newn = 0 + do i = 1, n-1 + if (A(i) > A(i+1)) then + ! Swap A + temp = A(i) + A(i) = A(i+1) + A(i+1) = temp + ! 
Update number of unsorted elements + newn = i + end if + end do + n = newn + if (n <= 1) exit + end do + +end subroutine sortAscendInt + +pure subroutine sortDescendInt(A,B) + implicit none + !-------------------------------------- + integer, intent(inout) :: A(:) + integer, intent(inout) :: B(size(A)) + !-------------------------------------- + integer :: n, newn + integer :: i + integer :: temp + !-------------------------------------- + + n = size(A) + do + newn = 0 + do i = 1, n-1 + if (A(i) < A(i+1)) then + ! Swap A + temp = A(i) + A(i) = A(i+1) + A(i+1) = temp + ! Swap B + temp = B(i) + B(i) = B(i+1) + B(i+1) = temp + ! Update number of unsorted elements + newn = i + end if + end do + n = newn + if (n <= 1) exit + end do + +end subroutine sortDescendInt +! +!/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +! integer function my_findloc(arr, val, dim) ! --------------------------------------------------------- ! Vanilla routine to find an index of matching value in the array. diff --git a/Solver/src/libs/mesh/HexElementClass.f90 b/Solver/src/libs/mesh/HexElementClass.f90 index f6cd1457b8..1c856a0d39 100644 --- a/Solver/src/libs/mesh/HexElementClass.f90 +++ b/Solver/src/libs/mesh/HexElementClass.f90 @@ -66,6 +66,8 @@ Module ElementClass INTEGER :: nodeIDs(8) integer :: faceIDs(6) integer :: faceSide(6) + integer :: MLevel ! RK Level + real(kind=RP) :: ML_CFL ! CFL storage for Multi Level RK INTEGER, DIMENSION(3) :: Nxyz ! Polynomial orders in every direction (Nx,Ny,Nz) real(kind=RP) :: hn ! Ratio of size and polynomial order TYPE(MappedGeometry) :: geom @@ -122,6 +124,7 @@ SUBROUTINE HexElement_Construct( self, Nx, Ny, Nz, nodeIDs, eID, globID) self % boundaryName = emptyBCName self % hasSharedFaces = .false. self % NumberOfConnections = 0 + self % MLevel = 1 ! ! ---------------------------------------- ! 
Solution Storage is allocated separately diff --git a/Solver/src/libs/mesh/HexMesh.f90 b/Solver/src/libs/mesh/HexMesh.f90 index 3a403b53da..cffbd6d3ab 100644 --- a/Solver/src/libs/mesh/HexMesh.f90 +++ b/Solver/src/libs/mesh/HexMesh.f90 @@ -1,6 +1,6 @@ #include "Includes.h" MODULE HexMeshClass - use Utilities , only: toLower, almostEqual, AlmostEqualRelax + use Utilities , only: toLower, almostEqual, AlmostEqualRelax, sortDescendInt, sortAscendInt use SMConstants USE MeshTypes USE NodeClass @@ -35,6 +35,7 @@ MODULE HexMeshClass private public HexMesh public Neighbor_t, NUM_OF_NEIGHBORS + public MultiLevel_RK public GetOriginalNumberOfFaces public ConstructFaces, ConstructPeriodicFaces @@ -45,6 +46,28 @@ MODULE HexMeshClass ! Mesh definition ! --------------- ! + type MultiLevel_RK + integer :: nLevel + integer :: maxLevel + real(kind=RP) :: CFL_CutOff + integer, allocatable :: ML_GlobalLevel(:) ! Number of Multi-Level Timestep Element - Global - in TimeIntegrator.f90 and DGSEMClass.f90 + integer, allocatable :: ML_Level(:) ! Number of Multi-Level Timestep Element - Partition + integer, allocatable :: MLIter(:,:) ! Number of ML Iteration - partition (L1 include L2 & L3, etc); size(nLevel, 6(eID, fID, fID_Inter, fID Bound, eID Seq, eID MPI)) + integer, allocatable :: MLIter_eID(:) ! Sorted Element ID from nLevel to Level 1 (eID) size is (mesh%element) + integer, allocatable :: MLIter_fID(:) ! Element ID for each level iteration (fID) size is (mesh% faces) + integer, allocatable :: MLIter_fID_Interior(:) ! Interior Face ID for each level iteration (fID) size is (mesh%faces_interior) + integer, allocatable :: MLIter_fID_Boundary(:) ! Boundary Face ID for each level 1 iteration (fID) size is (mesh%faces_boundary) + integer, allocatable :: MLIter_fID_MPI(:) ! MPI Face ID for each level 1 iteration (fID) size is (mesh%faces_mpi) + integer, allocatable :: MLIter_eID_Seq(:) ! 
Element ID for each level 1 iteration (eID,levelIteration) size is (mesh%element,nLevel) + integer, allocatable :: MLIter_eID_MPI(:) ! Element ID for each level 1 iteration (eID,levelIteration) size is (mesh%element,nLevel) + integer, allocatable :: MLIter_eIDN(:) ! Sorted Element ID from nLevel to Level 1 (eID) size is (mesh%element) + integer, allocatable :: MLIter_eIDN_Seq(:) ! Element ID for each level 1 iteration (eID,levelIteration) size is (mesh%element,nLevel) + integer, allocatable :: MLIter_eIDN_MPI(:) ! Element ID for each level 1 iteration (eID,levelIteration) size is (mesh%element,nLevel) + contains + procedure :: construct => MultiLevel_RK_Construct + procedure :: update => MultiLevel_RK_Update + procedure :: destruct => MultiLevel_RK_Destruct + end type MultiLevel_RK type HexMesh integer :: numberOfFaces integer :: nodeType @@ -67,7 +90,8 @@ MODULE HexMeshClass type(Element), dimension(:), allocatable :: elements type(MPI_FacesSet_t) :: MPIfaces type(IBM_type) :: IBM - type(Zone_t), dimension(:), allocatable :: zones + type(Zone_t), dimension(:), allocatable :: zones + type(MultiLevel_RK) :: MLRK logical :: child = .FALSE. ! Is this a (multigrid) child mesh? default .FALSE. logical :: meshIs2D = .FALSE. ! Is this a 2D mesh? default .FALSE. integer :: dir2D = 0 ! If it is in fact a 2D mesh, dir 2D stores the global direction IX, IY or IZ @@ -223,6 +247,13 @@ SUBROUTINE HexMesh_Destruct( self ) call self% IBM% destruct( .false. ) end if end if + +! +! ---------------- +! MLRK Library +! ---------------- +! + call self % MLRK % destruct END SUBROUTINE HexMesh_Destruct ! @@ -889,19 +920,20 @@ END SUBROUTINE DeletePeriodicminusfaces ! !//////////////////////////////////////////////////////////////////////// ! 
- subroutine HexMesh_ProlongSolutionToFaces(self, nEqn, HO_Elements, element_mask) + subroutine HexMesh_ProlongSolutionToFaces(self, nEqn, HO_Elements, element_mask, Level) implicit none class(HexMesh), intent(inout) :: self integer, intent(in) :: nEqn logical, optional, intent(in) :: HO_Elements logical, optional, intent(in) :: element_mask(:) + integer, optional, intent(in) :: Level ! ! --------------- ! Local variables ! --------------- ! integer :: fIDs(6) - integer :: eID, i + integer :: eID, i, locLevel, lID logical :: HOElements logical :: compute_element @@ -912,7 +944,7 @@ subroutine HexMesh_ProlongSolutionToFaces(self, nEqn, HO_Elements, element_mask) end if if (HOElements) then -!$omp do schedule(runtime) private(eID, fIDs, compute_element) +!$omp do schedule(runtime) private(eID, fIDs) do i = 1, size(self % HO_Elements) eID = self % HO_Elements(i) compute_element = .true. @@ -931,54 +963,101 @@ subroutine HexMesh_ProlongSolutionToFaces(self, nEqn, HO_Elements, element_mask) end do !$omp end do else + ! The following differentiation is needed due to problem on AnisFAS NS + if (present(Level)) then + locLevel = Level +!$omp do schedule(runtime) private(fIDs, eID) + do lID = 1, self % MLRK % MLIter(locLevel,8) + eID = self % MLRK % MLIter_eIDN(lID) + + compute_element = .true. + if (present(element_mask)) compute_element = element_mask(eID) + + if (compute_element) then + fIDs = self % elements(eID) % faceIDs + call self % elements(eID) % ProlongSolutionToFaces(nEqn, & + self % faces(fIDs(1)),& + self % faces(fIDs(2)),& + self % faces(fIDs(3)),& + self % faces(fIDs(4)),& + self % faces(fIDs(5)),& + self % faces(fIDs(6)) ) + endif + end do +!$omp end do + else !$omp do schedule(runtime) private(fIDs) - do eID = 1, size(self % elements) - - compute_element = .true. 
- if (present(element_mask)) compute_element = element_mask(eID) - - if (compute_element) then - fIDs = self % elements(eID) % faceIDs - call self % elements(eID) % ProlongSolutionToFaces(nEqn, & - self % faces(fIDs(1)),& - self % faces(fIDs(2)),& - self % faces(fIDs(3)),& - self % faces(fIDs(4)),& - self % faces(fIDs(5)),& - self % faces(fIDs(6)) ) - endif - end do + do eID = 1, size(self % elements) + + + compute_element = .true. + if (present(element_mask)) compute_element = element_mask(eID) + + if (compute_element) then + fIDs = self % elements(eID) % faceIDs + call self % elements(eID) % ProlongSolutionToFaces(nEqn, & + self % faces(fIDs(1)),& + self % faces(fIDs(2)),& + self % faces(fIDs(3)),& + self % faces(fIDs(4)),& + self % faces(fIDs(5)),& + self % faces(fIDs(6)) ) + endif + end do !$omp end do + end if end if end subroutine HexMesh_ProlongSolutionToFaces ! !//////////////////////////////////////////////////////////////////////// ! - subroutine HexMesh_ProlongGradientsToFaces(self, nGradEqn) + subroutine HexMesh_ProlongGradientsToFaces(self, nGradEqn, Level) implicit none class(HexMesh), intent(inout) :: self integer, intent(in) :: nGradEqn + integer, optional, intent(in) :: Level ! ! --------------- ! Local variables ! --------------- ! 
integer :: fIDs(6) - integer :: eID + integer :: eID, lID + integer :: locLevel + + if (present(Level)) then + locLevel = Level + +!$omp do schedule(runtime) private(fIDs, eID) + do lID = 1, self % MLRK % MLIter(locLevel,8) + eID = self % MLRK % MLIter_eIDN(lID) + fIDs = self % elements(eID) % faceIDs + call self % elements(eID) % ProlongGradientsToFaces(nGradEqn, & + self % faces(fIDs(1)),& + self % faces(fIDs(2)),& + self % faces(fIDs(3)),& + self % faces(fIDs(4)),& + self % faces(fIDs(5)),& + self % faces(fIDs(6)) ) + end do +!$omp end do + else !$omp do schedule(runtime) - do eID = 1, size(self % elements) - fIDs = self % elements(eID) % faceIDs - call self % elements(eID) % ProlongGradientsToFaces(nGradEqn, & - self % faces(fIDs(1)),& - self % faces(fIDs(2)),& - self % faces(fIDs(3)),& - self % faces(fIDs(4)),& - self % faces(fIDs(5)),& - self % faces(fIDs(6)) ) - end do + do eID = 1, size(self % elements) + + fIDs = self % elements(eID) % faceIDs + call self % elements(eID) % ProlongGradientsToFaces(nGradEqn, & + self % faces(fIDs(1)),& + self % faces(fIDs(2)),& + self % faces(fIDs(3)),& + self % faces(fIDs(4)),& + self % faces(fIDs(5)),& + self % faces(fIDs(6)) ) + end do !$omp end do + end if end subroutine HexMesh_ProlongGradientsToFaces ! @@ -4986,6 +5065,419 @@ subroutine HexMesh_UpdateHOArrays(self) call elementMPIList % destruct end subroutine HexMesh_UpdateHOArrays + +! +!//////////////////////////////////////////////////////////////////////// +! Multi Level Runge-Kutta +!//////////////////////////////////////////////////////////////////////// +!------------------------------------------------------------------------ +! 
Construct +!------------------------------------------------------------------------ + subroutine MultiLevel_RK_Construct(self, mesh, nLevel) + implicit none + !-arguments----------------------------------------- + class(MultiLevel_RK), target , intent(inout) :: self + type (HexMesh) :: mesh + integer , intent(in) :: nLevel + + !-local variable----------------------------------------- + integer :: i + + !-deallocate----------------------------------------- + if( allocated(self % ML_GlobalLevel) ) deallocate(self % ML_GlobalLevel) + if( allocated(self % ML_Level)) deallocate(self % ML_Level) + if( allocated(self % MLIter) ) deallocate(self % MLIter) + if( allocated(self % MLIter_eID) ) deallocate(self % MLIter_eID) + if( allocated(self % MLIter_fID) ) deallocate(self % MLIter_fID) + if( allocated(self % MLIter_fID_Interior)) deallocate(self % MLIter_fID_Interior) + if( allocated(self % MLIter_fID_Boundary) ) deallocate(self % MLIter_fID_Boundary) + if( allocated(self % MLIter_fID_MPI) ) deallocate(self % MLIter_fID_MPI) + if( allocated(self % MLIter_eID_Seq) ) deallocate(self % MLIter_eID_Seq) + if( allocated(self % MLIter_eIDN) ) deallocate(self % MLIter_eIDN) + if( allocated(self % MLIter_eIDN_Seq) ) deallocate(self % MLIter_eIDN_Seq) + + !-allocate----------------------------------------- + allocate(self % ML_GlobalLevel(nLevel), self % ML_Level(nLevel), self % MLIter(nLevel,10)) + allocate(self % MLIter_eID(mesh % no_of_elements)) + allocate(self % MLIter_fID(mesh % no_of_faces)) ! 
Each face has 2 sides, except boundaries + allocate(self % MLIter_fID_Interior(size(mesh % faces_interior))) + allocate(self % MLIter_fID_Boundary(size(mesh % faces_boundary))) + allocate(self % MLIter_eID_Seq(size(mesh % elements_sequential))) + allocate(self % MLIter_eIDN(mesh % no_of_elements)) + allocate(self % MLIter_eIDN_Seq(size(mesh % elements_sequential))) + + + !Initial default value - all Level 1 + self % nLevel = nLevel + self % maxLevel = nLevel + self % CFL_CutOff = 0.5_RP + self % ML_GlobalLevel = mesh % no_of_allElements + self % ML_Level = mesh % no_of_elements + self % MLIter = 0 + self % MLIter(1,1) = mesh % no_of_elements + self % MLIter(1,2) = size(mesh % faces) + self % MLIter(1,3) = size(mesh % faces_interior) + self % MLIter(1,4) = size(mesh % faces_boundary) + self % MLIter(1,5) = size(mesh % elements_sequential) + self % MLIter(1,8) = mesh % no_of_elements + self % MLIter(1,9) = size(mesh % elements_sequential) + self % MLIter_eID (:) = [(i, i=1,size(mesh % elements))] + self % MLIter_fID (:) = [(i, i=1,size(mesh % faces))] + self % MLIter_fID_Interior = mesh % faces_interior + self % MLIter_fID_Boundary = mesh % faces_boundary + self % MLIter_eID_Seq = mesh % elements_sequential + self % MLIter_eIDN (:) = [(i, i=1,size(mesh % elements))] + self % MLIter_eIDN_Seq = mesh % elements_sequential + +#ifdef _HAS_MPI_ + if ( MPI_Process % doMPIAction ) then + if( allocated(self % MLIter_eID_MPI) ) deallocate(self % MLIter_eID_MPI) + if( allocated(self % MLIter_fID_MPI) ) deallocate(self % MLIter_fID_MPI) + if( allocated(self % MLIter_eIDN_MPI) ) deallocate(self % MLIter_eIDN_MPI) + allocate(self % MLIter_eID_MPI(size(mesh % elements_mpi))) + allocate(self % MLIter_fID_MPI(size(mesh % faces_mpi))) + allocate(self % MLIter_eIDN_MPI(size(mesh % elements_mpi))) + self % MLIter(1,6) = size(mesh % elements_mpi) + self % MLIter(1,7) = size(mesh % faces_mpi) + self % MLIter(1,10) = size(mesh % elements_mpi) + self % MLIter_eID_MPI = mesh % elements_mpi + 
self % MLIter_fID_MPI = mesh % faces_mpi + self % MLIter_eIDN_MPI = mesh % elements_mpi + end if +#endif + + end subroutine MultiLevel_RK_Construct +!------------------------------------------------------------------------ +! Update +!------------------------------------------------------------------------ + subroutine MultiLevel_RK_Update(self, mesh, CFL_Cut, globalMax, globalMin, maxCFLInterf) + use MPI_Process_Info + implicit none + !-arguments----------------------------------------- + class(MultiLevel_RK), target , intent(inout) :: self + type (HexMesh) :: mesh + real (kind=RP) , intent(in) :: CFL_Cut + real (kind=RP) , intent(in) :: globalMax + real (kind=RP) , intent(in) :: globalMin + real (kind=RP) , intent(in) :: maxCFLInterf + + !-local variable----------------------------------------- + integer :: eID, level, levelOld, i, j, fID, sideID + integer :: counter(1:self%nLevel), nInterior, nBoundary, nFace, nMPIface + integer :: Level_eID(mesh % no_of_elements), Level_eID_wN(mesh % no_of_elements), Level_eID_wN_BUFF(mesh % no_of_elements) + integer :: faceFlag(size(mesh % faces),2) + integer :: iterations(self%nLevel+1,10) + integer :: ierr ! Error for MPI calls + real(kind=RP) :: perLevel, cflLevel, dtRatio, cfl + real(kind=RP) :: maxc, minc + real(kind=RP) :: cfl_cutoff(self%nLevel-1) + integer, allocatable :: neighborID(:,:), neighborIDAll(:,:),counterOMP(:,:), counterOMPN(:,:) +! +! Initialize +! ------------------------------------------------------------ + dtRatio = 0.8_RP ! 
estimated maximum local timestep ratio in RK3 (5/12) + self % CFL_CutOff = CFL_Cut + self % MLIter = 0 + + allocate(neighborID(self%nLevel-2,mesh % no_of_allElements), neighborIDAll(self%nLevel-2,mesh % no_of_allElements)) + neighborID = 0 + neighborIDAll= 0 + allocate(counterOMP(self%nLevel,mesh % no_of_elements), counterOMPN (self%nLevel,mesh % no_of_elements)) + counterOMP = 0 + counterOMPN = 0 + Level_eID = 0 + Level_eID_wN = 0 + + cfl_cutoff(1) = self % CFL_CutOff + + do i = 2, self % nLevel-1 + cfl_cutoff(i) = cfl_cutoff(i-1)*2.0_RP/dtRatio + end do +! +! Determine the level of each element +! ------------------------------------------------------------ +!$omp parallel shared(self,mesh,neighborID,neighborIDAll,CFL_Cut, dtRatio, counter, counterOMP, counterOMPN, maxCFLInterf, cfl_cutoff, Level_eID, Level_eID_wN) default(private) +!$omp do schedule(runtime) + do eID = 1, SIZE(mesh % elements) + ! Determine level from CFL cutoff and CFL percentile. Level <= max Level + cfl = mesh % elements(eID) % ML_CFL +#ifdef MULTIPHASE + ! For multiphase, the interface elements have homogenous level, represented by the highest CFL along interface elements + maxc = min(maxval(mesh % elements(eID) % storage % Q(1,:,:,:)),1.0_RP) + minc = max(minval(mesh % elements(eID) % storage % Q(1,:,:,:)),0.0_RP) + if ((maxc.gt.0.001).and.(minc.lt.0.999)) then + cfl = maxCFLInterf + end if +#endif + ! Determine Level of element based on its cfl + level = self % nLevel + do i = 1, self % nLevel-1 + if (cfl .lt. cfl_cutoff(i)) then + level = i + exit + end if + end do + counterOMP(level,eID) = 1 + mesh % elements(eID) % MLevel = level + Level_eID(eID) = level + end do +!$omp end do +!$omp end parallel + counterOMPN = counterOMP + Level_eID_wN= Level_eID + ! 
Check the neighbours of each element to ensure no rapid jump in level, modify level if required + do i=self%nLevel, 2, -1 + call MultiLevel_RK_CheckNeighbour(self, mesh, i, Level_eID, Level_eID_wN, counterOMP, counterOMPN) + end do + Level_eID_wN_BUFF = Level_eID_wN + ! Sort decending Level_eID (Large to small level) and Level_eID_wN (neighbour included) + call sortDescendInt(Level_eID,self % MLIter_eID) + call sortDescendInt(Level_eID_wN, self % MLIter_eIDN) + + do i = 1, self % nLevel + self % MLIter(i,1) = sum(counterOMP(i:self % nLevel,:)) ! Elements + self % ML_Level(i) = sum(counterOMP(i,:)) + self % MLIter(i,8) = sum(counterOMPN(i:self % nLevel,:)) ! Elements + neighbours + end do + + deallocate(counterOMP, counterOMPN) + + !All faces of each sorted element + nInterior=0 + nBoundary=0 + levelOld = self % nLevel + faceFlag = 0 + nFace = 0 + nMPIface = 0 + + do i = 1, self % MLIter(1,1) + eID = self % MLIter_eID(i) + level = Level_eID (i) + + if (level .ne. levelOld) then + self % MLIter(1:levelOld,2) = nFace + self % MLIter(1:levelOld,3) = nInterior + self % MLIter(1:levelOld,4) = nBoundary + self % MLIter(1:levelOld,7) = nMPIface + levelOld = level + end if + + ! Check for interior or boundary faces + do j=1,6 + fID = mesh % elements(eID) % faceIDs(j) + sideID = mesh % elements(eID) % faceSide(j) + + ! Order of faceID from highest to lowest level + if (faceFlag(fID,1).eq.0) then + nFace = nFace+1 + self % MLIter_fID(nFace) = fID + faceFlag(fID,1) = 1 + end if + ! Order of interiorFace, boundaryFace and MPIFace from highest to lowest level + if (mesh % faces(fID) % FaceType .eq. HMESH_INTERIOR) then + if (faceFlag(fID,2).eq.0) then + nInterior = nInterior+1 + self % MLIter_fID_Interior(nInterior) = fID + faceFlag(fID,2) = 1 + end if + elseif (mesh % faces(fID) % FaceType .eq. HMESH_BOUNDARY) then + nBoundary = nBoundary+1 + self % MLIter_fID_Boundary(nBoundary) = fID + elseif (mesh % faces(fID) % FaceType .eq. 
HMESH_MPI) then + nMPIface = nMPIface + 1 + self % MLIter_fID_MPI(nMPIface) = fID + end if + end do + end do + self % MLIter(1:level,2) = nFace + self % MLIter(1:level,3) = nInterior + self % MLIter(1:level,4) = nBoundary + self % MLIter(1:level,7) = nMPIface + + ! ! Sequential elements + do i = 1, size(mesh % elements_sequential) + eID = mesh % elements_sequential(i) + Level_eID(i) = mesh % elements(eID) % MLevel + Level_eID_wN(i) = Level_eID_wN_BUFF(eID) + self % MLIter(1:Level_eID(i),5) = self % MLIter(1:Level_eID(i),5)+1 + self % MLIter(1:Level_eID_wN(i),9) = self % MLIter(1:Level_eID_wN(i),9)+1 + end do + ! Sort decending Level_eID (Large to small level) + call sortDescendInt(Level_eID(1:size(mesh % elements_sequential)),self % MLIter_eID_Seq) + call sortDescendInt(Level_eID_wN(1:size(mesh % elements_sequential)),self % MLIter_eIDN_Seq) + + ! Sort eID, fID for better memory access + iterations = 0 + iterations(1:self%nLevel,:)=self % MLIter + do i=self % nLevel, 1, -1 + call sortAscendInt(self % MLIter_eID(iterations(i+1,1)+1:iterations(i,1))) + call sortAscendInt(self % MLIter_fID(iterations(i+1,2)+1:iterations(i,2))) + call sortAscendInt(self % MLIter_fID_Interior(iterations(i+1,3)+1:iterations(i,3))) + call sortAscendInt(self % MLIter_fID_Boundary(iterations(i+1,4)+1:iterations(i,4))) + call sortAscendInt(self % MLIter_eID_Seq(iterations(i+1,5)+1:iterations(i,5))) + call sortAscendInt(self % MLIter_eIDN(iterations(i+1,8)+1:iterations(i,8))) + call sortAscendInt(self % MLIter_eIDN_Seq(iterations(i+1,9)+1:iterations(i,9))) + end do + self % ML_GlobalLevel = self % ML_Level + +! 
MPI elements and operation +#ifdef _HAS_MPI_ + if ( MPI_Process % doMPIAction ) then + counter(1:self % nLevel)=0 + do i = 1, size(mesh % elements_mpi) + eID = mesh % elements_mpi(i) + Level_eID(i) = mesh % elements(eID) % MLevel + Level_eID_wN(i) = Level_eID_wN_BUFF(eID) + self % MLIter(1:Level_eID(i),6) = self % MLIter(1:Level_eID(i),6)+1 + self % MLIter(1:Level_eID_wN(i),10) = self % MLIter(1:Level_eID_wN(i),10)+1 + end do + ! Sort decending Level_eID (Large to small level) + call sortDescendInt(Level_eID(1:size(mesh % elements_mpi)),self % MLIter_eID_MPI) + call sortDescendInt(Level_eID_wN(1:size(mesh % elements_mpi)),self % MLIter_eIDN_MPI) + + ! Global statistics level + call mpi_allreduce(self % ML_Level(1:self%nLevel), self % ML_GlobalLevel(1:self%nLevel), self%nLevel, MPI_INT, MPI_SUM, & + MPI_COMM_WORLD, ierr) + end if +#endif + +! Max actual level - This is the actual level used should no element exist in nLevel + do i = self % nLevel,1,-1 + self % maxLevel = i + if (self % ML_GlobalLevel(i).ne.0) exit + end do + + if (self % maxLevel.eq.1) then + if (MPI_Process % isRoot ) then +! +! Write Error Information +! ----------------------------------------------- + write(STD_OUT,'(/)') + call SubSection_Header("Update Multi-Level Local Timestepping") + write(STD_OUT,'(A)') "Error: All elements are in Level 1. Minimum 2 levels required. Use normal RK method" + end if + errorMessage(STD_OUT) + error stop + end if + + if ( .not. MPI_Process % isRoot ) return +! +! Write Information +! ----------------------------------------------- + write(STD_OUT,'(/)') + call SubSection_Header("Update Multi-Level Local Timestepping") + write(STD_OUT,'(30X,A,A27,F10.4)') "->" , "Max. Advective CFL: " , globalMax + write(STD_OUT,'(30X,A,A27,F10.4)') "->" , "Min. Advective CFL: " , globalMin + write(STD_OUT,'(30X,A,A27,F10.4)') "->" , "Max. Allow. Global CFL: " , 2.5_RP**real(self % maxLevel-1) +#if defined(MULTIPHASE) + write(STD_OUT,'(30X,A,A27,F10.4)') "->" , "Max. 
CFL Interface: " , maxCFLInterf +#endif + write(STD_OUT,'(30X,A,A27)') "->" , "Number of Element: " + do i = 1,self % nLevel + write(STD_OUT,'(30X,A,A18,I4,A4,I10)') "->" , "Level ", i," : ",self%ML_GlobalLevel(i) + end do + write(STD_OUT,'(30X,A,A27,I4)') "->" , "Number of RK3 Level: " , self % maxLevel + + end subroutine MultiLevel_RK_Update +!------------------------------------------------------------------------ +! Update +!------------------------------------------------------------------------ + subroutine MultiLevel_RK_CheckNeighbour(self, mesh, level, Level_eID, Level_eID_wN, counterOMP, counterOMPN) + implicit none + !-arguments----------------------------------------- + type(MultiLevel_RK) , intent(inout) :: self + type (HexMesh) :: mesh + integer , intent(in) :: level + integer , intent(inout) :: Level_eID(mesh % no_of_elements) + integer , intent(inout) :: Level_eID_wN(mesh % no_of_elements) + integer , intent(inout) :: counterOMP(self % nLevel, mesh % no_of_elements) + integer , intent(inout) :: counterOMPN(self % nLevel, mesh % no_of_elements) + + integer :: eID, i, j + integer :: ierr + integer, allocatable :: neighborID(:), neighborIDAll(:) + + allocate(neighborID(mesh % no_of_allElements), neighborIDAll(mesh % no_of_allElements)) + neighborID = 0 + neighborIDAll = 0 + +!$omp parallel shared(self,mesh,neighborID,neighborIDAll, counterOMP, counterOMPN, level, Level_eID, Level_eID_wN) default(private) +!$omp do schedule(runtime) + do eID =1, size(mesh % elements) + associate(element => mesh % elements(eID)) + ! Flag neighboring elements of elements with level equal to the level N that being assesed + if (element % MLevel .eq. level) then + do i =1, 6 + if ((element % Connection(i) % globID .gt. 0).and.(element % Connection(i) % globID .lt. 
mesh % no_of_allElements+1)) then + neighborID(element % Connection(i) % globID) = neighborID(element % Connection(i) % globID) +1 + end if + end do + end if + end associate + end do +!$omp end do +!$omp end parallel + + neighborIDAll = neighborID +! Combine information on neighboring elements with other MPI +#ifdef _HAS_MPI_ + if ( MPI_Process % doMPIAction ) then + ! Gather all the neigboring element of level 3 (globalID) + call mpi_allreduce(neighborID, neighborIDAll, mesh % no_of_allElements, MPI_INT, MPI_SUM, & + MPI_COMM_WORLD, ierr) + end if +#endif + deallocate(neighborID) +!$omp parallel shared(self,mesh,neighborIDAll, counterOMP, counterOMPN, Level_eID, Level_eID_wN, level) default(private) +!$omp do schedule(runtime) + ! Neighbouring elements of elements with level N will have at least level N-1 + do eID =1, SIZE(mesh % elements) + associate(element => mesh % elements(eID)) + if (element % MLevel.lt.(level)) then + if (neighborIDAll(element % globID).gt.0) then + element % MLevel = level-1 + counterOMP (:,eID) = 0 + counterOMP (level-1,eID) = 1 + Level_eID_wN(eID) = level + Level_eID(eID) = element % MLevel + counterOMPN (:,eID)= 0 + counterOMPN (level,eID) = 1 + end if + end if + end associate + end do +!$omp end do +!$omp end parallel + + end subroutine MultiLevel_RK_CheckNeighbour +!------------------------------------------------------------------------ +! 
! Update
!------------------------------------------------------------------------
      !> Release every iteration/bookkeeping array owned by the multi-level
      !> RK driver. Safe to call on a partially constructed object: each
      !> array is deallocated only if it is currently allocated.
      subroutine MultiLevel_RK_Destruct(self)
         implicit none
         !-arguments-----------------------------------------
         class(MultiLevel_RK), target , intent(inout) :: self

         if ( allocated(self % ML_GlobalLevel)      ) deallocate(self % ML_GlobalLevel)
         if ( allocated(self % ML_Level)            ) deallocate(self % ML_Level)
         if ( allocated(self % MLIter)              ) deallocate(self % MLIter)
         if ( allocated(self % MLIter_eID)          ) deallocate(self % MLIter_eID)
         if ( allocated(self % MLIter_fID)          ) deallocate(self % MLIter_fID)
         if ( allocated(self % MLIter_fID_Interior) ) deallocate(self % MLIter_fID_Interior)
         if ( allocated(self % MLIter_fID_Boundary) ) deallocate(self % MLIter_fID_Boundary)
         ! NOTE(review): MLIter_fID_MPI was deallocated twice in the original
         ! (harmless because of the allocated() guard, but redundant); it is
         ! now released once, below, together with the other MPI arrays.
         if ( allocated(self % MLIter_eID_Seq)      ) deallocate(self % MLIter_eID_Seq)
         if ( allocated(self % MLIter_eIDN)         ) deallocate(self % MLIter_eIDN)
         if ( allocated(self % MLIter_eIDN_Seq)     ) deallocate(self % MLIter_eIDN_Seq)

         if ( allocated(self % MLIter_eID_MPI)      ) deallocate(self % MLIter_eID_MPI)
         if ( allocated(self % MLIter_fID_MPI)      ) deallocate(self % MLIter_fID_MPI)
         if ( allocated(self % MLIter_eIDN_MPI)     ) deallocate(self % MLIter_eIDN_MPI)

      end subroutine MultiLevel_RK_Destruct
!
!////////////////////////////////////////////////////////////////////////
!
subroutine HexMesh_DefineAcousticElements(self, observer, acoustic_sources, d_th, virtualZones) implicit none diff --git a/Solver/src/libs/physics/common/LESModels.f90 b/Solver/src/libs/physics/common/LESModels.f90 index 97ccc5f963..bf6b338ab5 100644 --- a/Solver/src/libs/physics/common/LESModels.f90 +++ b/Solver/src/libs/physics/common/LESModels.f90 @@ -577,7 +577,7 @@ pure function get_rho(Q, dimensionless_) result(rho) #elif defined (INCNS) rho = Q(INSRHO) #elif defined (MULTIPHASE) - rho = dimensionless_%rho(1) * Q(IMC) + dimensionless_%rho(2) * (1.0 - Q(IMC)) + rho = dimensionless_%rho(1) * max(min(Q(IMC),1.0_RP),0.0_RP) + dimensionless_%rho(2) * (1.0 - max(min(Q(IMC),1.0_RP),0.0_RP)) ! #else ! print *, "Error: rho computation not valid for physics " ! stop diff --git a/Solver/src/libs/physics/multiphase/Physics_MU.f90 b/Solver/src/libs/physics/multiphase/Physics_MU.f90 index 7b180aed85..d1282c45a7 100644 --- a/Solver/src/libs/physics/multiphase/Physics_MU.f90 +++ b/Solver/src/libs/physics/multiphase/Physics_MU.f90 @@ -144,35 +144,35 @@ END Module Physics_MU ! SUBROUTINE ComputeEigenvaluesForState( Q, eigen ) - USE SMConstants - USE PhysicsStorage_MU - use FluidData_MU, only: Thermodynamics + use SMConstants + use PhysicsStorage_MU + use VariableConversion_MU + use FluidData_MU IMPLICIT NONE ! ! --------- ! Arguments ! --------- ! - REAL(KIND=Rp), DIMENSION(NCONS) :: Q - REAL(KIND=Rp), DIMENSION(3) :: eigen + REAL(KIND=RP), DIMENSION(NCONS) :: Q + REAL(KIND=RP), DIMENSION(3) :: eigen ! ! --------------- ! Local Variables ! --------------- ! - REAL(KIND=Rp) :: u, v, w, p, a -print*, "Get eigenvalues!!" -errorMessage(STD_OUT) -error stop -! -! u = ABS( Q(IMSQRHOU)/Q(IMSQRHO) ) -! v = ABS( Q(IMSQRHOV)/Q(IMSQRHO) ) -! w = ABS( Q(IMSQRHOW)/Q(IMSQRHO) ) -! 
a = sqrt(u*u+v*v+w*w + 4.0_RP * thermodynamics % rho0c02/Q(IMSQRHO)) + REAL(KIND=RP) :: u, v, w, p, a, invSqrtRho, sqrtRho + + sqrtRho = sqrt(dimensionless % rho(2) + (dimensionless % rho(1)-dimensionless % rho(2))* (min(max(Q(1),0.0_RP),1.0_RP))) + invSqrtRho = 1.0_RP / sqrtRho + + u = ABS( Q(IMSQRHOU)*invSqrtRho ) + v = ABS( Q(IMSQRHOV)*invSqrtRho ) + w = ABS( Q(IMSQRHOW)*invSqrtRho ) + a = sqrt(thermodynamics % c02(2)+( thermodynamics % c02(1)-thermodynamics % c02(2))* (min(max(Q(1),0.0_RP),1.0_RP)))/ refValues % V -! eigen(1) = 0.5_RP * (u + a) -! eigen(2) = 0.5_RP * (v + a) -! eigen(3) = 0.5_RP * (w + a) - eigen = 0.0_RP + eigen(1) = (u + a) + eigen(2) = (v + a) + eigen(3) = (w + a) END SUBROUTINE ComputeEigenvaluesForState diff --git a/Solver/src/libs/physics/multiphase/VariableConversion_MU.f90 b/Solver/src/libs/physics/multiphase/VariableConversion_MU.f90 index 2044f56485..71bf251012 100644 --- a/Solver/src/libs/physics/multiphase/VariableConversion_MU.f90 +++ b/Solver/src/libs/physics/multiphase/VariableConversion_MU.f90 @@ -98,16 +98,20 @@ pure subroutine getVelocityGradients(Q,Q_x,Q_y,Q_z,dimensionless_,U_x,U_y,U_z) real(kind=RP) :: rho, invRho, invRho2, uDivRho(NDIM) !------------------------------------------------------------- - rho = dimensionless_ % rho(1)*Q(IMC) + dimensionless_ % rho(2)*(1-Q(IMC)) - invRho = 1._RP / rho - invRho2 = invRho * invRho + ! rho = dimensionless_ % rho(1)*Q(IMC) + dimensionless_ % rho(2)*(1-Q(IMC)) + ! invRho = 1._RP / rho + ! invRho2 = invRho * invRho - uDivRho = [Q(IMSQRHOU) , Q(IMSQRHOV) , Q(IMSQRHOW) ] * invRho2 + ! uDivRho = [Q(IMSQRHOU) , Q(IMSQRHOV) , Q(IMSQRHOW) ] * invRho2 - u_x = invRho * Q_x(IMSQRHOU:IMSQRHOW) - uDivRho * Q_x(IMC)*(dimensionless_ % rho(1) - dimensionless_ % rho(2)) - u_y = invRho * Q_y(IMSQRHOU:IMSQRHOW) - uDivRho * Q_y(IMC)*(dimensionless_ % rho(1) - dimensionless_ % rho(2)) - u_z = invRho * Q_z(IMSQRHOU:IMSQRHOW) - uDivRho * Q_z(IMC)*(dimensionless_ % rho(1) - dimensionless_ % rho(2)) - + ! 
u_x = invRho * Q_x(IMSQRHOU:IMSQRHOW) - uDivRho * Q_x(IMC)*(dimensionless_ % rho(1) - dimensionless_ % rho(2)) + ! u_y = invRho * Q_y(IMSQRHOU:IMSQRHOW) - uDivRho * Q_y(IMC)*(dimensionless_ % rho(1) - dimensionless_ % rho(2)) + ! u_z = invRho * Q_z(IMSQRHOU:IMSQRHOW) - uDivRho * Q_z(IMC)*(dimensionless_ % rho(1) - dimensionless_ % rho(2)) + + u_x = Q_x(IMSQRHOU:IMSQRHOW) + u_y = Q_y(IMSQRHOU:IMSQRHOW) + u_z = Q_z(IMSQRHOU:IMSQRHOW) + end subroutine getVelocityGradients ! diff --git a/Solver/src/libs/sources/ActuatorLine.f90 b/Solver/src/libs/sources/ActuatorLine.f90 index 89cfca9ca7..384cf2b7bb 100644 --- a/Solver/src/libs/sources/ActuatorLine.f90 +++ b/Solver/src/libs/sources/ActuatorLine.f90 @@ -671,7 +671,7 @@ end subroutine UpdateFarm ! !/////////////////////////////////////////////////////////////////////////////////////// ! - subroutine ForcesFarm(self, mesh, time) + subroutine ForcesFarm(self, mesh, time, Level) use PhysicsStorage use HexMeshClass use fluiddata @@ -680,12 +680,13 @@ subroutine ForcesFarm(self, mesh, time) class(Farm_t) , intent(inout) :: self type(HexMesh), intent(in) :: mesh real(kind=RP),intent(in) :: time + integer, intent(in), optional :: Level ! local vars real(kind=RP) :: Non_dimensional, t, interp integer :: ii,jj, kk integer :: i,j, k - integer :: eID, eIndex + integer :: eID, eIndex, locLevel real(kind=RP), dimension(NDIM) :: actuator_source real(kind=RP) :: local_angle real(kind=RP) :: local_velocity @@ -697,6 +698,12 @@ subroutine ForcesFarm(self, mesh, time) #endif if (.not. self % active) return + + if (present(Level)) then + locLevel = Level + else + locLevel = 1 + end if Non_dimensional = POW2(refValues % V) * refValues % rho / Lref t = time * Lref / refValues % V @@ -705,6 +712,7 @@ subroutine ForcesFarm(self, mesh, time) do eIndex = 1, size(elementsActuated) eID = elementsActuated(eIndex) + if (mesh % elements(eID) % MLevel .eq. 
locLevel) then kk = turbineOfElement(eIndex) do k = 0, mesh % elements(eID) % Nxyz(3) ; do j = 0, mesh % elements(eID) % Nxyz(2) ; do i = 0, mesh % elements(eID) % Nxyz(1) @@ -750,13 +758,15 @@ subroutine ForcesFarm(self, mesh, time) mesh % elements(eID) % storage % S_NS(IMSQRHOW,i,j,k) = actuator_source(3)*invSqrtRho #endif end do ; end do ; end do + end if end do else ! no projection !$omp do schedule(runtime) private(i,j,k,ii,jj,kk,actuator_source,eID,interp) do eIndex = 1, size(elementsActuated) - eID = elementsActuated(eIndex) + eID = elementsActuated(eIndex) + if (mesh % elements(eID) % MLevel .eq. locLevel) then ! only one turbine is associated for one element kk = turbineOfElement(eIndex) @@ -792,6 +802,7 @@ subroutine ForcesFarm(self, mesh, time) mesh % elements(eID) % storage % S_NS(IMSQRHOW,i,j,k) = actuator_source(3)*invSqrtRho #endif end do ; end do ; end do + end if end do !$omp end do diff --git a/Solver/src/libs/timeintegrator/AnisFASMultigridClass.f90 b/Solver/src/libs/timeintegrator/AnisFASMultigridClass.f90 index 87398bbd8d..3360c4854c 100644 --- a/Solver/src/libs/timeintegrator/AnisFASMultigridClass.f90 +++ b/Solver/src/libs/timeintegrator/AnisFASMultigridClass.f90 @@ -605,6 +605,7 @@ recursive subroutine FASVCycle(this,t,lvl,Dir,TE, ComputeTimeDerivative) if (SmoothFine .AND. 
lvl > 1) then call MGRestrictToChild(this,Dir,lvl,t,TE, ComputeTimeDerivative) Childp_sem => this % Child % MGStorage(Dir) % p_sem + call ComputeTimeDerivative(Childp_sem % mesh, Childp_sem % particles, t, CTD_IGNORE_MODE) if (MAXVAL(ComputeMaxResiduals(p_sem % mesh)) < SmoothFineFrac * MAXVAL(ComputeMaxResiduals & diff --git a/Solver/src/libs/timeintegrator/ExplicitMethods.f90 b/Solver/src/libs/timeintegrator/ExplicitMethods.f90 index 8654a704bf..55036fb9f5 100644 --- a/Solver/src/libs/timeintegrator/ExplicitMethods.f90 +++ b/Solver/src/libs/timeintegrator/ExplicitMethods.f90 @@ -17,9 +17,9 @@ MODULE ExplicitMethods IMPLICIT NONE private - public EULER_NAME, RK3_NAME, RK5_NAME, OPTRK_NAME, SSPRK33_NAME, SSPRK43_NAME, EULER_RK3_NAME, LSERK14_4_NAME, MIXED_RK_NAME - public EULER_KEY, RK3_KEY, RK5_KEY, OPTRK_KEY, SSPRK33_KEY, SSPRK43_KEY, EULER_RK3_KEY, LSERK14_4_KEY, MIXED_RK_KEY - public TakeExplicitEulerStep, TakeRK3Step, TakeRK5Step, TakeRKOptStep + public EULER_NAME, RK3_NAME, RK5_NAME, OPTRK_NAME, SSPRK33_NAME, SSPRK43_NAME, EULER_RK3_NAME, LSERK14_4_NAME, MIXED_RK_NAME, ML_RK3_NAME + public EULER_KEY, RK3_KEY, RK5_KEY, OPTRK_KEY, SSPRK33_KEY, SSPRK43_KEY, EULER_RK3_KEY, LSERK14_4_KEY, MIXED_RK_KEY, ML_RK3_KEY + public TakeExplicitEulerStep, TakeRK3Step, TakeRK5Step, TakeRKOptStep, TakeMLRK3Step public TakeSSPRK33Step, TakeSSPRK43Step, TakeEulerRK3Step, TakeLSERK14_4Step, TakeMixedRKStep public Enable_CTD_AFTER_STEPS, Enable_limiter, CTD_AFTER_STEPS, LIMITED, LIMITER_MIN @@ -42,6 +42,7 @@ MODULE ExplicitMethods character(len=*), parameter :: EULER_RK3_NAME = "euler rk3" character(len=*), parameter :: LSERK14_4_NAME = "lserk14-4" character(len=*), parameter :: MIXED_RK_NAME = "mixed rk" + character(len=*), parameter :: ML_RK3_NAME = "multi level rk3" integer, parameter :: EULER_KEY = 1 @@ -52,7 +53,8 @@ MODULE ExplicitMethods integer, parameter :: SSPRK43_KEY = 6 integer, parameter :: EULER_RK3_KEY = 7 integer, parameter :: LSERK14_4_KEY = 8 - integer, parameter 
:: MIXED_RK_KEY = 9 + integer, parameter :: MIXED_RK_KEY = 9 + integer, parameter :: ML_RK3_KEY = 10 !======== CONTAINS @@ -1450,6 +1452,180 @@ SUBROUTINE TakeRKOptStep( mesh, particles, t, deltaT, ComputeTimeDerivative , N_ end subroutine TakeRKOptStep ! !/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +! +! ------------------------------ +! Routine for Multi Level RK3 +! ------------------------------ + SUBROUTINE TakeMLRK3Step( mesh, particles, t, deltaT, ComputeTimeDerivative, dt_vec, dts, global_dt, iter) +! +! ---------------------------------- +! Williamson's 3rd order Runge-Kutta +! ---------------------------------- +! + IMPLICIT NONE +! +! ----------------- +! Input parameters: +! ----------------- +! + type(HexMesh) :: mesh +#ifdef FLOW + type(Particles_t) :: particles +#else + logical :: particles +#endif + REAL(KIND=RP) :: t, deltaT + real(kind=RP), allocatable, dimension(:), intent(in), optional :: dt_vec + procedure(ComputeTimeDerivative_f) :: ComputeTimeDerivative + logical, intent(in), optional :: dts + real(kind=RP), intent(in), optional :: global_dt + integer, intent(in), optional :: iter +! +! --------------- +! Local variables +! --------------- +! + REAL(KIND=RP), DIMENSION(3) :: a = (/0.0_RP , -5.0_RP /9.0_RP , -153.0_RP/128.0_RP/) + REAL(KIND=RP), DIMENSION(4) :: b = (/0.0_RP , 1.0_RP /3.0_RP , 3.0_RP/4.0_RP , 1.0_RP /) + REAL(KIND=RP), DIMENSION(3) :: c = (/1.0_RP/3.0_RP, 15.0_RP/16.0_RP, 8.0_RP/15.0_RP /) + REAL(KIND=RP), DIMENSION(3) :: d + + + + INTEGER :: i, j, id, lID, locLevel, k2, k3, k1, nLevel + INTEGER, ALLOCATABLE :: k(:) + REAL(KIND=RP) :: deltaStep(3), corrector, deltaTLF + REAL(KIND=RP), ALLOCATABLE :: cL(:) , tk(:), deltaTL(:) ! 
+ + nLevel = mesh % MLRK % maxLevel + allocate(k(nLevel), cL(nLevel), tk(nLevel), deltaTL(nLevel)) + k(:) = 1 + + d(1)=1.0_RP/3.0_RP + d(2)=5.0_RP/12.0_RP + d(3)=1.0_RP/4.0_RP + + deltaStep(1) = b(2) + deltaStep(2) = b(3)-b(2) + deltaStep(3) = 1.0_RP-b(3) + + deltaTL(:) = deltaT + tk(:) = t + + associate ( MLIter_eID => mesh % MLRK % MLIter_eID, MLIter => mesh % MLRK % MLIter ) + + k(:) = 3 + do k1 = 1,3 + tk(:) = t + b(k1)*deltaT + call ComputeTimeDerivative( mesh, particles, tk(1), CTD_IGNORE_MODE) + locLevel = 1 +! ------------------------------------------------------------------------------------------------------------------------------- +! LEVEL 2-LEVEL N-1 +! ------------------------------------------------------------------------------------------------------------------------------- + do k2 = 1, max(3**(nLevel-2),1) + k(nLevel-1) = k(nLevel-1)+1 + do i=nLevel-1,1,-1 + if (k(i).gt.3) then + k(i)=1 + if (i.ne.1) then + k(i-1)=k(i-1) +1 + end if + else + exit + end if + end do + + do i=2, nLevel + deltaTL(i) = deltaTL(i-1) * deltaStep(k(i-1)) + end do +! ------------------------------------------------------------------------------------------------------------------------------- +! LEVEL N +! ------------------------------------------------------------------------------------------------------------------------------- + do k3 = 1,3 + k(nLevel) = k3 +! 
Update G_NS/G_CH from QDot - Depend on level etc +!$omp parallel do schedule(runtime) private(id, corrector) + do i = 1, MLIter(locLevel,1) + id = MLIter_eID(i) + + if (locLevel.eq.nLevel) then + corrector = a(k(nLevel)) + elseif(i.gt.MLIter(min(locLevel+1,nLevel),1)) then + corrector = a(k(locLevel)) + else + corrector = 0.0_RP + end if + + associate(storage => mesh % elements(id) % storage) +#ifdef FLOW + storage % G_NS = corrector * storage % G_NS + storage % QDot +#endif +#if (defined(CAHNHILLIARD)) && (!defined(FLOW)) + storage % G_CH = corrector * storage % G_CH + storage % cDot +#endif + end associate + end do +!$omp end parallel do + +! Marching in time, all elements + corrector = 1.0_RP + do i = 1, nLevel + corrector = corrector * d(k(i)) + end do + do i = 1, nLevel + cL(i) = c(k(i)) * corrector/d(k(i)) + end do + +!$omp parallel do schedule(runtime) private(id, corrector) + do i = 1, MLIter(1,1) + do j = nLevel, 1, -1 + corrector = cL(j) + if (i.le.MLIter(j,1)) exit + end do + + id = MLIter_eID(i) + + associate(storage => mesh % elements(id) % storage) +#ifdef FLOW + storage % Q = storage % Q + corrector * deltaT * storage % G_NS +#endif + +#if (defined(CAHNHILLIARD)) && (!defined(FLOW)) + storage % c = storage % c + corrector * deltaT * storage % G_CH +#endif + end associate + end do +!$omp end parallel do + if (all(k(2:nLevel) == 3)) then + exit + else + do i=nLevel,2,-1 + locLevel =i + if (k(i).ne.3) exit + end do + end if + + tk(locLevel)=tk(locLevel-1)+b(k(locLevel)+1) * deltaTL(locLevel) + + call ComputeTimeDerivative( mesh, particles, tk(locLevel), CTD_IGNORE_MODE, Level = locLevel) ! Update Qdot + + end do + end do + + end do ! k1 + + end associate + + call ComputeTimeDerivative( mesh, particles, t+deltaT, CTD_IGNORE_MODE, Level = 1) ! Necessary for residual computation +! +! 
To obtain the updated residuals + if ( CTD_AFTER_STEPS ) CALL ComputeTimeDerivative( mesh, particles, t+deltaT, CTD_IGNORE_MODE) + + call checkForNan(mesh, t) + + END SUBROUTINE TakeMLRK3Step +! +!/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ! subroutine Enable_limiter(integrator, minimum) ! diff --git a/Solver/src/libs/timeintegrator/TimeIntegrator.f90 b/Solver/src/libs/timeintegrator/TimeIntegrator.f90 index 89b7a7e720..87098441fa 100644 --- a/Solver/src/libs/timeintegrator/TimeIntegrator.f90 +++ b/Solver/src/libs/timeintegrator/TimeIntegrator.f90 @@ -53,6 +53,9 @@ MODULE TimeIntegratorClass type(MultiTauEstim_t) :: TauEstimator character(len=LINE_LENGTH) :: integration_method integer :: RKStep_key + REAL(KIND=RP) :: ML_CFL_CutOff + INTEGER :: ML_ReLevel_Iteration, ML_Counter, ML_nLevel + logical :: ML_ReLevel PROCEDURE(TimeStep_FCN), NOPASS , POINTER :: RKStep ! ! ======== @@ -147,6 +150,8 @@ SUBROUTINE constructTimeIntegrator(self,controlVariables, sem, initial_iter, ini self % tolerance = controlVariables % doublePrecisionValueForKey("convergence tolerance") self % RKStep => TakeRK3Step + +! call sem % mesh % MLRK % construct(sem % mesh, 1) ! 
default 1 level if (controlVariables % containsKey(TIME_INTEGRATION_KEY)) then self % integration_method = controlVariables % stringValueForKey(TIME_INTEGRATION_KEY, LINE_LENGTH) else @@ -191,6 +196,30 @@ SUBROUTINE constructTimeIntegrator(self,controlVariables, sem, initial_iter, ini case(MIXED_RK_NAME) self % RKStep => TakeMixedRKStep self % RKStep_key = MIXED_RK_KEY + case(ML_RK3_NAME) + if ( controlVariables % ContainsKey("cfl cut-off") ) then + self % ML_CFL_CutOff = controlVariables % doublePrecisionValueForKey("cfl cut-off") + self % ML_CFL_CutOff = min(max(self % ML_CFL_CutOff,0.0001_RP),10.0_RP) + else + self % ML_CFL_CutOff = 0.5_RP + end if + if ( controlVariables % ContainsKey("relevel iteration") ) then + self % ML_ReLevel_Iteration = controlVariables % integerValueForKey ("relevel iteration") + else + self % ML_ReLevel_Iteration = 1000000000 + end if + if ( controlVariables % ContainsKey("number of level") ) then + self % ML_nLevel = controlVariables % integerValueForKey ("number of level") + else + self % ML_nLevel = 3 + end if + self % ML_ReLevel = .true. + + self % RKStep => TakeMLRK3Step + self % RKStep_key = ML_RK3_KEY + self % ML_Counter = 0 + + call sem % mesh % MLRK % construct(sem % mesh, self % ML_nLevel) ! 
construct nlevel case default print*, "Explicit time integration method not implemented" @@ -290,6 +319,13 @@ SUBROUTINE constructTimeIntegrator(self,controlVariables, sem, initial_iter, ini write(STD_OUT,'(A)') "Euler-RK3" case (MIXED_RK_KEY) write(STD_OUT,'(A)') "Mixed rk" + case (ML_RK3_KEY) + write(STD_OUT,'(A)') "Multi-Level RK3" + write(STD_OUT,'(35X,A,A23,I14)') "->" , "Number of Level: ", self % ML_nLevel + write(STD_OUT,'(35X,A,A23,F7.4)') "->" , "CFL Cut-Off: ", self % ML_CFL_CutOff + write(STD_OUT,'(35X,A,A23,I14)') "->" , "Update Iteration: ", self % ML_ReLevel_Iteration + write(STD_OUT,'(35X,A,A23,A)') "->" , "Cut-Off Level 1: ","CFL Cut-Off" + write(STD_OUT,'(35X,A,A23,A)') "->" , "Cut-Off Level N: ","CFL Cut-Off x 2.5^(N-1) " end select write(STD_OUT,'(30X,A,A28)',advance='no') "->" , "Stage limiter: " @@ -423,6 +459,7 @@ subroutine IntegrateInTime( self, sem, controlVariables, monitors, ComputeTimeDe use FASMultigridClass use AnisFASMultigridClass use RosenbrockTimeIntegrator + use ExplicitMethods use StopwatchClass use FluidData use mainKeywordsModule @@ -463,8 +500,10 @@ subroutine IntegrateInTime( self, sem, controlVariables, monitors, ComputeTimeDe REAL(KIND=RP) :: t REAL(KIND=RP) :: maxResidual(NCONS) REAL(KIND=RP) :: dt + REAL(KIND=RP) :: globalMax, globalMin, maxCFLInterf integer :: k integer :: eID + logical :: updatelevel CHARACTER(len=LINE_LENGTH) :: SolutionFileName ! Time-step solvers: type(FASMultigrid_t) :: FASSolver @@ -666,6 +705,16 @@ subroutine IntegrateInTime( self, sem, controlVariables, monitors, ComputeTimeDe #if defined(NAVIERSTOKES) if( sem% mesh% IBM% active ) call sem% mesh% IBM% SemiImplicitCorrection( sem% mesh% elements, t, dt ) #endif + if (self % RKStep_key .eq. ML_RK3_KEY) then + self % ML_Counter = self % ML_Counter + 1 + if ((self % ML_Counter .eq. 
self % ML_ReLevel_Iteration).or.(self % ML_ReLevel)) THEN + CALL DetermineCFL(sem, self % dt, globalMax, globalMin, maxCFLInterf) + call sem % mesh % MLRK % construct(sem % mesh, self % ML_nLevel) ! reconstruct nLevel + CALL sem % mesh % MLRK % update (sem % mesh, self % ML_CFL_CutOff, globalMax, globalMin, maxCFLInterf) + self % ML_ReLevel = .false. + self % ML_Counter = 0 + end if + end if CALL self % RKStep ( sem % mesh, sem % particles, t, dt, ComputeTimeDerivative, iter=k+1) #if defined(NAVIERSTOKES) if( sem% mesh% IBM% active ) call sem% mesh% IBM% SemiImplicitCorrection( sem% mesh% elements, t, dt ) diff --git a/Solver/src/libs/timeintegrator/pAdaptationClass.f90 b/Solver/src/libs/timeintegrator/pAdaptationClass.f90 index f18e32a530..746803c17a 100644 --- a/Solver/src/libs/timeintegrator/pAdaptationClass.f90 +++ b/Solver/src/libs/timeintegrator/pAdaptationClass.f90 @@ -21,6 +21,10 @@ module pAdaptationClass use HexMeshClass , only: HexMesh use ElementConnectivityDefinitions , only: neighborFaces use ReadMeshFile , only: NumOfElemsFromMeshFile + use Headers +#ifdef _HAS_MPI_ + use mpi +#endif implicit none #include "Includes.h" @@ -59,10 +63,24 @@ module pAdaptationClass real(kind=RP) :: y_span(2) real(kind=RP) :: z_span(2) integer :: mode + integer :: polynomial(3) contains procedure :: initialize => OverEnriching_Initialize end type overenriching_t + !----------------------------------------------------------------- + ! Type for storing the adaptation based on range value of variable + !----------------------------------------------------------------- + type :: pAdaptVariable_t + integer :: ID + integer :: variable + real(kind=RP) :: maxValue + real(kind=RP) :: minValue + integer :: polynomial(3) + + contains + procedure :: initialize => pAdaptVariable_Initialize + end type pAdaptVariable_t !------------------------------------------------------- ! 
Main base type for performing a p-adaptation procedure @@ -84,6 +102,7 @@ module pAdaptationClass real(kind=RP) :: nextAdaptationTime = huge(1._RP) character(len=BC_STRING_LENGTH), allocatable :: conformingBoundaries(:) ! Stores the conforming boundaries (the length depends on FTDictionaryClass) type(overenriching_t) , allocatable :: overenriching(:) + type(pAdaptVariable_t) , allocatable :: adaptVariable(:) contains procedure(constructInterface), deferred :: pAdaptation_Construct @@ -245,7 +264,7 @@ subroutine GetMeshPolynomialOrders(controlVariables,Nx,Ny,Nz,Nmax) deallocate (Nx_r , Ny_r , Nz_r ) end if - Nmax = max(Nmax,maxval(Nx),maxval(Ny),maxval(Nz)) + Nmax = 12 ! not a good practice but necessary - old: max(Nmax,maxval(Nx),maxval(Ny),maxval(Nz)) ! ! ***************************************************************************** @@ -274,6 +293,7 @@ end subroutine GetMeshPolynomialOrders !/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ! 
subroutine OverEnriching_Initialize(this,oID) + use MPI_Process_Info implicit none !---------------------------------- class(overenriching_t) :: this @@ -284,7 +304,7 @@ subroutine OverEnriching_Initialize(this,oID) character(LINE_LENGTH) :: x_span character(LINE_LENGTH) :: y_span character(LINE_LENGTH) :: z_span - character(LINE_LENGTH) :: mode + character(LINE_LENGTH) :: mode, poly integer, allocatable :: order !---------------------------------- @@ -304,6 +324,7 @@ subroutine OverEnriching_Initialize(this,oID) call readValueInRegion ( trim ( paramFile ) , "y span" , y_span , in_label , "# end" ) call readValueInRegion ( trim ( paramFile ) , "z span" , z_span , in_label , "# end" ) call readValueInRegion ( trim ( paramFile ) , "mode" , mode , in_label , "# end" ) + call readValueInRegion ( trim ( paramFile ) , "polynomial " , poly , in_label , "# end" ) if (allocated(order)) then this % order = order @@ -323,10 +344,30 @@ subroutine OverEnriching_Initialize(this,oID) else this % mode = 1 end if + + if ( poly /= "" ) then + this % polynomial = getIntArrayFromString(poly) + else + this % polynomial(1) = this % order + this % polynomial(2) = this % order + this % polynomial(3) = this % order + end if this % x_span = getRealArrayFromString(x_span) this % y_span = getRealArrayFromString(y_span) this % z_span = getRealArrayFromString(z_span) + + if ( .not. MPI_Process % isRoot ) return +! +! Write Information +! 
----------------------------------------------- + write(STD_OUT,'(/)') + call SubSection_Header("Initialize Adaptation based on Overenriching Box") + write(STD_OUT,'(30X,A,A27,A20)') "->" , "ID: " , this % ID + write(STD_OUT,'(30X,A,A27,A20)') "->" , "x span: " , x_span + write(STD_OUT,'(30X,A,A27,A20)') "->" , "y span: " , y_span + write(STD_OUT,'(30X,A,A27,A20)') "->" , "z span: " , z_span + write(STD_OUT,'(30X,A,A27,A14)') "->" , "Polynomials: " , poly deallocate(order) @@ -441,9 +482,9 @@ subroutine OverEnrichRegions(overenriching,mesh,NNew,Nmax,Nmin) NNew(2,eID) = max(min(NNew(2,eID) + region % order, Nmax(2)), Nmin(2)) NNew(3,eID) = max(min(NNew(3,eID) + region % order, Nmax(3)), Nmin(3)) else if (region % mode == 2) then !Set mode - NNew(1,eID) = max(min(region % order, Nmax(1)), Nmin(1)) - NNew(2,eID) = max(min(region % order, Nmax(2)), Nmin(2)) - NNew(3,eID) = max(min(region % order, Nmax(3)), Nmin(3)) + NNew(1,eID) = max(min(region % polynomial(1), Nmax(1)), Nmin(1)) + NNew(2,eID) = max(min(region % polynomial(2), Nmax(2)), Nmin(2)) + NNew(3,eID) = max(min(region % polynomial(3), Nmax(3)), Nmin(3)) end if enriched(eID) = .TRUE. @@ -461,6 +502,261 @@ end subroutine OverEnrichRegions ! !/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// ! +! ROUTINES FOR p Adaptation based on variable +! +!/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +! 
+ subroutine pAdaptVariable_Initialize(this,oID) + use MPI_Process_Info + implicit none + !---------------------------------- + class(pAdaptVariable_t) :: this + integer, intent(in) :: oID + !---------------------------------- + character(LINE_LENGTH) :: paramFile + character(LINE_LENGTH) :: in_label + character(LINE_LENGTH) :: rangeValue + character(LINE_LENGTH) :: poly + character(LINE_LENGTH) :: variable + real(kind=RP) :: minmaxValue(2) + !---------------------------------- + + call get_command_argument(1, paramFile) +! +! Get adaptation variable ID +! --------------------------- + this % ID = oID +! +! Search for the parameters in the case file +! ------------------------------------------ + write(in_label , '(A,I0)') "#define adapt variable " , this % ID + + call get_command_argument(1, paramFile) ! + call readValueInRegion ( trim ( paramFile ) , "variable" , variable , in_label , "# end" ) + call readValueInRegion ( trim ( paramFile ) , "value range" , rangeValue , in_label , "# end" ) + call readValueInRegion ( trim ( paramFile ) , "polynomial " , poly , in_label , "# end" ) + + + if ( variable /= "" ) then + select case ( trim (variable) ) + case ("rho") + this % variable = 1 + case ("q2") + this % variable = 2 + case default + this % variable = 1 + end select + else + this % variable = 1 + end if + + minmaxValue = getRealArrayFromString(rangeValue) + this % polynomial = getIntArrayFromString(poly) + + this % maxValue = maxval(minmaxValue) + this % minValue = minval(minmaxValue) + + if ( .not. MPI_Process % isRoot ) return +! +! Write Information +! ----------------------------------------------- + write(STD_OUT,'(/)') + call SubSection_Header("Initialize Adaptation based on Variable") + write(STD_OUT,'(30X,A,A27,A20)') "->" , "ID: " , this % ID + write(STD_OUT,'(30X,A,A27,A10)') "->" , "Variable: " , variable + write(STD_OUT,'(30X,A,A27,F10.4)') "->" , "Max. value: " , this % maxValue + write(STD_OUT,'(30X,A,A27,F10.4)') "->" , "Min. 
value: " , this % minValue + write(STD_OUT,'(30X,A,A27,A14)') "->" , "Polynomials: " , poly + + end subroutine pAdaptVariable_Initialize +! +!/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +! + subroutine getNoOfpAdaptVariables(no_of_regions) + implicit none + integer, intent(out) :: no_of_regions +! +! --------------- +! Local variables +! --------------- +! + character(len=LINE_LENGTH) :: case_name, line + integer :: fID + integer :: io +! +! Initialize +! ---------- + no_of_regions = 0 +! +! Get case file name +! ------------------ + call get_command_argument(1, case_name) + +! +! Open case file +! -------------- + open ( newunit = fID , file = case_name , status = "old" , action = "read" ) + +! +! Read the whole file to find overenriching regions +! ------------------------------------------------- +readloop:do + read ( fID , '(A)' , iostat = io ) line + + if ( io .lt. 0 ) then +! +! End of file +! ----------- + line = "" + exit readloop + + elseif ( io .gt. 0 ) then +! +! Error +! ----- + errorMessage(STD_OUT) + error stop "error stopped." + + else +! +! Succeeded +! --------- + line = getSquashedLine( line ) + + if ( index ( line , '#defineadaptvariable' ) .gt. 0 ) then + no_of_regions = no_of_regions + 1 + end if + + end if + + end do readloop +! +! Close case file +! --------------- + close(fID) + + end subroutine getNoOfpAdaptVariables +! +!/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +! + subroutine pAdaptVariableRange(adaptVar,mesh,NNew) + implicit none + !--------------------------------------- + type(pAdaptVariable_t), allocatable :: adaptVar(:) + type(HexMesh), intent(in) :: mesh + integer :: NNew(:,:) + !--------------------------------------- + integer :: eID, oID ! Element counter + real(kind=RP) :: maxQ, minQ + + !--------------------------------------- + + if (.not. 
allocated(adaptVar) ) return + + do oID = 1, size(adaptVar) +!$omp parallel shared(adaptVar,mesh,NNew, oID) default(private) +!$omp do schedule(runtime) + do eID=1, mesh % no_of_elements + + maxQ = maxval(mesh % elements(eID) % storage % Q(adaptVar(oID) % variable,:,:,:)) + minQ = minval(mesh % elements(eID) % storage % Q(adaptVar(oID) % variable,:,:,:)) + + if (((maxQ.lt.adaptVar(oID)%maxValue).and.(maxQ.gt.adaptVar(oID)%minValue)).or.((minQ.lt.adaptVar(oID)%maxValue).and.(minQ.gt.adaptVar(oID)%minValue))) then + NNew (1,eID) = adaptVar(oID) % polynomial(1) + NNew (2,eID) = adaptVar(oID) % polynomial(2) + NNew (3,eID) = adaptVar(oID) % polynomial(3) + else if ((maxQ .gt. adaptVar(oID)%maxValue) .and. (minQ.lt.adaptVar(oID)%minValue)) then + NNew (1,eID) = adaptVar(oID) % polynomial(1) + NNew (2,eID) = adaptVar(oID) % polynomial(2) + NNew (3,eID) = adaptVar(oID) % polynomial(3) + end if + end do +!$omp end do +!$omp end parallel + end do + + end subroutine pAdaptVariableRange +! +!/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +! 
This subroutine ensure no rapid jump on polynomial order between elements +!/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// + subroutine pAdapt_CheckNeighbour(mesh, currentP, pJump, NNew) + use MPI_Process_Info + implicit none + !--------------------------------------- + type(HexMesh) :: mesh + integer , intent(in) :: currentP + integer , intent(in) :: pJump + integer , intent(inout) :: NNew(:,:) + !--------------------------------------- + integer :: eID, i, j + integer :: ierr + integer, allocatable :: neighborID(:,:), neighborIDAll(:,:) + + allocate(neighborID(3, mesh % no_of_allElements), neighborIDAll(3,mesh % no_of_allElements)) + neighborID = 0 + neighborIDAll = 0 + + !--------------------------------------- + +!$omp parallel shared(mesh,neighborID,neighborIDAll, NNew, currentP) default(private) +!$omp do schedule(runtime) + do eID = 1, size(mesh % elements) + associate(element => mesh % elements(eID)) + do i = 1, 3 + if (NNew(i,eID).eq.currentP) then + do j = 1,6 + if ((element % Connection(j) % globID .gt. 0).and.(element % Connection(j) % globID .lt. mesh % no_of_allElements+1)) then + neighborID(i,element % Connection(j) % globID) = neighborID(i,element % Connection(j) % globID) +1 + end if + end do + end if + end do + end associate + end do +!$omp end do +!$omp end parallel + neighborIDAll = neighborID +! Combine information on neighboring elements with other MPI +#ifdef _HAS_MPI_ + if ( MPI_Process % doMPIAction ) then + ! 
Gather all the neigboring element (globalID) + call mpi_allreduce(neighborID(1,:), neighborIDAll(1,:), mesh % no_of_allElements, MPI_INT, MPI_SUM, & + MPI_COMM_WORLD, ierr) + call mpi_barrier(MPI_COMM_WORLD, ierr) + call mpi_allreduce(neighborID(2,:), neighborIDAll(2,:), mesh % no_of_allElements, MPI_INT, MPI_SUM, & + MPI_COMM_WORLD, ierr) + call mpi_barrier(MPI_COMM_WORLD, ierr) + call mpi_allreduce(neighborID(3,:), neighborIDAll(3,:), mesh % no_of_allElements, MPI_INT, MPI_SUM, & + MPI_COMM_WORLD, ierr) + call mpi_barrier(MPI_COMM_WORLD, ierr) + end if +#endif + + deallocate(neighborID) + + +!$omp parallel shared(mesh, neighborIDAll, NNew, currentP, pJump) default(private) +!$omp do schedule(runtime) + ! Neighbouring elements of elements with polynomial currentP will have at least order currentP-1 + do eID =1, SIZE(mesh % elements) + associate(element => mesh % elements(eID)) + do i =1,3 + if ((NNew(i,eID).lt.(currentP-pJump))) then + if (neighborIDAll(i,element % globID).gt.0) then + NNew(i,eID) = currentP-pJump + end if + end if + end do + end associate + end do +!$omp end do +!$omp end parallel + + end subroutine pAdapt_CheckNeighbour +! +!/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// +! ! ROUTINES FOR ADAPTATION ! !/////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// diff --git a/Solver/src/libs/timeintegrator/pAdaptationClassRL.f90 b/Solver/src/libs/timeintegrator/pAdaptationClassRL.f90 index c68ff98e7d..244a8b416f 100644 --- a/Solver/src/libs/timeintegrator/pAdaptationClassRL.f90 +++ b/Solver/src/libs/timeintegrator/pAdaptationClassRL.f90 @@ -52,6 +52,7 @@ module pAdaptationClassRL logical :: error_estimation = .false. logical :: avg_error_type = .true. 
!True for average error, false for max error integer :: error_variable !1:u, 2:v, 3:w, 4:rho*u, 5:rho*v, 6:rho*w, 7:p (only for Navier-Stokes), 8:rho + integer :: pJump logical :: acoustics = .false. real(kind=RP) :: acoustic_tol = 1e-4_RP integer :: acoustic_variable !7:p, 8:rho @@ -95,7 +96,7 @@ subroutine pAdaptation_Construct(this, controlVariables, t0, mesh) character(LINE_LENGTH) :: in_label character(LINE_LENGTH) :: agentFile character(20*BC_STRING_LENGTH) :: confBoundaries, R_acoustic_sources - character(LINE_LENGTH) :: R_Nmax, R_Nmin, R_OrderAcrossFaces, replacedValue, R_mode, R_interval, cwd, R_ErrorType, R_ErrorVariable, R_observer, R_acoustic_variable + character(LINE_LENGTH) :: R_Nmax, R_Nmin, R_OrderAcrossFaces, replacedValue, R_mode, R_interval, R_Jump, cwd, R_ErrorType, R_ErrorVariable, R_observer, R_acoustic_variable logical , allocatable :: R_increasing, reorganize_z, R_restart, R_ErrorEstimation, R_acoustics real(kind=RP), allocatable :: R_tolerance, R_threshold, R_acoustic_tol, R_acoustic_distance ! Extra vars @@ -132,6 +133,7 @@ subroutine pAdaptation_Construct(this, controlVariables, t0, mesh) call readValueInRegion ( trim ( paramFile ) , "order across faces" , R_OrderAcrossFaces , in_label , "# end" ) call readValueInRegion ( trim ( paramFile ) , "mode" , R_mode , in_label , "# end" ) call readValueInRegion ( trim ( paramFile ) , "interval" , R_interval , in_label , "# end" ) + call readValueInRegion ( trim ( paramFile ) , "max polynomial diff" , R_Jump , in_label , "# end" ) call readValueInRegion ( trim ( paramFile ) , "restart files" , R_restart , in_label , "# end" ) call readValueInRegion ( trim ( paramFile ) , "agent file" , agentFile , in_label , "# end" ) call readValueInRegion ( trim ( paramFile ) , "threshold" , R_threshold , in_label , "# end" ) @@ -292,6 +294,14 @@ subroutine pAdaptation_Construct(this, controlVariables, t0, mesh) if ( allocated(R_restart) ) then this % restartFiles = R_restart end if + +! 
Maximum polynomial difference between neighbour elements +! -------------------------------------------------------- + if ( R_Jump /= "" ) then + this % pJump = GetIntValue(R_Jump) + else + this % pJump = 10 + end if ! Acoustics ! --------------- @@ -373,6 +383,19 @@ subroutine pAdaptation_Construct(this, controlVariables, t0, mesh) call this % overenriching(i) % initialize (i) end do end if + +! Adaptation based on variable value +! ********************************** + + call getNoOfpAdaptVariables(no_of_overen_boxes) + + if (no_of_overen_boxes > 0) then + allocate ( this % adaptVariable(no_of_overen_boxes) ) + + do i = 1, no_of_overen_boxes + call this % adaptVariable(i) % initialize (i) + end do + end if ! Policy definition for the RL agent ! ********************************** @@ -441,6 +464,7 @@ subroutine pAdaptation_pAdapt(this, sem, itera, t, computeTimeDerivative, Comput character(len=LINE_LENGTH) :: AdaptedMeshFile logical :: last integer :: Ndir = 3, Ndir_acoustics = 4 + integer :: maxPGlob, minPGlob, maxP, minP ! integer :: pressure_var = 7, rho_var = 8 !-mpi-variables------------------------- integer :: ierr @@ -565,6 +589,34 @@ subroutine pAdaptation_pAdapt(this, sem, itera, t, computeTimeDerivative, Comput ! ---------------------------- ! call OverEnrichRegions(this % overenriching, sem % mesh, NNew, this % NxyzMax, NMIN) +! +! ---------------------------- +! Adaptation based on variable +! ---------------------------- +! + call pAdaptVariableRange(this % adaptVariable, sem % mesh, NNew) +! +! -------------------------------------------------------------------------- +! Restrict polynomial order jump between elements to be pJump (Default is 1) +! -------------------------------------------------------------------------- +! 
+ maxP=maxval(NNew) + minP=minval(NNew) + maxPGlob = maxP + minPGlob = minP + +#ifdef _HAS_MPI_ + if ( MPI_Process % doMPIAction ) then + call mpi_allreduce(maxP, maxPGlob, 1, MPI_INT, MPI_MAX, & + MPI_COMM_WORLD, ierr) + call mpi_allreduce(minP, minPGlob, 1, MPI_INT, MPI_MIN, & + MPI_COMM_WORLD, ierr) + end if +#endif + + do i=maxPGlob,minPGlob+2,-1 + call pAdapt_CheckNeighbour(sem % mesh, i, this % pJump, NNew) + end do ! ! --------------------------------------------------------------- @@ -694,7 +746,10 @@ subroutine pAdaptation_pAdaptRL_SelectElemPolorders (this, e, NNew, tolerance, N ! Initialization of NNew ! ------------------------------- NNew = -1 ! Initialized to negative value - + + if (maxval(Pxyz).gt.6) then ! The RL only allow pMax .le.6 - This is to bypass the process should higher p is assign by other refinement method + NNew = Pxyz + else ! -------------------------------- ! Select the polynomial order in Direction 1 ! -------------------------------- @@ -1065,6 +1120,8 @@ subroutine pAdaptation_pAdaptRL_SelectElemPolorders (this, e, NNew, tolerance, N e % storage % sensor = max(e % storage % sensor, sqrt(error_sensor / ((Pxyz(1)+1) * (Pxyz(2)+1)))) end if end if + + end if end subroutine pAdaptation_pAdaptRL_SelectElemPolorders diff --git a/Solver/test/Multiphase/ActuatorLineInterpolation/SETUP/ProblemFile.f90 b/Solver/test/Multiphase/ActuatorLineInterpolation/SETUP/ProblemFile.f90 index 5aaf0d7ef1..c448ef0b2f 100644 --- a/Solver/test/Multiphase/ActuatorLineInterpolation/SETUP/ProblemFile.f90 +++ b/Solver/test/Multiphase/ActuatorLineInterpolation/SETUP/ProblemFile.f90 @@ -594,12 +594,12 @@ SUBROUTINE UserDefinedFinalize(mesh, time, iter, maxResidual & real(kind=RP), parameter :: residuals(5) = [ 0.0000000000000000E+00_RP,& 6.6848885010291738E+01_RP,& - 0.2700344444108091E+00_RP,& - 0.2914829241166736E+00_RP,& + 0.2700343177172555E+00_RP,& + 0.2914828786903740E+00_RP,& 2.2442342918587788E+03_RP] real(kind=RP), parameter :: source(5) = [ 
0.0000000000000000E+00_RP,& - -5.9825397593181728E-03_RP,& + -5.98253977235252E-03_RP,& 0.0000192653654527E+00_RP,& -1.9887137428830E-05_RP,& 0.0000000000000000E+00_RP] diff --git a/Solver/test/Multiphase/RisingBubbleVreman/SETUP/ProblemFile.f90 b/Solver/test/Multiphase/RisingBubbleVreman/SETUP/ProblemFile.f90 index 9ffadfa9a6..4be5851e46 100644 --- a/Solver/test/Multiphase/RisingBubbleVreman/SETUP/ProblemFile.f90 +++ b/Solver/test/Multiphase/RisingBubbleVreman/SETUP/ProblemFile.f90 @@ -487,13 +487,13 @@ SUBROUTINE UserDefinedFinalize(mesh, time, iter, maxResidual & LOGICAL :: success real(kind=RP), parameter :: area_saved = 2.0280805425949214E-01_RP real(kind=RP), parameter :: xcog_saved = 1.014044175764861E-01_RP - real(kind=RP), parameter :: risevel_saved = 3.840682144321292E-04_RP - real(kind=RP), parameter :: residuals_saved(5) = [7.2798703496061990E-01_RP, & - 4.1415341863462345E+00_RP, & + real(kind=RP), parameter :: risevel_saved = 3.840682015588E-04_RP + real(kind=RP), parameter :: residuals_saved(5) = [7.2798707116375E-01_RP, & + 4.14153741113145E+00_RP, & 9.7885082257257918E-14_RP, & - 3.2583666451827610E+00_RP, & - 1.5785032494113227E+02_RP] - real(kind=RP), parameter :: entropyRate_saved = -6.542225158039039E-03_RP + 3.25836947641518E+00_RP, & + 1.5785047340470567E+02_RP] + real(kind=RP), parameter :: entropyRate_saved = -6.54222548413974E-03_RP CALL initializeSharedAssertionsManager sharedManager => sharedAssertionsManager()