diff --git a/bindings/c/tests/laplace.c b/bindings/c/tests/laplace.c index e2759f9a..30d4afc6 100755 --- a/bindings/c/tests/laplace.c +++ b/bindings/c/tests/laplace.c @@ -90,6 +90,7 @@ int main() { cmfe_BasisType Basis = (cmfe_BasisType)NULL; cmfe_BoundaryConditionsType BoundaryConditions=(cmfe_BoundaryConditionsType)NULL; + cmfe_ComputationEnvironmentType ComputationEnvironment = (cmfe_ComputationEnvironmentType)NULL; cmfe_CoordinateSystemType CoordinateSystem=(cmfe_CoordinateSystemType)NULL,WorldCoordinateSystem=(cmfe_CoordinateSystemType)NULL; cmfe_DecompositionType Decomposition=(cmfe_DecompositionType)NULL; cmfe_EquationsType Equations=(cmfe_EquationsType)NULL; @@ -103,7 +104,7 @@ int main() cmfe_SolverType Solver=(cmfe_SolverType)NULL; cmfe_SolverEquationsType SolverEquations=(cmfe_SolverEquationsType)NULL; - int NumberOfComputationalNodes,ComputationalNodeNumber; + int NumberOfComputationNodes,ComputationNodeNumber; int EquationsSetIndex; int FirstNodeNumber,LastNodeNumber; int FirstNodeDomain,LastNodeDomain; @@ -127,8 +128,9 @@ int main() CHECK_ERROR("Initialising OpenCMISS-Iron"); Err = cmfe_ErrorHandlingModeSet(CMFE_ERRORS_TRAP_ERROR); - Err = cmfe_ComputationalNumberOfNodesGet(&NumberOfComputationalNodes); - Err = cmfe_ComputationalNodeNumberGet(&ComputationalNodeNumber); + Err = cmfe_ComputationEnvironment_Initialise(&ComputationEnvironment); + Err = cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,&NumberOfComputationNodes); + Err = cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,&ComputationNodeNumber); /* Start the creation of a new RC coordinate system */ Err = cmfe_CoordinateSystem_Initialise(&CoordinateSystem); @@ -200,7 +202,7 @@ int main() Err = cmfe_Decomposition_CreateStart(DECOMPOSITION_USER_NUMBER,Mesh,Decomposition); /* Set the decomposition to be a general decomposition with the specified number of domains */ Err = cmfe_Decomposition_TypeSet(Decomposition,CMFE_DECOMPOSITION_CALCULATED_TYPE); - Err = cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationalNodes); + Err = cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationNodes); /* Finish the decomposition */ Err = cmfe_Decomposition_CreateFinish(Decomposition); @@ -313,12 +315,12 @@ int main() } Err = cmfe_Decomposition_NodeDomainGet(Decomposition,FirstNodeNumber,1,&FirstNodeDomain); Err = cmfe_Decomposition_NodeDomainGet(Decomposition,LastNodeNumber,1,&LastNodeDomain); - if(FirstNodeDomain==ComputationalNodeNumber) + if(FirstNodeDomain==ComputationNodeNumber) { Err = cmfe_BoundaryConditions_SetNode(BoundaryConditions,DependentField,CMFE_FIELD_U_VARIABLE_TYPE,1,1,FirstNodeNumber,1, \ CMFE_BOUNDARY_CONDITION_FIXED,0.0); } - if(LastNodeDomain==ComputationalNodeNumber) + if(LastNodeDomain==ComputationNodeNumber) { Err = cmfe_BoundaryConditions_SetNode(BoundaryConditions,DependentField,CMFE_FIELD_U_VARIABLE_TYPE,1,1,LastNodeNumber,1, \ CMFE_BOUNDARY_CONDITION_FIXED,1.0); diff --git a/cmake/Sources.cmake b/cmake/Sources.cmake index 48a9140d..be248f44 100644 --- a/cmake/Sources.cmake +++ b/cmake/Sources.cmake @@ -36,7 +36,8 @@ set(IRON_Fortran_SRC cmiss_petsc_types.f90 cmiss_petsc.f90 cmiss.f90 - computational_environment.f90 + computation_routines.f90 + computation_access_routines.f90 constants.f90 control_loop_routines.f90 control_loop_access_routines.f90 diff --git a/src/Darcy_equations_routines.f90 b/src/Darcy_equations_routines.f90 index ac5c4248..73109680 100755 --- a/src/Darcy_equations_routines.f90 +++ 
b/src/Darcy_equations_routines.f90 @@ -7,7 +7,8 @@ MODULE DARCY_EQUATIONS_ROUTINES USE Constants USE CONTROL_LOOP_ROUTINES USE ControlLoopAccessRoutines - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE COORDINATE_ROUTINES USE DistributedMatrixVector USE DOMAIN_MAPPINGS @@ -7213,7 +7214,7 @@ SUBROUTINE DARCY_EQUATION_MONITOR_CONVERGENCE(CONTROL_LOOP,SOLVER,err,error,*) INTEGER(INTG) :: FIELD_VAR_TYPE INTEGER(INTG) :: dof_number,NUMBER_OF_DOFS,equations_set_idx - INTEGER(INTG) :: COMPUTATIONAL_NODE_NUMBER + INTEGER(INTG) :: COMPUTATION_NODE_NUMBER INTEGER(INTG) :: FILEUNIT_N, FILEUNIT_N1 ENTERS("DARCY_EQUATION_MONITOR_CONVERGENCE",err,error,*999) @@ -7226,8 +7227,8 @@ SUBROUTINE DARCY_EQUATION_MONITOR_CONVERGENCE(CONTROL_LOOP,SOLVER,err,error,*) NULLIFY(vectorMapping) NULLIFY(FIELD_VARIABLE) - COMPUTATIONAL_NODE_NUMBER=ComputationalEnvironment_NodeNumberGet(err,error) - WRITE(FILENAME,'("Darcy_",I3.3,".conv")') COMPUTATIONAL_NODE_NUMBER + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,COMPUTATION_NODE_NUMBER,err,error,*999) + WRITE(FILENAME,'("Darcy_",I3.3,".conv")') COMPUTATION_NODE_NUMBER FILEPATH = "./output/"//FILENAME OPEN(UNIT=23, FILE=CHAR(FILEPATH),STATUS='unknown',ACCESS='append') diff --git a/src/Navier_Stokes_equations_routines.f90 b/src/Navier_Stokes_equations_routines.f90 index 6c95e960..1f5444bf 100644 --- a/src/Navier_Stokes_equations_routines.f90 +++ b/src/Navier_Stokes_equations_routines.f90 @@ -1,3 +1,46 @@ +!> \file +!> \author Sebastian Krittian +!> \brief This module handles all Navier-Stokes fluid routines. +!> +!> \section LICENSE +!> +!> Version: MPL 1.1/GPL 2.0/LGPL 2.1 +!> +!> The contents of this file are subject to the Mozilla Public License +!> Version 1.1 (the "License"); you may not use this file except in +!> compliance with the License. You may obtain a copy of the License at +!> http://www.mozilla.org/MPL/ +!> +!> Software distributed under the License is distributed on an "AS IS" +!> basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +!> License for the specific language governing rights and limitations +!> under the License. +!> +!> The Original Code is OpenCMISS +!> +!> The Initial Developer of the Original Code is University of Auckland, +!> Auckland, New Zealand, the University of Oxford, Oxford, United +!> Kingdom and King's College, London, United Kingdom. Portions created +!> by the University of Auckland, the University of Oxford and King's +!> College, London are Copyright (C) 2007-2010 by the University of +!> Auckland, the University of Oxford and King's College, London. +!> All Rights Reserved. +!> +!> Contributor(s): Sebastian Krittian, David Ladd, Soroush Safaei, Chris Bradley +!> +!> Alternatively, the contents of this file may be used under the terms of +!> either the GNU General Public License Version 2 or later (the "GPL"), or +!> the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +!> in which case the provisions of the GPL or the LGPL are applicable instead +!> of those above. If you wish to allow use of your version of this file only +!> under the terms of either the GPL or the LGPL, and not to allow others to +!> use your version of this file under the terms of the MPL, indicate your +!> decision by deleting the provisions above and replace them with the notice +!> and other provisions required by the GPL or the LGPL. 
If you do not delete +!> the provisions above, a recipient may use your version of this file under +!> the terms of any one of the MPL, the GPL or the LGPL. +!> + !>This module handles all Navier-Stokes fluid routines. MODULE NAVIER_STOKES_EQUATIONS_ROUTINES @@ -7,10 +50,11 @@ MODULE NAVIER_STOKES_EQUATIONS_ROUTINES USE BasisRoutines USE BOUNDARY_CONDITIONS_ROUTINES USE CHARACTERISTIC_EQUATION_ROUTINES - USE CmissMPI + USE CmissMPI USE CmissPetsc USE CmissPetscTypes - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE Constants USE CONTROL_LOOP_ROUTINES USE COORDINATE_ROUTINES @@ -6961,7 +7005,7 @@ SUBROUTINE NavierStokes_PreSolveUpdateBoundaryConditions(SOLVER,err,error,*) INTEGER(INTG) :: componentNumberVelocity,numberOfDimensions,numberOfNodes,numberOfGlobalNodes INTEGER(INTG) :: dependentVariableType,independentVariableType,dependentDof,independentDof,userNodeNumber,localNodeNumber INTEGER(INTG) :: EquationsSetIndex,SolidNodeNumber,FluidNodeNumber,equationsSetIdx - INTEGER(INTG) :: currentTimeLoopIteration,outputIterationNumber,numberOfFittedNodes,computationalNode + INTEGER(INTG) :: currentTimeLoopIteration,outputIterationNumber,numberOfFittedNodes,computationNode INTEGER(INTG), ALLOCATABLE :: InletNodes(:) REAL(DP) :: CURRENT_TIME,TIME_INCREMENT,DISPLACEMENT_VALUE,VALUE,XI_COORDINATES(3),timeData,QP,QPP,componentValues(3) REAL(DP) :: T_COORDINATES(20,3),MU_PARAM,RHO_PARAM,X(3),FluidGFValue,SolidDFValue,NewLaplaceBoundaryValue,Lref,Tref,Mref @@ -7054,7 +7098,7 @@ SUBROUTINE NavierStokes_PreSolveUpdateBoundaryConditions(SOLVER,err,error,*) IF(ASSOCIATED(independentField)) THEN componentNumberVelocity = 1 numberOfDimensions = dependentFieldVariable%NUMBER_OF_COMPONENTS - 1 - ! Get the nodes on this computational domain + ! Get the nodes on this computation domain IF(independentFieldVariable%COMPONENTS(componentNumberVelocity)%INTERPOLATION_TYPE== & & FIELD_NODE_BASED_INTERPOLATION) THEN domain=>independentFieldVariable%COMPONENTS(componentNumberVelocity)%DOMAIN @@ -7077,7 +7121,7 @@ SUBROUTINE NavierStokes_PreSolveUpdateBoundaryConditions(SOLVER,err,error,*) CALL FlagError("Only node based interpolation is implemented.",err,error,*999) END IF - ! Construct the filename based on the computational node and time step + ! Construct the filename based on the computation node and time step inputFile = './../interpolatedData/fitData' //TRIM(NUMBER_TO_VSTRING(currentTimeLoopIteration, & & "*",ERR,ERROR)) // '.dat' @@ -7096,7 +7140,7 @@ SUBROUTINE NavierStokes_PreSolveUpdateBoundaryConditions(SOLVER,err,error,*) CALL DOMAIN_TOPOLOGY_NODE_CHECK_EXISTS(domain%Topology,userNodeNumber,nodeExists, & & localNodeNumber,ghostNode,err,error,*999) IF(nodeExists .AND. .NOT. ghostNode) THEN - ! Node found on this computational node + ! Node found on this computation node READ(10,*) (componentValues(componentIdx), componentIdx=1,numberOfDimensions) DO componentIdx=1,numberOfDimensions dependentDof = dependentFieldVariable%COMPONENTS(componentIdx)%PARAM_TO_DOF_MAP% & @@ -7115,7 +7159,7 @@ SUBROUTINE NavierStokes_PreSolveUpdateBoundaryConditions(SOLVER,err,error,*) END IF END DO !componentIdx ELSE - ! Dummy read if this node not on this computational node + ! Dummy read if this node not on this computation node READ(10,*) END IF END DO @@ -7695,7 +7739,7 @@ SUBROUTINE NavierStokes_PreSolveUpdateBoundaryConditions(SOLVER,err,error,*) IF(ASSOCIATED(independentField)) THEN componentNumberVelocity = 1 numberOfDimensions = dependentFieldVariable%NUMBER_OF_COMPONENTS - 1 - ! 
Get the nodes on this computational domain + ! Get the nodes on this computation domain IF(independentFieldVariable%COMPONENTS(componentNumberVelocity)%INTERPOLATION_TYPE== & & FIELD_NODE_BASED_INTERPOLATION) THEN domain=>independentFieldVariable%COMPONENTS(componentNumberVelocity)%DOMAIN @@ -7718,15 +7762,16 @@ SUBROUTINE NavierStokes_PreSolveUpdateBoundaryConditions(SOLVER,err,error,*) CALL FlagError("Only node based interpolation is implemented.",ERR,ERROR,*999) END IF - ! Construct the filename based on the computational node and time step + ! Construct the filename based on the computation node and time step inputFile = './../interpolatedData/fitData' //TRIM(NUMBER_TO_VSTRING(currentTimeLoopIteration, & & "*",ERR,ERROR)) // '.dat' INQUIRE(FILE=inputFile, EXIST=importDataFromFile) IF(importDataFromFile) THEN !Read fitted data from input file (if exists) - computationalNode = ComputationalEnvironment_NodeNumberGet(ERR,ERROR) - IF(computationalNode==0) THEN + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,computationNode, & + & err,error,*999) + IF(computationNode==0) THEN CALL WRITE_STRING(GENERAL_OUTPUT_TYPE,"Updating independent field and boundary nodes from " & & //inputFile,ERR,ERROR,*999) END IF @@ -7740,7 +7785,7 @@ SUBROUTINE NavierStokes_PreSolveUpdateBoundaryConditions(SOLVER,err,error,*) CALL DOMAIN_TOPOLOGY_NODE_CHECK_EXISTS(domain%Topology,userNodeNumber,nodeExists, & & localNodeNumber,ghostNode,err,error,*999) IF(nodeExists .AND. .NOT. ghostNode) THEN - ! Node found on this computational node + ! Node found on this computation node READ(10,*) (componentValues(componentIdx), componentIdx=1,numberOfDimensions) DO componentIdx=1,numberOfDimensions dependentDof = dependentFieldVariable%COMPONENTS(componentIdx)%PARAM_TO_DOF_MAP% & @@ -7759,7 +7804,7 @@ SUBROUTINE NavierStokes_PreSolveUpdateBoundaryConditions(SOLVER,err,error,*) END IF END DO !componentIdx ELSE - ! Dummy read if this node not on this computational node + ! Dummy read if this node not on this computation node READ(10,*) END IF END DO @@ -8717,7 +8762,7 @@ SUBROUTINE NavierStokes_PreSolveAleUpdateMesh(solver,err,error,*) LOGICAL :: fluidEquationsSetFound=.FALSE. LOGICAL :: solidEquationsSetFound=.FALSE. - ENTERS("NavierStokes_PreSolveALEUpdateMesh",err,error,*999) + ENTERS("NavierStokes_PreSolveAleUpdateMesh",err,error,*999) NULLIFY(controlLoop) NULLIFY(dynamicSolver) @@ -12466,8 +12511,8 @@ SUBROUTINE NavierStokes_CalculateBoundaryFlux(equationsSet,coupledEquationsSet,i INTEGER(INTG) :: faceNodeDerivativeIdx, meshComponentNumber INTEGER(INTG) :: normalComponentIdx INTEGER(INTG) :: boundaryID,numberOfBoundaries,boundaryType,coupledNodeNumber,numberOfGlobalBoundaries - INTEGER(INTG) :: MPI_IERROR,numberOfComputationalNodes - INTEGER(INTG) :: i,j,computationalNode + INTEGER(INTG) :: MPI_IERROR,numberOfWorldComputationNodes + INTEGER(INTG) :: i,j,computationNode,worldCommunicator REAL(DP) :: gaussWeight, normalProjection,elementNormal(3) REAL(DP) :: normalDifference,normalTolerance REAL(DP) :: courant,maxCourant,toleranceCourant @@ -12781,28 +12826,27 @@ SUBROUTINE NavierStokes_CalculateBoundaryFlux(equationsSet,coupledEquationsSet,i ! G a t h e r v a l u e s o v e r t h r e a d s ! ------------------------------------------------------ - ! Need to add boundary flux for any boundaries split accross computational nodes + ! 
Need to add boundary flux for any boundaries split accross computation nodes numberOfGlobalBoundaries = 0 globalBoundaryFlux = 0.0_DP globalBoundaryArea = 0.0_DP globalBoundaryPressure = 0.0_DP globalBoundaryNormalStress = 0.0_DP - numberOfComputationalNodes=computationalEnvironment%numberOfComputationalNodes - IF(numberOfComputationalNodes>1) THEN !use mpi - CALL MPI_ALLREDUCE(localBoundaryFlux,globalBoundaryFlux,10,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999) + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) + IF(numberOfWorldComputationNodes>1) THEN !use mpi + CALL MPI_ALLREDUCE(localBoundaryFlux,globalBoundaryFlux,10,MPI_DOUBLE_PRECISION,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,err,error,*999) - CALL MPI_ALLREDUCE(localBoundaryArea,globalBoundaryArea,10,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLREDUCE(localBoundaryArea,globalBoundaryArea,10,MPI_DOUBLE_PRECISION,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(localBoundaryNormalStress,globalBoundaryNormalStress,10,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(localBoundaryPressure,globalBoundaryPressure,10,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(numberOfBoundaries,numberOfGlobalBoundaries,1,MPI_INTEGER,MPI_MAX, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) ELSE numberOfGlobalBoundaries = numberOfBoundaries @@ -12820,8 +12864,8 @@ SUBROUTINE NavierStokes_CalculateBoundaryFlux(equationsSet,coupledEquationsSet,i END DO DO boundaryID=2,numberOfGlobalBoundaries IF(globalBoundaryArea(boundaryID) > ZERO_TOLERANCE) THEN - computationalNode = ComputationalEnvironment_NodeNumberGet(ERR,ERROR) - IF(computationalNode==0) THEN + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,computationNode,err,error,*999) + IF(computationNode==0) THEN CALL WriteStringTwoValue(DIAGNOSTIC_OUTPUT_TYPE,"3D boundary ",boundaryID," flow: ", & & globalBoundaryFlux(boundaryID),err,error,*999) CALL WriteStringTwoValue(DIAGNOSTIC_OUTPUT_TYPE,"3D boundary ",boundaryID," area: ", & @@ -13085,11 +13129,10 @@ SUBROUTINE NavierStokes_CalculateBoundaryFlux(equationsSet,coupledEquationsSet,i END DO !elementIdx !allocate array for mpi communication - IF(numberOfComputationalNodes>1) THEN !use mpi - ALLOCATE(globalConverged(numberOfComputationalNodes),STAT=ERR) + IF(numberOfWorldComputationNodes>1) THEN !use mpi + ALLOCATE(globalConverged(numberOfWorldComputationNodes),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate global convergence check array.",ERR,ERROR,*999) - CALL MPI_ALLGATHER(convergedFlag,1,MPI_LOGICAL,globalConverged,1,MPI_LOGICAL, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLGATHER(convergedFlag,1,MPI_LOGICAL,globalConverged,1,MPI_LOGICAL,worldCommunicator,MPI_IERROR) CALL 
MPI_ERROR_CHECK("MPI_ALLGATHER",MPI_IERROR,ERR,ERROR,*999) IF(ALL(globalConverged)) THEN convergedFlag = .TRUE. @@ -13159,7 +13202,7 @@ SUBROUTINE NavierStokes_Couple1D0D(controlLoop,solver,err,error,*) TYPE(VARYING_STRING) :: localError INTEGER(INTG) :: nodeNumber,nodeIdx,derivativeIdx,versionIdx,componentIdx,numberOfLocalNodes1D INTEGER(INTG) :: solver1dNavierStokesNumber,solverNumber,MPI_IERROR,timestep,iteration - INTEGER(INTG) :: boundaryNumber,numberOfBoundaries,numberOfComputationalNodes + INTEGER(INTG) :: boundaryNumber,numberOfBoundaries,numberOfWorldComputationNodes,worldCommunicator INTEGER(INTG) :: dependentDof,boundaryConditionType REAL(DP) :: A0_PARAM,E_PARAM,H_PARAM,beta,pCellML,normalWave(2) REAL(DP) :: qPrevious,pPrevious,aPrevious,q1d,a1d,qError,aError,couplingTolerance @@ -13381,13 +13424,13 @@ SUBROUTINE NavierStokes_Couple1D0D(controlLoop,solver,err,error,*) localConverged = .FALSE. END IF ! Need to check that boundaries have converged globally (on all domains) if this is a parallel problem - numberOfComputationalNodes=computationalEnvironment%numberOfComputationalNodes - IF(numberOfComputationalNodes>1) THEN !use mpi + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999) + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) + IF(numberOfWorldComputationNodes>1) THEN !use mpi !allocate array for mpi communication - ALLOCATE(globalConverged(numberOfComputationalNodes),STAT=ERR) + ALLOCATE(globalConverged(numberOfWorldComputationNodes),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate global convergence check array.",ERR,ERROR,*999) - CALL MPI_ALLGATHER(localConverged,1,MPI_LOGICAL,globalConverged,1,MPI_LOGICAL, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLGATHER(localConverged,1,MPI_LOGICAL,globalConverged,1,MPI_LOGICAL,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLGATHER",MPI_IERROR,err,error,*999) IF(ALL(globalConverged)) THEN !CALL WRITE_STRING_VALUE(DIAGNOSTIC_OUTPUT_TYPE,"1D/0D coupling converged; # iterations: ", & @@ -13450,8 +13493,8 @@ SUBROUTINE NavierStokes_Couple3D1D(controlLoop,err,error,*) TYPE(VARYING_STRING) :: localError INTEGER(INTG) :: nodeNumber,nodeIdx,derivativeIdx,versionIdx,componentIdx,numberOfLocalNodes1D INTEGER(INTG) :: solver1dNavierStokesNumber,MPI_IERROR,timestep,iteration - INTEGER(INTG) :: boundaryNumber,boundaryType1D,numberOfBoundaries,numberOfComputationalNodes - INTEGER(INTG) :: solver3dNavierStokesNumber,userNodeNumber,localDof,globalDof,computationalNode + INTEGER(INTG) :: boundaryNumber,boundaryType1D,numberOfBoundaries,numberOfWorldComputationNodes + INTEGER(INTG) :: solver3dNavierStokesNumber,userNodeNumber,localDof,globalDof,computationNode,worldCommunicator REAL(DP) :: normalWave(2) REAL(DP) :: flow1D,stress1D,flow1DPrevious,stress1DPrevious,flow3D,stress3D,flowError,stressError REAL(DP) :: maxStressError,maxFlowError,flowTolerance,stressTolerance,absoluteCouplingTolerance @@ -13670,22 +13713,23 @@ SUBROUTINE NavierStokes_Couple3D1D(controlLoop,err,error,*) localConverged = .TRUE. END IF ! 
Need to check that boundaries have converged globally (on all domains) if this is a MPI problem - numberOfComputationalNodes=computationalEnvironment%numberOfComputationalNodes - IF(numberOfComputationalNodes>1) THEN !use mpi + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999) + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) + IF(numberOfWorldComputationNodes>1) THEN !use mpi !allocate array for mpi communication IF(ERR/=0) CALL FlagError("Could not allocate global convergence check array.",ERR,ERROR,*999) - CALL MPI_ALLREDUCE(localConverged,globalConverged,1,MPI_LOGICAL,MPI_LAND,computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLREDUCE(localConverged,globalConverged,1,MPI_LOGICAL,MPI_LAND,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) IF(globalConverged) THEN CALL WRITE_STRING_VALUE(DIAGNOSTIC_OUTPUT_TYPE,"3D/1D coupling converged; # iterations: ", & & iteration,err,error,*999) iterativeLoop%CONTINUE_LOOP=.FALSE. ELSE - computationalNode = ComputationalEnvironment_NodeNumberGet(ERR,ERROR) - CALL WriteStringTwoValue(DIAGNOSTIC_OUTPUT_TYPE,"Rank ",computationalNode," 3D/1D max flow error: ", & + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,computationNode,err,error,*999) + CALL WriteStringTwoValue(DIAGNOSTIC_OUTPUT_TYPE,"Rank ",computationNode," 3D/1D max flow error: ", & & maxFlowError,err,error,*999) - CALL WriteStringTwoValue(DIAGNOSTIC_OUTPUT_TYPE,"Rank ",computationalNode," 3D/1D max stress error: ", & + CALL WriteStringTwoValue(DIAGNOSTIC_OUTPUT_TYPE,"Rank ",computationNode," 3D/1D max stress error: ", & & maxStressError,err,error,*999) END IF ELSE @@ -13750,8 +13794,8 @@ SUBROUTINE NavierStokes_CoupleCharacteristics(controlLoop,solver,err,error,*) TYPE(VARYING_STRING) :: localError INTEGER(INTG) :: nodeNumber,nodeIdx,derivativeIdx,versionIdx,componentIdx,i INTEGER(INTG) :: solver1dNavierStokesNumber,solverNumber - INTEGER(INTG) :: branchNumber,numberOfBranches,numberOfComputationalNodes,numberOfVersions - INTEGER(INTG) :: MPI_IERROR,timestep,iteration,outputIteration + INTEGER(INTG) :: branchNumber,numberOfBranches,numberOfWorldComputationNodes,numberOfVersions + INTEGER(INTG) :: MPI_IERROR,timestep,iteration,outputIteration,worldCommunicator REAL(DP) :: couplingTolerance,l2ErrorW(30),wPrevious(2,7),wNavierStokes(2,7),wCharacteristic(2,7),wError(2,7) REAL(DP) :: l2ErrorQ(100),qCharacteristic(7),qNavierStokes(7),wNext(2,7) REAL(DP) :: totalErrorWPrevious,startTime,stopTime,currentTime,timeIncrement @@ -13963,13 +14007,13 @@ SUBROUTINE NavierStokes_CoupleCharacteristics(controlLoop,solver,err,error,*) localConverged = .FALSE. END IF ! 
Need to check that boundaries have converged globally (on all domains) if this is a parallel problem - numberOfComputationalNodes=computationalEnvironment%numberOfComputationalNodes - IF(numberOfComputationalNodes>1) THEN !use mpi + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999) + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) + IF(numberOfWorldComputationNodes>1) THEN !use mpi !allocate array for mpi communication - ALLOCATE(globalConverged(numberOfComputationalNodes),STAT=ERR) + ALLOCATE(globalConverged(numberOfWorldComputationNodes),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate global convergence check array.",ERR,ERROR,*999) - CALL MPI_ALLGATHER(localConverged,1,MPI_LOGICAL,globalConverged,1,MPI_LOGICAL, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLGATHER(localConverged,1,MPI_LOGICAL,globalConverged,1,MPI_LOGICAL,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLGATHER",MPI_IERROR,err,error,*999) IF(ALL(globalConverged)) THEN !CALL WRITE_STRING_VALUE(DIAGNOSTIC_OUTPUT_TYPE,"Navier-Stokes/Characteristic converged; # iterations: ", & @@ -14077,7 +14121,7 @@ SUBROUTINE NavierStokes_ShearRateCalculate(equationsSet,err,error,*) DO elementIdx=startElement,stopElement localElementNumber=elementsMapping%DOMAIN_LIST(elementIdx) userElementNumber = elementsMapping%LOCAL_TO_GLOBAL_MAP(localElementNumber) - !Check computational node for elementIdx + !Check computation node for elementIdx elementExists=.FALSE. ghostElement=.TRUE. CALL DECOMPOSITION_TOPOLOGY_ELEMENT_CHECK_EXISTS(decomposition%TOPOLOGY, & @@ -14927,8 +14971,8 @@ SUBROUTINE NavierStokes_CalculateBoundaryFlux3D0D(equationsSet,err,error,*) INTEGER(INTG) :: faceNodeIdx, elementNodeIdx INTEGER(INTG) :: faceNodeDerivativeIdx, meshComponentNumber INTEGER(INTG) :: boundaryID,numberOfBoundaries,boundaryType,coupledNodeNumber,numberOfGlobalBoundaries - INTEGER(INTG) :: MPI_IERROR,numberOfComputationalNodes - INTEGER(INTG) :: computationalNode,xiDirection(3),orientation + INTEGER(INTG) :: MPI_IERROR,numberOfWorldComputationNodes + INTEGER(INTG) :: computationNode,xiDirection(3),orientation,worldCommunicator REAL(DP) :: gaussWeight, elementNormal(3) REAL(DP) :: normalDifference,normalTolerance REAL(DP) :: courant,maxCourant,toleranceCourant,boundaryValueTemp @@ -15155,24 +15199,25 @@ SUBROUTINE NavierStokes_CalculateBoundaryFlux3D0D(equationsSet,err,error,*) ! G a t h e r v a l u e s o v e r t h r e a d s ! ------------------------------------------------------ - ! Need to add boundary flux for any boundaries split accross computational nodes + ! 
Need to add boundary flux for any boundaries split accross computation nodes numberOfGlobalBoundaries = 0 globalBoundaryFlux = 0.0_DP globalBoundaryArea = 0.0_DP globalBoundaryPressure = 0.0_DP - numberOfComputationalNodes=computationalEnvironment%numberOfComputationalNodes - IF(numberOfComputationalNodes>1) THEN !use mpi + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999) + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) + IF(numberOfWorldComputationNodes>1) THEN !use mpi CALL MPI_ALLREDUCE(localBoundaryFlux,globalBoundaryFlux,10,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(localBoundaryArea,globalBoundaryArea,10,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(localBoundaryPressure,globalBoundaryPressure,10,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(numberOfBoundaries,numberOfGlobalBoundaries,1,MPI_INTEGER,MPI_MAX, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) ELSE numberOfGlobalBoundaries = numberOfBoundaries @@ -15188,8 +15233,8 @@ SUBROUTINE NavierStokes_CalculateBoundaryFlux3D0D(equationsSet,err,error,*) END DO DO boundaryID=2,numberOfGlobalBoundaries IF(globalBoundaryArea(boundaryID) > ZERO_TOLERANCE) THEN - computationalNode = ComputationalEnvironment_NodeNumberGet(ERR,ERROR) - IF(computationalNode==0) THEN + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,computationNode,err,error,*999) + IF(computationNode==0) THEN CALL WriteStringTwoValue(DIAGNOSTIC_OUTPUT_TYPE,"3D boundary ",boundaryID," flow: ", & & globalBoundaryFlux(boundaryID),err,error,*999) CALL WriteStringTwoValue(DIAGNOSTIC_OUTPUT_TYPE,"3D boundary ",boundaryID," mean pressure: ", & @@ -15316,11 +15361,10 @@ SUBROUTINE NavierStokes_CalculateBoundaryFlux3D0D(equationsSet,err,error,*) END DO !elementIdx !allocate array for mpi communication - IF(numberOfComputationalNodes>1) THEN !use mpi - ALLOCATE(globalConverged(numberOfComputationalNodes),STAT=ERR) + IF(numberOfWorldComputationNodes>1) THEN !use mpi + ALLOCATE(globalConverged(numberOfWorldComputationNodes),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate global convergence check array.",ERR,ERROR,*999) - CALL MPI_ALLGATHER(convergedFlag,1,MPI_LOGICAL,globalConverged,1,MPI_LOGICAL, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLGATHER(convergedFlag,1,MPI_LOGICAL,globalConverged,1,MPI_LOGICAL,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLGATHER",MPI_IERROR,ERR,ERROR,*999) IF(ALL(globalConverged)) THEN convergedFlag = .TRUE. 
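[Reviewer note, illustrative only and not part of the patch] The hunks above replace the old function-style call rank = ComputationalEnvironment_NodeNumberGet(err,error) and direct computationalEnvironment%numberOfComputationalNodes / %mpiCommunicator member access with the new ComputationAccessRoutines getters. Below is a minimal sketch of the new calling convention, not code from the patch: the subroutine name ExampleWorldReduce and the localTotal/globalTotal arguments are hypothetical, and it assumes the module-level computationEnvironment variable, the INTG/DP kinds, MPI_ERROR_CHECK from CmissMPI, plain USE MPI (the real modules guard this with NOMPIMOD), and the err/error/*999 alternate-return convention used throughout the modified modules.

SUBROUTINE ExampleWorldReduce(localTotal,globalTotal,err,error,*)
  USE CmissMPI
  USE ComputationRoutines
  USE ComputationAccessRoutines
  USE ISO_VARYING_STRING
  USE Kinds
  USE MPI
  IMPLICIT NONE
  REAL(DP), INTENT(IN) :: localTotal !<Local contribution on this computation node (hypothetical argument)
  REAL(DP), INTENT(OUT) :: globalTotal !<Sum over all world computation nodes (hypothetical argument)
  INTEGER(INTG), INTENT(OUT) :: err !<The error code
  TYPE(VARYING_STRING), INTENT(OUT) :: error !<The error string
  INTEGER(INTG) :: numberOfWorldComputationNodes,worldCommunicator,MPI_IERROR

  !Query the world size and communicator through the new accessors instead of reading
  !computationalEnvironment% members directly, as done in the hunks above.
  CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999)
  CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999)
  globalTotal=localTotal
  IF(numberOfWorldComputationNodes>1) THEN
    !Reduce across the world communicator returned by the accessor.
    CALL MPI_ALLREDUCE(localTotal,globalTotal,1,MPI_DOUBLE_PRECISION,MPI_SUM,worldCommunicator,MPI_IERROR)
    CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,err,error,*999)
  ENDIF

  RETURN
999 RETURN 1
END SUBROUTINE ExampleWorldReduce
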
diff --git a/src/analytic_analysis_routines.f90 b/src/analytic_analysis_routines.f90 index c3a08ff8..50f5d4e2 100755 --- a/src/analytic_analysis_routines.f90 +++ b/src/analytic_analysis_routines.f90 @@ -47,7 +47,8 @@ MODULE ANALYTIC_ANALYSIS_ROUTINES USE BasisRoutines USE BasisAccessRoutines USE CmissMPI - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE Constants USE FIELD_ROUTINES USE FieldAccessRoutines @@ -108,7 +109,7 @@ MODULE ANALYTIC_ANALYSIS_ROUTINES CONTAINS ! - !================================================================================================================================ + !================================================================================================================================= ! !>Output the analytic error analysis for a dependent field compared to the analytic values parameter set. \see OPENCMISS::CMISSAnalyticAnalytisOutput @@ -121,7 +122,7 @@ SUBROUTINE AnalyticAnalysis_Output(FIELD,FILENAME,ERR,ERROR,*) TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !=1) THEN -!!TODO \todo have more general ascii file mechanism - IF(computationalEnvironment%numberOfComputationalNodes>1) THEN - WRITE(FILE_NAME,'(A,".opanal.",I0)') FILENAME(1:LEN_TRIM(FILENAME)),computationalEnvironment% & - & myComputationalNodeNumber - ELSE - FILE_NAME=FILENAME(1:LEN_TRIM(FILENAME))//".opanal" - ENDIF - OUTPUT_ID=IO1_FILE_UNIT - OPEN(UNIT=OUTPUT_ID,FILE=FILE_NAME(1:LEN_TRIM(FILE_NAME)),STATUS="REPLACE",FORM="FORMATTED",IOSTAT=ERR) - IF(ERR/=0) CALL FlagError("Error opening analysis output file.",ERR,ERROR,*999) - ELSE - OUTPUT_ID=GENERAL_OUTPUT_TYPE - ENDIF DECOMPOSITION=>FIELD%DECOMPOSITION IF(ASSOCIATED(DECOMPOSITION)) THEN + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999) + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) DECOMPOSITION_TOPOLOGY=>DECOMPOSITION%TOPOLOGY IF(ASSOCIATED(DECOMPOSITION_TOPOLOGY)) THEN + IF(LEN_TRIM(FILENAME)>=1) THEN +!!TODO \todo have more general ascii file mechanism + IF(numberOfWorldComputationNodes>1) THEN + WRITE(FILE_NAME,'(A,".opanal.",I0)') FILENAME(1:LEN_TRIM(FILENAME)),myWorldComputationNodeNumber + ELSE + FILE_NAME=FILENAME(1:LEN_TRIM(FILENAME))//".opanal" + ENDIF + OUTPUT_ID=IO1_FILE_UNIT + OPEN(UNIT=OUTPUT_ID,FILE=FILE_NAME(1:LEN_TRIM(FILE_NAME)),STATUS="REPLACE",FORM="FORMATTED",IOSTAT=ERR) + IF(ERR/=0) CALL FlagError("Error opening analysis output file.",ERR,ERROR,*999) + ELSE + OUTPUT_ID=GENERAL_OUTPUT_TYPE + ENDIF CALL WRITE_STRING(OUTPUT_ID,"Analytic error analysis:",ERR,ERROR,*999) CALL WRITE_STRING(OUTPUT_ID,"",ERR,ERROR,*999) LOCAL_STRING="Field "//TRIM(NUMBER_TO_VSTRING(FIELD%USER_NUMBER,"*",ERR,ERROR))//" : "//FIELD%LABEL @@ -271,7 +274,7 @@ SUBROUTINE AnalyticAnalysis_Output(FIELD,FILENAME,ERR,ERROR,*) !Output RMS errors CALL WRITE_STRING(OUTPUT_ID,"",ERR,ERROR,*999) IF(NUMBER(1)>0) THEN - IF(computationalEnvironment%numberOfComputationalNodes>1) THEN + IF(numberOfWorldComputationNodes>1) THEN !Local elements only CALL WRITE_STRING(OUTPUT_ID,"Local RMS errors:",ERR,ERROR,*999) LOCAL_STRING= & @@ -294,17 +297,16 @@ SUBROUTINE AnalyticAnalysis_Output(FIELD,FILENAME,ERR,ERROR,*) & ERR,ERROR,*999) !Global RMS values !Collect the values across the ranks - CALL MPI_ALLREDUCE(MPI_IN_PLACE,NUMBER,1,MPI_INTEGER,MPI_SUM, & - & 
computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLREDUCE(MPI_IN_PLACE,NUMBER,1,MPI_INTEGER,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR_PER,1,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR_ABS,1,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR_REL,1,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL WRITE_STRING(OUTPUT_ID,"Global RMS errors:",ERR,ERROR,*999) LOCAL_STRING= & @@ -401,7 +403,7 @@ SUBROUTINE AnalyticAnalysis_Output(FIELD,FILENAME,ERR,ERROR,*) ENDDO !node_idx !Output RMS errors CALL WRITE_STRING(OUTPUT_ID,"",ERR,ERROR,*999) - IF(computationalEnvironment%numberOfComputationalNodes>1) THEN + IF(numberOfWorldComputationNodes>1) THEN IF(ANY(NUMBER>0)) THEN !Local nodes only CALL WRITE_STRING(OUTPUT_ID,"Local RMS errors:",ERR,ERROR,*999) @@ -438,17 +440,16 @@ SUBROUTINE AnalyticAnalysis_Output(FIELD,FILENAME,ERR,ERROR,*) ENDDO !deriv_idx !Global RMS values !Collect the values across the ranks - CALL MPI_ALLREDUCE(MPI_IN_PLACE,NUMBER,8,MPI_INTEGER,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLREDUCE(MPI_IN_PLACE,NUMBER,8,MPI_INTEGER,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR_PER,8,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR_ABS,8,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR_REL,8,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + & worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL WRITE_STRING(OUTPUT_ID,"Global RMS errors:",ERR,ERROR,*999) LOCAL_STRING= & @@ -518,7 +519,7 @@ SUBROUTINE AnalyticAnalysis_Output(FIELD,FILENAME,ERR,ERROR,*) ALLOCATE(GHOST_INTEGRAL_ERRORS(6,FIELD_VARIABLE%NUMBER_OF_COMPONENTS),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate ghost integral errors.",ERR,ERROR,*999) CALL ANALYTIC_ANALYSIS_INTEGRAL_ERRORS(FIELD_VARIABLE,INTEGRAL_ERRORS,GHOST_INTEGRAL_ERRORS,ERR,ERROR,*999) - IF(computationalEnvironment%numberOfComputationalNodes>1) THEN + IF(numberOfWorldComputationNodes>1) THEN CALL WRITE_STRING(OUTPUT_ID,"Local Integral errors:",ERR,ERROR,*999) LOCAL_STRING="Component# Numerical Analytic % error Absolute err Relative err" CALL WRITE_STRING(OUTPUT_ID,LOCAL_STRING,ERR,ERROR,*999) @@ -591,7 +592,7 @@ SUBROUTINE AnalyticAnalysis_Output(FIELD,FILENAME,ERR,ERROR,*) ENDDO !component_idx !Collect the values across the ranks CALL MPI_ALLREDUCE(MPI_IN_PLACE,INTEGRAL_ERRORS,6*FIELD_VARIABLE%NUMBER_OF_COMPONENTS,MPI_DOUBLE_PRECISION, & - & 
MPI_SUM,computationalEnvironment%mpiCommunicator,MPI_IERROR) + & MPI_SUM,worldCommunicator,MPI_IERROR) CALL WRITE_STRING(OUTPUT_ID,"Global Integral errors:",ERR,ERROR,*999) LOCAL_STRING="Component# Numerical Analytic % error Absolute err Relative err" CALL WRITE_STRING(OUTPUT_ID,LOCAL_STRING,ERR,ERROR,*999) @@ -1649,7 +1650,8 @@ SUBROUTINE AnalyticAnalysis_RMSErrorGetNode(FIELD,VARIABLE_TYPE,COMPONENT_NUMBER TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !FIELD%VARIABLE_TYPE_MAP(VARIABLE_TYPE)%PTR%COMPONENTS(COMPONENT_NUMBER)%DOMAIN%TOPOLOGY%NODES IF(ASSOCIATED(NODES_DOMAIN)) THEN NUMBER=0 @@ -1710,7 +1715,7 @@ SUBROUTINE AnalyticAnalysis_RMSErrorGetNode(FIELD,VARIABLE_TYPE,COMPONENT_NUMBER ENDDO !deriv_idx ENDDO !node_idx - IF(computationalEnvironment%numberOfComputationalNodes>1) THEN + IF(numberOfWorldComputationNodes>1) THEN IF(ANY(NUMBER>0)) THEN DO deriv_idx=1,8 IF(NUMBER(deriv_idx)>0) THEN @@ -1720,15 +1725,14 @@ SUBROUTINE AnalyticAnalysis_RMSErrorGetNode(FIELD,VARIABLE_TYPE,COMPONENT_NUMBER DO deriv_idx=1,8 IF(NUMBER(deriv_idx)>0) THEN LOCAL_GHOST_RMS(deriv_idx)=SQRT((RMS_ERROR(deriv_idx)+GHOST_RMS_ERROR(deriv_idx))/(NUMBER(deriv_idx) & - & +GHOST_NUMBER(deriv_idx))) + & +GHOST_NUMBER(deriv_idx))) ENDIF ENDDO !deriv_idx !Global RMS values !Collect the values across the ranks - CALL MPI_ALLREDUCE(MPI_IN_PLACE,NUMBER,8,MPI_INTEGER,MPI_SUM,computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLREDUCE(MPI_IN_PLACE,NUMBER,8,MPI_INTEGER,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) - CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR,8,MPI_DOUBLE_PRECISION,MPI_SUM,computationalEnvironment%mpiCommunicator, & - & MPI_IERROR) + CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR,8,MPI_DOUBLE_PRECISION,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) DO deriv_idx=1,8 IF(NUMBER(deriv_idx)>0) THEN @@ -1757,6 +1761,7 @@ SUBROUTINE AnalyticAnalysis_RMSErrorGetNode(FIELD,VARIABLE_TYPE,COMPONENT_NUMBER RETURN 999 ERRORSEXITS("AnalyticAnalysis_RMSErrorGetNode",ERR,ERROR) RETURN 1 + END SUBROUTINE AnalyticAnalysis_RMSErrorGetNode ! 
@@ -1779,7 +1784,7 @@ SUBROUTINE AnalyticAnalysis_RMSErrorGetElement(FIELD,VARIABLE_TYPE,COMPONENT_NUM TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !FIELD%VARIABLE_TYPE_MAP(VARIABLE_TYPE)%PTR%COMPONENTS(COMPONENT_NUMBER)%DOMAIN ELEMENTS_DOMAIN=>DOMAIN%TOPOLOGY%ELEMENTS IF(ASSOCIATED(ELEMENTS_DOMAIN)) THEN @@ -1839,18 +1847,16 @@ SUBROUTINE AnalyticAnalysis_RMSErrorGetElement(FIELD,VARIABLE_TYPE,COMPONENT_NUM GHOST_RMS_ERROR=GHOST_RMS_ERROR+ERROR_VALUE*ERROR_VALUE ENDDO !element_idx IF(NUMBER>0) THEN - IF(computationalEnvironment%numberOfComputationalNodes>1) THEN + IF(numberOfWorldComputationNodes>1) THEN !Local elements only LOCAL_RMS=SQRT(RMS_ERROR/NUMBER) !Local and ghost elements LOCAL_GHOST_RMS=SQRT((RMS_ERROR+GHOST_RMS_ERROR)/(NUMBER+GHOST_NUMBER)) !Global RMS values !Collect the values across the ranks - CALL MPI_ALLREDUCE(MPI_IN_PLACE,NUMBER,1,MPI_INTEGER,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLREDUCE(MPI_IN_PLACE,NUMBER,1,MPI_INTEGER,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) - CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR,1,MPI_DOUBLE_PRECISION,MPI_SUM, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLREDUCE(MPI_IN_PLACE,RMS_ERROR,1,MPI_DOUBLE_PRECISION,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) GLOBAL_RMS=SQRT(RMS_ERROR/NUMBER) ENDIF diff --git a/src/base_routines.f90 b/src/base_routines.f90 index eedb27a6..7cc45ca0 100755 --- a/src/base_routines.f90 +++ b/src/base_routines.f90 @@ -146,8 +146,8 @@ MODULE BaseRoutines !Module variables - INTEGER(INTG), SAVE :: myComputationalNodeNumber !Set the computational node numbers. Note: this is done as a subroutine as ComputationalEnvironment depends on BaseRoutines. - SUBROUTINE ComputationalNodeNumbersSet(myNodeNumber,numberOfNodes,err,error,*) + !>Set the computation node numbers. Note: this is done as a subroutine as ComputationEnvironment depends on BaseRoutines. + SUBROUTINE ComputationNodeNumbersSet(myNodeNumber,numberOfNodes,err,error,*) !Argument variables INTEGER(INTG), INTENT(IN) :: myNodeNumber !0) THEN IF(myNodeNumber>=0.AND.myNodeNumber<=numberOfNodes-1) THEN - myComputationalNodeNumber=myNodeNumber - numberOfComputationalNodes=numberOfNodes + myWorldComputationNodeNumber=myNodeNumber + numberOfWorldComputationNodes=numberOfNodes ELSE CALL FlagError("Invalid node number.",err,error,*999) ENDIF @@ -562,12 +562,12 @@ SUBROUTINE ComputationalNodeNumbersSet(myNodeNumber,numberOfNodes,err,error,*) CALL FlagError("Invalid number of nodes.",err,error,*999) ENDIF - EXITS("ComputationalNodeNumbersSet") + EXITS("ComputationNodeNumbersSet") RETURN -999 ERRORSEXITS("ComputationalNodeNumbersSet",err,error) +999 ERRORSEXITS("ComputationNodeNumbersSet",err,error) RETURN 1 - END SUBROUTINE ComputationalNodeNumbersSet + END SUBROUTINE ComputationNodeNumbersSet ! 
!================================================================================================================================ @@ -708,8 +708,8 @@ SUBROUTINE FlagWarningC(string,err,error,*) TYPE(VARYING_STRING), INTENT(OUT) :: error !1) THEN - WRITE(outputString,'(">>WARNING (",I0,"): ",A)') myComputationalNodeNumber,string + IF(numberOfWorldComputationNodes>1) THEN + WRITE(outputString,'(">>WARNING (",I0,"): ",A)') myWorldComputationNodeNumber,string ELSE WRITE(outputString,'(">>WARNING: ",A)') string ENDIF @@ -734,8 +734,8 @@ SUBROUTINE FlagWarningVS(string,err,error,*) TYPE(VARYING_STRING), INTENT(OUT) :: error !1) THEN - WRITE(outputString,'(">>WARNING (",I0,"): ",A)') myComputationalNodeNumber,CHAR(string) + IF(numberOfWorldComputationNodes>1) THEN + WRITE(outputString,'(">>WARNING (",I0,"): ",A)') myWorldComputationNodeNumber,CHAR(string) ELSE WRITE(outputString,'(">>WARNING: ",A)') CHAR(string) ENDIF @@ -783,8 +783,8 @@ SUBROUTINE BaseRoutines_Initialise(err,error,*) err=0 error="" - myComputationalNodeNumber=0 - numberOfComputationalNodes=1 + myWorldComputationNodeNumber=0 + numberOfWorldComputationNodes=1 diagnostics=.FALSE. diagnostics1=.FALSE. diagnostics2=.FALSE. @@ -928,8 +928,8 @@ SUBROUTINE DiagnosticsSetOn(diagType,levelList,diagFilename,routineList,err,erro IF(LEN_TRIM(diagFilename)>=1) THEN IF(diagFileOpen) CLOSE(UNIT=DIAGNOSTICS_FILE_UNIT) - IF(numberOfComputationalNodes>1) THEN - WRITE(filename,'(A,".diag.",I0)') diagFilename(1:LEN_TRIM(diagFilename)),myComputationalNodeNumber + IF(numberOfWorldComputationNodes>1) THEN + WRITE(filename,'(A,".diag.",I0)') diagFilename(1:LEN_TRIM(diagFilename)),myWorldComputationNodeNumber ELSE filename=diagFilename(1:LEN_TRIM(diagFilename))//".diag" ENDIF @@ -1063,8 +1063,8 @@ SUBROUTINE OutputSetOn(echoFilename,err,error,*) IF(echoOutput) THEN CALL FlagError("Write output is already on.",err,error,*999) ELSE - IF(numberOfComputationalNodes>1) THEN - WRITE(filename,'(A,".out.",I0)') echoFilename(1:LEN_TRIM(echoFilename)),myComputationalNodeNumber + IF(numberOfWorldComputationNodes>1) THEN + WRITE(filename,'(A,".out.",I0)') echoFilename(1:LEN_TRIM(echoFilename)),myWorldComputationNodeNumber ELSE filename=echoFilename(1:LEN_TRIM(echoFilename))//".out" ENDIF @@ -1233,8 +1233,8 @@ SUBROUTINE TimingSetOn(timingType,timingSummaryFlag,timingFilename,routineList,e NULLIFY(routine) IF(LEN_TRIM(timingFilename)>=1) THEN IF(timingFileOpen) CLOSE(UNIT=TIMING_FILE_UNIT) - IF(numberOfComputationalNodes>1) THEN - WRITE(filename,'(A,".timing.",I0)') timingFilename(1:LEN_TRIM(timingFilename)),myComputationalNodeNumber + IF(numberOfWorldComputationNodes>1) THEN + WRITE(filename,'(A,".timing.",I0)') timingFilename(1:LEN_TRIM(timingFilename)),myWorldComputationNodeNumber ELSE filename=timingFilename(1:LEN_TRIM(timingFilename))//".timing" ENDIF @@ -1389,8 +1389,8 @@ SUBROUTINE WriteError(err,error,*) TYPE(VARYING_STRING) :: localError,localError2 indent=2 - IF(numberOfComputationalNodes>1) THEN - WRITE(startString,'(A,A,I0,A,X,I0,A)') indentString(1:indent),"ERROR (",myComputationalNodeNumber,"):", & + IF(numberOfWorldComputationNodes>1) THEN + WRITE(startString,'(A,A,I0,A,X,I0,A)') indentString(1:indent),"ERROR (",myWorldComputationNodeNumber,"):", & & ERR,":" startStringLength=LEN_TRIM(startString) ELSE diff --git a/src/boundary_condition_routines.f90 b/src/boundary_condition_routines.f90 index 2a9e9368..42e47b90 100755 --- a/src/boundary_condition_routines.f90 +++ b/src/boundary_condition_routines.f90 @@ -26,7 +26,7 @@ !> Auckland, the University of 
Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): Chris Bradley +!> Contributor(s): Ting Yu, Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -48,7 +48,8 @@ MODULE BOUNDARY_CONDITIONS_ROUTINES USE BasisRoutines USE BasisAccessRoutines USE CmissMPI - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE CONSTANTS USE COORDINATE_ROUTINES USE DistributedMatrixVector @@ -283,7 +284,7 @@ SUBROUTINE BOUNDARY_CONDITIONS_CREATE_FINISH(BOUNDARY_CONDITIONS,ERR,ERROR,*) INTEGER(INTG) :: MPI_IERROR,SEND_COUNT,STORAGE_TYPE, NUMBER_OF_NON_ZEROS, NUMBER_OF_ROWS,COUNT INTEGER(INTG) :: variable_idx,dof_idx, equ_matrix_idx, dirichlet_idx, row_idx, DUMMY, LAST, DIRICHLET_DOF INTEGER(INTG) :: col_idx,equations_set_idx,parameterSetIdx - INTEGER(INTG) :: pressureIdx,neumannIdx + INTEGER(INTG) :: pressureIdx,neumannIdx,numberOfWorldComputationNodes,myWorldComputationNodeNumber,worldCommunicator INTEGER(INTG), POINTER :: ROW_INDICES(:), COLUMN_INDICES(:) TYPE(BOUNDARY_CONDITIONS_VARIABLE_TYPE), POINTER :: BOUNDARY_CONDITION_VARIABLE TYPE(DOMAIN_MAPPING_TYPE), POINTER :: VARIABLE_DOMAIN_MAPPING @@ -313,8 +314,11 @@ SUBROUTINE BOUNDARY_CONDITIONS_CREATE_FINISH(BOUNDARY_CONDITIONS,ERR,ERROR,*) CALL FlagError("Boundary conditions have already been finished.",ERR,ERROR,*999) ELSE IF(ALLOCATED(BOUNDARY_CONDITIONS%BOUNDARY_CONDITIONS_VARIABLES)) THEN - IF(computationalEnvironment%numberOfComputationalNodes>0) THEN - !Transfer all the boundary conditions to all the computational nodes. + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999) + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) + IF(numberOfWorldComputationNodes>0) THEN + !Transfer all the boundary conditions to all the computation nodes. !\todo Look at this. DO variable_idx=1,BOUNDARY_CONDITIONS%NUMBER_OF_BOUNDARY_CONDITIONS_VARIABLES BOUNDARY_CONDITION_VARIABLE=>BOUNDARY_CONDITIONS%BOUNDARY_CONDITIONS_VARIABLES(variable_idx)%PTR @@ -324,41 +328,41 @@ SUBROUTINE BOUNDARY_CONDITIONS_CREATE_FINISH(BOUNDARY_CONDITIONS,ERR,ERROR,*) VARIABLE_DOMAIN_MAPPING=>FIELD_VARIABLE%DOMAIN_MAPPING IF(ASSOCIATED(VARIABLE_DOMAIN_MAPPING)) THEN SEND_COUNT=VARIABLE_DOMAIN_MAPPING%NUMBER_OF_GLOBAL - IF(computationalEnvironment%numberOfComputationalNodes>1) THEN + IF(numberOfWorldComputationNodes>1) THEN !\todo This operation is a little expensive as we are doing an unnecessary sum across all the ranks in order to combin !\todo the data from each rank into all ranks. We will see how this goes for now. 
CALL MPI_ALLREDUCE(MPI_IN_PLACE,BOUNDARY_CONDITION_VARIABLE%DOF_TYPES, & - & SEND_COUNT,MPI_INTEGER,MPI_SUM,computationalEnvironment%mpiCommunicator,MPI_IERROR) + & SEND_COUNT,MPI_INTEGER,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(MPI_IN_PLACE,BOUNDARY_CONDITION_VARIABLE%CONDITION_TYPES, & - & SEND_COUNT,MPI_INTEGER,MPI_SUM,computationalEnvironment%mpiCommunicator,MPI_IERROR) + & SEND_COUNT,MPI_INTEGER,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) ENDIF !mpi_in_place bug workaround - only do this when num comp nodes > 1 - ELSE + ELSE LOCAL_ERROR="Field variable domain mapping is not associated for variable type "// & & TRIM(NUMBER_TO_VSTRING(variable_idx,"*",ERR,ERROR))//"." CALL FlagError(LOCAL_ERROR,ERR,ERROR,*999) ENDIF - IF(computationalEnvironment%numberOfComputationalNodes>1) THEN + IF(numberOfWorldComputationNodes>1) THEN ! Update the total number of boundary condition types by summing across all nodes CALL MPI_ALLREDUCE(MPI_IN_PLACE,BOUNDARY_CONDITION_VARIABLE%DOF_COUNTS, & - & MAX_BOUNDARY_CONDITION_NUMBER,MPI_INTEGER,MPI_SUM,computationalEnvironment%mpiCommunicator,MPI_IERROR) + & MAX_BOUNDARY_CONDITION_NUMBER,MPI_INTEGER,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) CALL MPI_ALLREDUCE(MPI_IN_PLACE,BOUNDARY_CONDITION_VARIABLE%NUMBER_OF_DIRICHLET_CONDITIONS, & - & 1,MPI_INTEGER,MPI_SUM,computationalEnvironment%mpiCommunicator,MPI_IERROR) + & 1,MPI_INTEGER,MPI_SUM,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) ENDIF !mpi_in_place bug workaround - only do this when num comp nodes > 1 ! Check that the boundary conditions set are appropriate for equations sets CALL BoundaryConditions_CheckEquations(BOUNDARY_CONDITION_VARIABLE,ERR,ERROR,*999) - IF(computationalEnvironment%numberOfComputationalNodes>1) THEN + IF(numberOfWorldComputationNodes>1) THEN !Make sure the required parameter sets are created on all computational nodes and begin updating them CALL MPI_ALLREDUCE(MPI_IN_PLACE,BOUNDARY_CONDITION_VARIABLE%parameterSetRequired, & - & FIELD_NUMBER_OF_SET_TYPES,MPI_LOGICAL,MPI_LOR,computationalEnvironment%mpiCommunicator,MPI_IERROR) + & FIELD_NUMBER_OF_SET_TYPES,MPI_LOGICAL,MPI_LOR,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) DO parameterSetIdx=1,FIELD_NUMBER_OF_SET_TYPES IF(BOUNDARY_CONDITION_VARIABLE%parameterSetRequired(parameterSetIdx)) THEN @@ -1432,6 +1436,7 @@ SUBROUTINE BOUNDARY_CONDITIONS_SET_LOCAL_DOF1(BOUNDARY_CONDITIONS,FIELD,VARIABLE !Local Variables ENTERS("BOUNDARY_CONDITIONS_SET_LOCAL_DOF1",ERR,ERROR,*999) + CALL BOUNDARY_CONDITIONS_SET_LOCAL_DOFS(BOUNDARY_CONDITIONS,FIELD,VARIABLE_TYPE,[DOF_INDEX],[CONDITION],[VALUE], & & ERR,ERROR,*999) @@ -2226,7 +2231,7 @@ SUBROUTINE BoundaryConditions_NeumannMatricesInitialise(boundaryConditionsVariab TYPE(DOMAIN_LINE_TYPE), POINTER :: line TYPE(DOMAIN_FACE_TYPE), POINTER :: face TYPE(LIST_TYPE), POINTER :: columnIndicesList, rowColumnIndicesList - INTEGER(INTG) :: myComputationalNodeNumber + INTEGER(INTG) :: myWorldComputationNodeNumber INTEGER(INTG) :: numberOfPointDofs, numberNonZeros, numberRowEntries, neumannConditionNumber, localNeumannConditionIdx INTEGER(INTG) :: neumannIdx, globalDof, localDof, localDofNyy, domainIdx, numberOfDomains, domainNumber, componentNumber INTEGER(INTG) :: nodeIdx, derivIdx, nodeNumber, versionNumber, derivativeNumber, 
columnNodeNumber, lineIdx, faceIdx, columnDof @@ -2521,11 +2526,11 @@ SUBROUTINE BoundaryConditions_NeumannMatricesInitialise(boundaryConditionsVariab !Set up vector of Neumann point values CALL DistributedVector_CreateStart(pointDofMapping,boundaryConditionsNeumann%pointValues,err,error,*999) CALL DistributedVector_CreateFinish(boundaryConditionsNeumann%pointValues,err,error,*999) - myComputationalNodeNumber=ComputationalEnvironment_NodeNumberGet(err,error) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999) !Set point values vector from boundary conditions field parameter set DO neumannIdx=1,numberOfPointDofs globalDof=boundaryConditionsNeumann%setDofs(neumannIdx) - IF(rhsVariable%DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(globalDof)%DOMAIN_NUMBER(1)==myComputationalNodeNumber) THEN + IF(rhsVariable%DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(globalDof)%DOMAIN_NUMBER(1)==myWorldComputationNodeNumber) THEN localDof=rhsVariable%DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(globalDof)%LOCAL_NUMBER(1) ! Set point DOF vector value localNeumannConditionIdx=boundaryConditionsNeumann%pointDofMapping%GLOBAL_TO_LOCAL_MAP(neumannIdx)%LOCAL_NUMBER(1) @@ -2647,7 +2652,7 @@ SUBROUTINE BoundaryConditions_NeumannIntegrate(rhsBoundaryConditions,err,error,* TYPE(VARYING_STRING), INTENT(OUT) :: error ! Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): +!> Contributor(s): Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -54,7 +54,8 @@ MODULE Cmiss USE BaseRoutines USE BasisRoutines - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE Constants USE COORDINATE_ROUTINES USE GENERATED_MESH_ROUTINES @@ -89,18 +90,18 @@ MODULE Cmiss !> \addtogroup CMFE_ErrorHandlingModes OpenCMISS::Iron::ErrorHandlingModes !> \brief Error handling mode parameters - !> \see CMISS + !> \see OpenCMISS !>@{ - INTEGER(INTG), PARAMETER :: CMFE_RETURN_ERROR_CODE = 0 !@} !Module types !Module variables - INTEGER(INTG), SAVE :: cmfe_ErrorHandlingMode !Returns the error handling mode for CMISS \see OPENOpenCMISS::Iron::CMISSErrorHandlingModeGet + !>Returns the error handling mode for OpenCMISS \see OpenCMISS::Iron::cmfe_ErrorHandlingModeGet SUBROUTINE cmfe_ErrorHandlingModeGet_(errorHandlingMode,err,error,*) !Argument variables - INTEGER(INTG), INTENT(OUT) :: errorHandlingMode !Sets the error handling mode for cmiss \see OPENOpenCMISS::Iron::CMISSErrorHandlingModeSet + !>Sets the error handling mode for cmiss \see OpenCMISS::Iron::cmfe_ErrorHandlingModeSet SUBROUTINE cmfe_ErrorHandlingModeSet_(errorHandlingMode,err,error,*) !Argument variables - INTEGER(INTG), INTENT(IN) :: errorHandlingMode !Finalises CMISS. \see OPENOpenCMISS::Iron::CMISSFinalise + !>Finalises OpenCMISS. \see OpenCMISS::Iron::cmfe_Finalise SUBROUTINE cmfe_Finalise_(err,error,*) !Argument variables @@ -217,8 +218,8 @@ SUBROUTINE cmfe_Finalise_(err,error,*) CALL Bases_Finalise(err,error,*999) !Reset the signal handler CALL cmfe_ResetFatalHandler() - !Finalise computational enviroment - CALL ComputationalEnvironment_Finalise(err,error,*999) + !Finalise computation + CALL Computation_Finalise(err,error,*999) !Finalise the base routines CALL BaseRoutines_Finalise(err,error,*999) @@ -233,7 +234,7 @@ END SUBROUTINE cmfe_Finalise_ !!TODO Underscore to avoid name clash. Can be removed upon prefix rename. - !>Initialises CMISS. 
\see OPENOpenCMISS::Iron::CMISSInitialise + !>Initialises OpenCMISS. \see OpenCMISS::Iron::cmfe_Initialise SUBROUTINE cmfe_Initialise_(worldRegion,err,error,*) !Argument variables @@ -241,14 +242,15 @@ SUBROUTINE cmfe_Initialise_(worldRegion,err,error,*) INTEGER(INTG), INTENT(INOUT) :: err !MODELS_FIELD%CELLML IF(ASSOCIATED(CELLML)) THEN + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) !Models field has not been checked before. NULLIFY(MODELS_VARIABLE) CALL Field_VariableGet(MODELS_FIELD%MODELS_FIELD,FIELD_U_VARIABLE_TYPE,MODELS_VARIABLE,ERR,ERROR,*999) @@ -2587,7 +2589,7 @@ SUBROUTINE CELLML_MODELS_FIELD_CHECK(MODELS_FIELD,ERR,ERROR,*) ENDDO !source_dof_idx onlyOneModelIndex=0 CALL MPI_ALLREDUCE(MODELS_FIELD%ONLY_ONE_MODEL_INDEX,onlyOneModelIndex,1,MPI_INTEGER,MPI_MAX, & - & computationalEnvironment%mpiCommunicator,mpiIerror) + & worldCommunicator,mpiIerror) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",mpiIerror,ERR,ERROR,*999) IF(onlyOneModelIndex==0) & & CALL FlagError("Models field does not have any models set.",ERR,ERROR,*999) diff --git a/src/computation_access_routines.f90 b/src/computation_access_routines.f90 new file mode 100644 index 00000000..ebb19bde --- /dev/null +++ b/src/computation_access_routines.f90 @@ -0,0 +1,655 @@ +!> \file +!> \author Chris Bradley +!> \brief This module contains all computation access method routines. +!> +!> \section LICENSE +!> +!> Version: MPL 1.1/GPL 2.0/LGPL 2.1 +!> +!> The contents of this file are subject to the Mozilla Public License +!> Version 1.1 (the "License"); you may not use this file except in +!> compliance with the License. You may obtain a copy of the License at +!> http://www.mozilla.org/MPL/ +!> +!> Software distributed under the License is distributed on an "AS IS" +!> basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +!> License for the specific language governing rights and limitations +!> under the License. +!> +!> The Original Code is OpenCMISS +!> +!> The Initial Developer of the Original Code is University of Auckland, +!> Auckland, New Zealand, the University of Oxford, Oxford, United +!> Kingdom and King's College, London, United Kingdom. Portions created +!> by the University of Auckland, the University of Oxford and King's +!> College, London are Copyright (C) 2007-2010 by the University of +!> Auckland, the University of Oxford and King's College, London. +!> All Rights Reserved. +!> +!> Contributor(s): Chris Bradley +!> +!> Alternatively, the contents of this file may be used under the terms of +!> either the GNU General Public License Version 2 or later (the "GPL"), or +!> the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +!> in which case the provisions of the GPL or the LGPL are applicable instead +!> of those above. If you wish to allow use of your version of this file only +!> under the terms of either the GPL or the LGPL, and not to allow others to +!> use your version of this file under the terms of the MPL, indicate your +!> decision by deleting the provisions above and replace them with the notice +!> and other provisions required by the GPL or the LGPL. If you do not delete +!> the provisions above, a recipient may use your version of this file under +!> the terms of any one of the MPL, the GPL or the LGPL. +!> + +!> This module contains all computation access method routines. 
+MODULE ComputationAccessRoutines + + USE BaseRoutines + USE ISO_VARYING_STRING + USE Kinds +#ifndef NOMPIMOD + USE MPI +#endif + USE Strings + +#include "macros.h" + + IMPLICIT NONE + + PRIVATE + +#ifdef NOMPIMOD +#include "mpif.h" +#endif + + !Module parameters + + !Module types + + !>pointer type to WorkGroupType + TYPE WorkGroupPtrType + TYPE(WorkGroupType), POINTER :: ptr + END TYPE WorkGroupPtrType + + !>Contains information on logical working groups + TYPE WorkGroupType + INTEGER(INTG) :: userNumber !Contains information on a cache heirarchy + TYPE ComputationCacheType + INTEGER(INTG) :: numberOfLevels !Contains information on a computation node containing a number of processors + TYPE ComputationNodeType + INTEGER(INTG) :: numberOfProcessors !Contains information on the MPI type to transfer information about a computation node + TYPE MPIComputationNodeType + INTEGER(INTG) :: mpiType !Contains information on the computation environment the program is running in. + TYPE ComputationEnvironmentType + INTEGER(INTG) :: mpiVersion !Gets the current world communicator. + SUBROUTINE ComputationEnvironment_WorldCommunicatorGet(computationEnviron,worldCommunicator,err,error,*) + + !Argument Variables + TYPE(ComputationEnvironmentType), POINTER, INTENT(IN) :: computationEnviron !Returns the number/rank of the computation node in the world communicator + SUBROUTINE ComputationEnvironment_WorldNodeNumberGet(computationEnviron,worldNodeNumber,err,error,*) + + !Argument Variables + TYPE(ComputationEnvironmentType), POINTER, INTENT(IN) :: computationEnviron !Gets the number of computation nodes in the world communicator. + SUBROUTINE ComputationEnvironment_NumberOfWorldNodesGet(computationEnviron,numberOfWorldNodes,err,error,*) + + !Argument Variables + TYPE(ComputationEnvironmentType), POINTER, INTENT(IN) :: computationEnviron !Gets the world work group from a computational environment. + SUBROUTINE ComputationEnvironment_WorldWorkGroupGet(computationEnviron,worldWorkGroup,err,error,*) + + !Argument variables + TYPE(ComputationEnvironmentType), POINTER, INTENT(IN) :: computationEnviron !computationEnviron%worldWorkGroup + !Check world work group is associated. + IF(.NOT.ASSOCIATED(worldWorkGroup)) & + & CALL FlagError("World work group is not associated for the computation environment.",err,error,*999) + + EXITS("ComputationEnvironment_WorldWorkGroupGet") + RETURN +999 NULLIFY(worldWorkGroup) +998 ERRORSEXITS("ComputationEnvironment_WorldWorkGroupGet",err,error) + RETURN 1 + + END SUBROUTINE ComputationEnvironment_WorldWorkGroupGet + + ! + !================================================================================================================================ + ! + + !>Finds and returns a pointer to the work group with the given user number. + SUBROUTINE WorkGroup_Get(computationEnvironment,workGroupUserNumber,workGroup,err,error,*) + + !Argument variables + TYPE(ComputationEnvironmentType), POINTER, INTENT(IN) :: computationEnvironment !Gets the group communicator from a work group. 
+ SUBROUTINE WorkGroup_GroupCommunicatorGet(workGroup,groupCommunicator,err,error,*) + + !Argument variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !Gets the number/rank of the computation node in the work group communicator + SUBROUTINE WorkGroup_GroupNodeNumberGet(workGroup,groupNodeNumber,err,error,*) + + !Argument Variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !Returns the character label of a work group \see OpenCMISS::Iron::cmfe_WorkGroup_LabelGet + SUBROUTINE WorkGroup_LabelGetC(workGroup,label,err,error,*) + + !Argument Variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !Returns the varying string label of a work group \see OpenCMISS::Iron::cmfe_WorkGroup_LabelGet + SUBROUTINE WorkGroup_LabelGetVS(workGroup,label,err,error,*) + + !Argument Variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !Gets the number of computation nodes in the work group communicator. + SUBROUTINE WorkGroup_NumberOfGroupNodesGet(workGroup,numberOfGroupNodes,err,error,*) + + !Argument Variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !Gets the parent work group from a work group. + SUBROUTINE WorkGroup_ParentWorkGroupGet(workGroup,parentWorkGroup,err,error,*) + + !Argument variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !workGroup%parentWorkGroup + !Check parent work group is associated. + IF(.NOT.ASSOCIATED(parentWorkGroup)) THEN + localError="Parent work group is not associated for work group "// & + & TRIM(NumberToVString(workGroup%userNumber,"*",err,error))//"." + CALL FlagError(localError,err,error,*999) + ENDIF + + EXITS("WorkGroup_ParentWorkGroupGet") + RETURN +999 NULLIFY(parentWorkGroup) +998 ERRORSEXITS("WorkGroup_ParentWorkGroupGet",err,error) + RETURN 1 + + END SUBROUTINE WorkGroup_ParentWorkGroupGet + + ! + !================================================================================================================================ + ! + + !>Finds and returns a pointer to the work group with the given user number. If no work group with that number exists work group is left nullified. + SUBROUTINE WorkGroup_UserNumberFind(userNumber,computationEnvironment,workGroup,err,error,*) + + !Argument variables + INTEGER(INTG), INTENT(IN) :: userNumber !computationEnvironment%worldWorkGroup + IF(.NOT.ASSOCIATED(worldWorkGroup)) CALL FlagError("World work group is not associated.",err,error,*999) + + NULLIFY(workGroup) + IF(userNumber==0) THEN + workGroup=>worldWorkGroup + ELSE + CALL WorkGroup_UserNumberFindPtr(userNumber,worldWorkGroup,workGroup,err,error,*999) + ENDIF + + EXITS("WorkGroup_UserNumberFind") + RETURN +999 ERRORSEXITS("WorkGroup_UserNumberFind",err,error) + RETURN 1 + + END SUBROUTINE WorkGroup_UserNumberFind + + ! + !================================================================================================================================ + ! + + !>Finds and returns a pointer to the work group with the given user number starting from the given start work group and searching all sub-groups under the start work group. If no work group with that number exists work group is left nullified. 
+ RECURSIVE SUBROUTINE WorkGroup_UserNumberFindPtr(userNumber,startWorkGroup,workGroup,err,error,*) + + !Argument variables + INTEGER(INTG), INTENT(IN) :: userNumber !startWorkGroup + ELSE + IF(ALLOCATED(startWorkGroup%subGroups)) THEN + DO workGroupIdx=1,startWorkGroup%numberOfSubGroups + CALL WorkGroup_UserNumberFindPtr(userNumber,startWorkGroup%subGroups(workGroupIdx)%ptr,workGroup,err,error,*999) + IF(ASSOCIATED(workGroup)) EXIT + ENDDO !workGroupIdx + ENDIF + ENDIF + + EXITS("WorkGroup_UserNumberFindPtr") + RETURN +999 ERRORSEXITS("WorkGroup_UserNumberFindPtr",err,error) + RETURN 1 + + END SUBROUTINE WorkGroup_UserNumberFindPtr + + ! + !================================================================================================================================ + ! + + !>Returns the user number for a region. + SUBROUTINE WorkGroup_UserNumberGet(workGroup,userNumber,err,error,*) + + !Argument variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !Gets a sub work group from a work group. + SUBROUTINE WorkGroup_WorkSubGroupGet(workGroup,subGroupIdx,subWorkGroup,err,error,*) + + !Argument variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !workGroup%numberOfSubGroups) THEN + localError="The specified sub group index of "//TRIM(NumberToVString(subGroupIdx,"*",err,error))// & + & " is invalid. The sub group index must be >=1 and <= "// & + & TRIM(NumberToVString(workGroup%numberOfSubGroups,"*",err,error))//"." + CALL FlagError(localError,err,error,*999) + ENDIF + + !Get the parent work group + subWorkGroup=>workGroup%subGroups(subGroupIdx)%ptr + !Check sub work group is associated. + IF(.NOT.ASSOCIATED(subWorkGroup)) THEN + localError="The sub work group is not associated for sub group index "// & + & TRIM(NumberToVString(subGroupIdx,"*",err,error))//" of work group "// & + & TRIM(NumberToVString(workGroup%userNumber,"*",err,error))//"." + CALL FlagError(localError,err,error,*999) + ENDIF + + EXITS("WorkGroup_WorkSubGroupGet") + RETURN +999 NULLIFY(subWorkGroup) +998 ERRORSEXITS("WorkGroup_WorkSubGroupGet",err,error) + RETURN 1 + + END SUBROUTINE WorkGroup_WorkSubGroupGet + + ! + !================================================================================================================================ + ! + +END MODULE ComputationAccessRoutines diff --git a/src/computation_routines.f90 b/src/computation_routines.f90 new file mode 100755 index 00000000..6375d292 --- /dev/null +++ b/src/computation_routines.f90 @@ -0,0 +1,1005 @@ +!> \file +!> \author Chris Bradley +!> \brief This module contains all computation routines. +!> +!> \section LICENSE +!> +!> Version: MPL 1.1/GPL 2.0/LGPL 2.1 +!> +!> The contents of this file are subject to the Mozilla Public License +!> Version 1.1 (the "License"); you may not use this file except in +!> compliance with the License. You may obtain a copy of the License at +!> http://www.mozilla.org/MPL/ +!> +!> Software distributed under the License is distributed on an "AS IS" +!> basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the +!> License for the specific language governing rights and limitations +!> under the License. +!> +!> The Original Code is OpenCMISS +!> +!> The Initial Developer of the Original Code is University of Auckland, +!> Auckland, New Zealand, the University of Oxford, Oxford, United +!> Kingdom and King's College, London, United Kingdom. 
Portions created +!> by the University of Auckland, the University of Oxford and King's +!> College, London are Copyright (C) 2007-2010 by the University of +!> Auckland, the University of Oxford and King's College, London. +!> All Rights Reserved. +!> +!> Contributor(s): +!> +!> Alternatively, the contents of this file may be used under the terms of +!> either the GNU General Public License Version 2 or later (the "GPL"), or +!> the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), +!> in which case the provisions of the GPL or the LGPL are applicable instead +!> of those above. If you wish to allow use of your version of this file only +!> under the terms of either the GPL or the LGPL, and not to allow others to +!> use your version of this file under the terms of the MPL, indicate your +!> decision by deleting the provisions above and replace them with the notice +!> and other provisions required by the GPL or the LGPL. If you do not delete +!> the provisions above, a recipient may use your version of this file under +!> the terms of any one of the MPL, the GPL or the LGPL. +!> + +!> This module contains all computation routines. + +MODULE ComputationRoutines + + USE BaseRoutines + USE CmissMPI + USE CmissPetsc + USE ComputationAccessRoutines + USE Constants + USE Kinds +#ifndef NOMPIMOD + USE MPI +#endif + USE INPUT_OUTPUT + USE ISO_VARYING_STRING + USE Sorting + USE Strings + +#include "macros.h" + + IMPLICIT NONE + + PRIVATE + +#ifdef NOMPIMOD +#include "mpif.h" +#endif + + !Module parameters + + !Module types + + !Module variables + + LOGICAL, SAVE :: cmissMPIInitialised !Finalises the computation node data structures and deallocates all memory. + SUBROUTINE Computation_ComputationNodeFinalise(computationNode,err,error,*) + + !Argument Variables + TYPE(ComputationNodeType),INTENT(INOUT) :: computationNode !Initialises the computation node data structures. + SUBROUTINE Computation_ComputationNodeInitialise(computationNode,rank,err,error,*) + + !Argument Variables + TYPE(ComputationNodeType), INTENT(OUT) :: computationNode !Finalises the MPI computation node type data structure and deallocates all memory. + SUBROUTINE Computation_MPIComputationNodeFinalise(mpiComputationNode,err,error,*) + + !Argument Variables + TYPE(MPIComputationNodeType) :: mpiComputationNode !Initialises the data structure containing the MPI type information for the ComputationNodeType. + SUBROUTINE Computation_MPIComputationNodeInitialise(computationEnvironment,rank,err,error,*) + + !Argument Variables + TYPE(ComputationEnvironmentType), POINTER, INTENT(INOUT) :: computationEnvironment !computationEnvironment%numberOfWorldComputationNodes) THEN + localError="The specified rank of "//TRIM(NumberToVString(rank,"*",err,error))// & + & " is invalid. The rank should be >= 0 and <= "// & + & TRIM(NumberToVString(computationEnvironment%numberOfWorldComputationNodes,"*",err,error))//"." 
+ CALL FlagError(localError,err,error,*999) + ENDIF + + computationEnvironment%mpiComputationNode%mpiType=MPI_DATATYPE_NULL + + computationEnvironment%mpiComputationNode%numberOfBlocks=4 + computationEnvironment%mpiComputationNode%types=[MPI_INTEGER,MPI_INTEGER,MPI_INTEGER,MPI_CHARACTER] + computationEnvironment%mpiComputationNode%blockLengths=[1,1,1,MPI_MAX_PROCESSOR_NAME] + + CALL MPI_GET_ADDRESS(computationEnvironment%computationNodes(rank)%numberOfProcessors, & + & computationEnvironment%mpiComputationNode%displacements(1),mpiIError) + CALL MPI_ERROR_CHECK("MPI_GET_ADDRESS",mpiIError,err,error,*999) + CALL MPI_GET_ADDRESS(computationEnvironment%computationNodes(rank)%rank, & + & computationEnvironment%mpiComputationNode%displacements(2),mpiIError) + CALL MPI_ERROR_CHECK("MPI_GET_ADDRESS",mpiIError,err,error,*999) + CALL MPI_GET_ADDRESS(computationEnvironment%computationNodes(rank)%nodeNameLength, & + & computationEnvironment%mpiComputationNode%displacements(3),mpiIError) + CALL MPI_ERROR_CHECK("MPI_GET_ADDRESS",mpiIError,err,error,*999) + !CPB 19/02/07 AIX compiler complains about the type of the first parameter i.e., the previous 3 have been integers + !and this one is not so cast the type. + CALL MPI_GET_ADDRESS(computationEnvironment%computationNodes(rank)%nodeName, & + & computationEnvironment%mpiComputationNode%displacements(4),mpiIError) + CALL MPI_ERROR_CHECK("MPI_GET_ADDRESS",mpiIError,err,error,*999) + + DO blockIdx=4,1,-1 + computationEnvironment%mpiComputationNode%displacements(blockIdx)= & + & computationEnvironment%mpiComputationNode%displacements(blockIdx)- & + & computationEnvironment%mpiComputationNode%displacements(1) + ENDDO !blockIdx + + CALL MPI_TYPE_CREATE_STRUCT(computationEnvironment%mpiComputationNode%numberOfBlocks, & + & computationEnvironment%mpiComputationNode%blockLengths, & + & computationEnvironment%mpiComputationNode%displacements, & + & computationEnvironment%mpiComputationNode%types, & + & computationEnvironment%mpiComputationNode%mpiType,mpiIError) + CALL MPI_ERROR_CHECK("MPI_TYPE_CREATE_STRUCT",mpiIError,err,error,*999) + + CALL MPI_TYPE_COMMIT(computationEnvironment%mpiComputationNode%mpiType,mpiIError) + CALL MPI_ERROR_CHECK("MPI_TYPE_COMMIT",mpiIError,err,error,*999) + + IF(diagnostics1) THEN + CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE,"MPI Computation Node Type Data:",err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," MPI type = ", & + & computationEnvironment%mpiComputationNode%mpiType,err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Number of blocks = ", & + & computationEnvironment%mpiComputationNode%numberOfBlocks,err,error,*999) + CALL WriteStringVector(DIAGNOSTIC_OUTPUT_TYPE,1,1,computationEnvironment%mpiComputationNode%numberOfBlocks,4,4, & + & computationEnvironment%mpiComputationNode%types,'(" Block types =",4(X,I15))','(15X,4(X,I15))',err,error,*999) + CALL WriteStringVector(DIAGNOSTIC_OUTPUT_TYPE,1,1,computationEnvironment%mpiComputationNode%numberOfBlocks,8,8, & + & computationEnvironment%mpiComputationNode%blockLengths,'(" Block lengths =",8(X,I5))','(17X,8(X,I5))',err,error,*999) + CALL WriteStringVector(DIAGNOSTIC_OUTPUT_TYPE,1,1,computationEnvironment%mpiComputationNode%numberOfBlocks,8,8, & + & computationEnvironment%mpiComputationNode%displacements,'(" Displacements =",8(X,I5))','(17X,8(X,I5))',err,error,*999) + ENDIF + + EXITS("Computation_MPIComputationNodeInitialise") + RETURN +999 CALL Computation_MPIComputationNodeFinalise(computationEnvironment%mpiComputationNode,dummyErr,dummyError,*998) +998 
ERRORS("Computation_MPIComputationNodeInitialise",err,error) + EXITS("Computation_MPIComputationNodeInitialise") + RETURN 1 + + END SUBROUTINE Computation_MPIComputationNodeInitialise + + ! + !================================================================================================================================= + ! + + !>Finalises the computation data structures and deallocates all memory. + SUBROUTINE Computation_Finalise(err,error,*) + + !Argument Variables + INTEGER(INTG), INTENT(OUT) :: err !Initialises the computation data structures. + SUBROUTINE Computation_Initialise(err,error,*) + + !Argument Variables + INTEGER(INTG), INTENT(OUT) :: err !Finalises the computation environment data structures and deallocates all memory. + SUBROUTINE ComputationEnvironment_Finalise(computationEnvironment,err,error,*) + + !Argument Variables + TYPE(ComputationEnvironmentType), POINTER :: computationEnvironment !Initialises the computation environment data structures. + SUBROUTINE ComputationEnvironment_Initialise(computationEnvironment,err,error,*) + + !Argument Variables + TYPE(ComputationEnvironmentType), POINTER :: computationEnvironment !computationEnvironment%worldWorkGroup + computationEnvironment%worldWorkGroup%numberOfGroupComputationNodes=computationEnvironment%numberOfWorldComputationNodes + !computationEnvironment%worldWorkGroup%computationEnvironment=>computationEnvironment + ALLOCATE(computationEnvironment%worldWorkGroup%worldRanks(computationEnvironment%numberOfWorldComputationNodes),STAT=err) + IF(err/=0) CALL FlagError("Could not allocate world work group world ranks.",err,error,*999) + ALLOCATE(computationEnvironment%worldWorkGroup%availableRanks(computationEnvironment%numberOfWorldComputationNodes),STAT=err) + IF(err/=0) CALL FlagError("Could not allocate world work group available ranks.",err,error,*999) + DO rankIdx=1,computationEnvironment%numberOfWorldComputationNodes + computationEnvironment%worldWorkGroup%worldRanks(rankIdx)=rankIdx-1 + computationEnvironment%worldWorkGroup%availableRanks(rankIdx)=rankIdx-1 + ENDDO !rankIdx + computationEnvironment%worldWorkGroup%numberOfAvailableRanks=computationEnvironment%numberOfWorldComputationNodes + !Create a new MPI group + CALL MPI_GROUP_INCL(computationEnvironment%mpiGroupWorld,computationEnvironment%worldWorkGroup%numberOfGroupComputationNodes, & + & computationEnvironment%worldWorkGroup%worldRanks,computationEnvironment%worldWorkGroup%mpiGroup,mpiIError) + CALL MPI_ERROR_CHECK("MPI_GROUP_INCL",mpiIError,err,error,*999) + CALL MPI_COMM_CREATE(computationEnvironment%mpiCommWorld,computationEnvironment%worldWorkGroup%mpiGroup, & + & computationEnvironment%worldWorkGroup%mpiGroupCommunicator,mpiIError) + CALL MPI_ERROR_CHECK("MPI_COMM_CREATE",mpiIError,err,error,*999) + !Determine ranks + CALL MPI_COMM_RANK(computationEnvironment%mpiCommWorld,rank,mpiIError) + CALL MPI_ERROR_CHECK("MPI_COMM_RANK",mpiIError,err,error,*999) + computationEnvironment%worldWorkGroup%myWorldComputationNodeNumber=rank + CALL MPI_COMM_RANK(computationEnvironment%worldWorkGroup%mpiGroupCommunicator,rank,mpiIError) + CALL MPI_ERROR_CHECK("MPI_COMM_RANK",mpiIError,err,error,*999) + computationEnvironment%worldWorkGroup%myWorldComputationNodeNumber=rank + + computationEnvironment%worldWorkGroup%workGroupFinished=.TRUE. 
+ + IF(diagnostics1) THEN + !Just let the master node write out this information + IF(rank==0) THEN + CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE,"Computation environment:",err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," MPI version = ", & + & computationEnvironment%mpiVersion,err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," MPI subversion = ", & + & computationEnvironment%mpiSubversion,err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," MPI world communicator = ", & + & computationEnvironment%mpiCommWorld,err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," MPI world group = ", & + & computationEnvironment%mpiGroupWorld,err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Number of world computation nodes = ", & + & computationEnvironment%numberOfWorldComputationNodes,err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," My world computation node number = ", & + & computationEnvironment%myWorldComputationNodeNumber,err,error,*999) + IF(diagnostics2) THEN + DO computationNodeIdx=0,computationEnvironment%numberOfWorldComputationNodes-1 + CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE," Computation Node:",err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Number of Processors = ", & + & computationEnvironment%computationNodes(computationNodeIdx)%numberOfProcessors,err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," MPI rank = ", & + & computationEnvironment%computationNodes(computationNodeIdx)%rank,err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Node Name = ", & + & computationEnvironment%computationNodes(computationNodeIdx)%nodeName,err,error,*999) + ENDDO !computationNodeIdx + ENDIF + ENDIF + ENDIF + + EXITS("ComputationEnvironment_Initialise") + RETURN +999 CALL ComputationEnvironment_Finalise(computationEnvironment,dummyErr,dummyError,*998) +998 ERRORS("ComputationEnvironment_Initialise",err,error) + EXITS("ComputationEnvironment_Initialise") + RETURN 1 + + END SUBROUTINE ComputationEnvironment_Initialise + + ! + !================================================================================================================================= + ! + + !>Start the creation of a work group \see OpenCMISS::Iron::cmfe_WorkGroup_CreateStart + SUBROUTINE WorkGroup_CreateStart(userNumber,parentWorkGroup,workGroup,err,error,*) + + !Argument Variables + INTEGER(INTG), INTENT(IN) :: userNumber !parentWorkGroup%computationEnvironment + !Add the work group to the list of parent sub groups + ALLOCATE(newSubGroups(parentWorkGroup%numberOfSubGroups+1),STAT=err) + IF(err/=0) CALL FlagError("Could not allocate new sub groups.",err,error,*999) + workGroup%parentWorkGroup=>parentWorkGroup + DO subGroupIdx=1,parentWorkGroup%numberOfSubGroups + newSubGroups(subGroupIdx)%ptr=>parentWorkGroup%subGroups(subGroupIdx)%ptr + ENDDO !subGroupIdx + newSubGroups(parentWorkGroup%numberOfSubGroups+1)%ptr=>workGroup + CALL MOVE_ALLOC(newSubGroups,parentWorkGroup%subGroups) + parentWorkGroup%numberOfSubGroups=parentWorkGroup%numberOfSubGroups+1 + + EXITS("WorkGroup_CreateStart") + RETURN +999 ERRORSEXITS("WorkGroup_CreateStart",err,error) + RETURN 1 + + END SUBROUTINE WorkGroup_CreateStart + + ! + !================================================================================================================================ + ! 
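For context only (not part of the patch): a minimal sketch of how the new work group API is expected to be chained together, using the entry points introduced in this file (ComputationEnvironment_WorldWorkGroupGet, WorkGroup_CreateStart, WorkGroup_NumberOfGroupNodesSet, WorkGroup_CreateFinish) and assuming the computationEnvironment module variable exported by the computation modules. The user number and group size below are arbitrary illustrative values.

  !Example only (illustrative sketch, not part of this diff)
  SUBROUTINE ExampleWorkGroupCreate(err,error,*)
    USE ComputationAccessRoutines
    USE ComputationRoutines
    USE ISO_VARYING_STRING
    USE Kinds
    IMPLICIT NONE
    INTEGER(INTG), INTENT(OUT) :: err !The error code
    TYPE(VARYING_STRING), INTENT(OUT) :: error !The error string
    TYPE(WorkGroupType), POINTER :: worldWorkGroup,subWorkGroup

    NULLIFY(worldWorkGroup)
    NULLIFY(subWorkGroup)
    !Get the world work group set up by ComputationEnvironment_Initialise
    CALL ComputationEnvironment_WorldWorkGroupGet(computationEnvironment,worldWorkGroup,err,error,*999)
    !Create a sub work group (user number 1 is arbitrary) under the world group,
    !request two computation nodes for it, then finish its creation
    CALL WorkGroup_CreateStart(1,worldWorkGroup,subWorkGroup,err,error,*999)
    CALL WorkGroup_NumberOfGroupNodesSet(subWorkGroup,2,err,error,*999)
    CALL WorkGroup_CreateFinish(subWorkGroup,err,error,*999)

    RETURN
999 RETURN 1

  END SUBROUTINE ExampleWorkGroupCreate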
+ + !>Finish the creation of a work group \see OpenCMISS::Iron::cmfe_WorkGroup_CreateFinish + SUBROUTINE WorkGroup_CreateFinish(workGroup,err,error,*) + + !Argument Variables + TYPE(WorkGroupType), POINTER, INTENT(INOUT) :: workGroup !SIZE(parentWorkGroup%availableRanks,1)) THEN + localError="There are insufficient parent work group available ranks. There are "// & + & TRIM(NumberToVString(SIZE(parentWorkGroup%availableRanks,1),"*",err,error))// & + & " parent ranks available and "// & + & TRIM(NumberToVString(workGroup%numberOfGroupComputationNodes,"*",err,error))//" ranks are required." + CALL FlagError(localError,err,error,*999) + ENDIF + + !Get the ranks from the list of available ranks of the parent. + ALLOCATE(workGroup%worldRanks(workGroup%numberOfGroupComputationNodes),STAT=err) + IF(err/=0) CALL FlagError("Could not allocate work group world ranks.",err,error,*999) + newNumberOfAvailableRanks=parentWorkGroup%numberOfAvailableRanks-workGroup%numberOfGroupComputationNodes + ALLOCATE(newAvailableRanks(newNumberOfAvailableRanks),STAT=err) + IF(err/=0) CALL FlagError("Could not allocate work group parent new available ranks.",err,error,*999) + DO rankIdx=1,workGroup%numberOfGroupComputationNodes + workGroup%worldRanks(rankIdx)=parentWorkGroup%availableRanks(rankIdx) + ENDDO !rankIdx + DO rankIdx=1,newNumberOfAvailableRanks + newAvailableRanks(rankIdx)=parentWorkGroup%availableRanks(rankIdx+workGroup%numberOfGroupComputationNodes) + ENDDO !rankIdx + CALL Sorting_HeapSort(newAvailableRanks,err,error,*999) + CALL MOVE_ALLOC(newAvailableRanks,parentWorkGroup%availableRanks) + parentWorkGroup%numberOfAvailableRanks=newNumberOfAvailableRanks + !Set up the available ranks for this work group + ALLOCATE(workGroup%availableRanks(workGroup%numberOfGroupComputationNodes),STAT=err) + IF(err/=0) CALL FlagError("Could not allocate work group available ranks.",err,error,*999) + DO rankIdx=1,workGroup%numberOfGroupComputationNodes + workGroup%availableRanks(rankIdx)=workGroup%worldRanks(rankIdx) + ENDDO !rankIdx + workGroup%numberOfAvailableRanks=workGroup%numberOfGroupComputationNodes + + !Create a new MPI group + CALL MPI_GROUP_INCL(computationEnvironment%mpiGroupWorld,workGroup%numberOfGroupComputationNodes,workGroup%worldRanks, & + & workGroup%mpiGroup,mpiIError) + CALL MPI_ERROR_CHECK("MPI_GROUP_INCL",mpiIError,err,error,*999) + CALL MPI_COMM_CREATE(computationEnvironment%mpiCommWorld,workGroup%mpiGroup,workGroup%mpiGroupCommunicator,mpiIError) + CALL MPI_ERROR_CHECK("MPI_COMM_CREATE",mpiIError,err,error,*999) + + !Determine my processes rank in the group communicator + IF(workGroup%mpiGroupCommunicator /= MPI_COMM_NULL) THEN + CALL MPI_COMM_RANK(workGroup%mpiGroupCommunicator,groupRank,mpiIError) + CALL MPI_ERROR_CHECK("MPI_COMM_RANK",mpiIError,err,error,*999) + workGroup%myGroupComputationNodeNumber=groupRank + ELSE + workGroup%myGroupComputationNodeNumber=-1 + ENDIF + !Determine my process rank in the world communicator + CALL MPI_COMM_RANK(computationEnvironment%mpiCommWorld,worldRank,mpiIError) + CALL MPI_ERROR_CHECK("MPI_COMM_RANK",mpiIError,err,error,*999) + workGroup%myWorldComputationNodeNumber=worldRank + + + workGroup%workGroupFinished=.TRUE. + + EXITS("WorkGroup_CreateFinish") + RETURN +999 IF(ALLOCATED(newAvailableRanks)) DEALLOCATE(newAvailableRanks) + ERRORSEXITS("WorkGroup_CreateFinish",err,error) + RETURN 1 + + END SUBROUTINE WorkGroup_CreateFinish + + ! 
+ !================================================================================================================================= + ! + + !>Destroy a work group \see OpenCMISS::Iron::cmfe_WorkGroup_Destroy + SUBROUTINE WorkGroup_Destroy(workGroup,err,error,*) + + !Argument Variables + TYPE(WorkGroupType), POINTER, INTENT(INOUT) :: workGroup !Destroy a work group given by a user number and all sub groups under it + RECURSIVE SUBROUTINE WorkGroup_DestroyNumber(workGroupUserNumber,err,error,*) + + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !1) THEN + !If the parentWorkGroup has more than one sub groups then remove this work group from the list of sub groups. + ALLOCATE(newSubGroups(parentWorkGroup%numberOfSubGroups-1),STAT=err) + IF(err/=0) CALL FlagError("Could not allocate new sub groups.",err,error,*999) + count=0 + DO subGroupIdx=1,parentWorkGroup%numberOfSubGroups + NULLIFY(subGroup) + CALL WorkGroup_WorkSubGroupGet(parentWorkGroup,subGroupIdx,subGroup,err,error,*999) + IF(subGroup%userNumber/=workGroup%userNumber) THEN + count=count+1 + newSubGroups(count)%ptr=>parentWorkGroup%subGroups(subGroupIdx)%ptr + ENDIF + ENDDO !subGroupIdx + CALL MOVE_ALLOC(newSubGroups,parentWorkGroup%subGroups) + parentWorkGroup%numberOfSubGroups=parentWorkGroup%numberOfSubGroups-1 + ELSE + IF(ALLOCATED(parentWorkGroup%subGroups)) DEALLOCATE(parentWorkGroup%subGroups) + parentWorkGroup%numberOfSubGroups=0 + ENDIF + !Put the work group ranks back into the parent work group available ranks + ALLOCATE(newAvailableRanks(parentWorkGroup%numberOfAvailableRanks+workGroup%numberOfGroupComputationNodes),STAT=err) + IF(err/=0) CALL FlagError("Could not allocate new available ranks.",err,error,*999) + DO rankIdx=1,parentWorkGroup%numberOfAvailableRanks + newAvailableRanks(rankIdx)=parentWorkGroup%availableRanks(rankIdx) + ENDDO !rankIdx + DO rankIdx=1,workGroup%numberOfGroupComputationNodes + newAvailableRanks(parentWorkGroup%numberOfAvailableRanks+rankIdx)=workGroup%worldRanks(rankIdx) + ENDDO !rankIdx + CALL Sorting_HeapSort(newAvailableRanks,err,error,*999) + CALL MOVE_ALLOC(newAvailableRanks,parentWorkGroup%availableRanks) + parentWorkGroup%numberOfAvailableRanks=parentWorkGroup%numberOfAvailableRanks+workGroup%numberOfGroupComputationNodes + !Finalise the work group + CALL WorkGroup_Finalise(workGroup,err,error,*999) + ELSE + !Recursively delete the sub groups first + DO WHILE(workGroup%numberOfSubGroups>0) + NULLIFY(workGroup2) + CALL WorkGroup_WorkSubGroupGet(workGroup,1,workGroup2,err,error,*999) + CALL WorkGroup_DestroyNumber(workGroup2%userNumber,err,error,*999) + ENDDO + !Now delete this instance + CALL WorkGroup_DestroyNumber(workGroup%userNumber,err,error,*999) + ENDIF + + EXITS("WorkGroup_DestroyNumber") + RETURN +999 IF(ALLOCATED(newSubGroups)) DEALLOCATE(newSubGroups) + IF(ALLOCATED(newAvailableRanks)) DEALLOCATE(newAvailableRanks) + ERRORSEXITS("WorkGroup_DestroyNumber",err,error) + RETURN 1 + + END SUBROUTINE WorkGroup_DestroyNumber + + ! + !================================================================================================================================= + ! 
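Also for context (not part of the patch): a short sketch of the accessor pattern that the remainder of this diff migrates to, replacing the removed ComputationalEnvironment_NodeNumberGet/ComputationalNodesNumberGet functions and direct references to computationalEnvironment%mpiCommunicator. The call signatures are those shown in computation_access_routines.f90 above; the local variable names are illustrative and the computationEnvironment module variable is assumed to be accessible.

  !Example only (illustrative sketch, not part of this diff)
  SUBROUTINE ExampleComputationQueries(workGroup,err,error,*)
    USE ComputationAccessRoutines
    USE ISO_VARYING_STRING
    USE Kinds
    IMPLICIT NONE
    TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !A finished work group to query
    INTEGER(INTG), INTENT(OUT) :: err !The error code
    TYPE(VARYING_STRING), INTENT(OUT) :: error !The error string
    INTEGER(INTG) :: worldCommunicator,myWorldNodeNumber,numberOfWorldNodes
    INTEGER(INTG) :: groupCommunicator,myGroupNodeNumber,numberOfGroupNodes

    !World-level queries: the subroutine calls that replace the old getter functions
    CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999)
    CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldNodeNumber,err,error,*999)
    CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldNodes,err,error,*999)
    !Group-level queries on the given work group
    CALL WorkGroup_GroupCommunicatorGet(workGroup,groupCommunicator,err,error,*999)
    CALL WorkGroup_GroupNodeNumberGet(workGroup,myGroupNodeNumber,err,error,*999)
    CALL WorkGroup_NumberOfGroupNodesGet(workGroup,numberOfGroupNodes,err,error,*999)

    RETURN
999 RETURN 1

  END SUBROUTINE ExampleComputationQueries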
+ + !>Finalise a work group and deallocate all memory + RECURSIVE SUBROUTINE WorkGroup_Finalise(workGroup,err,error,*) + + !Argument Variables + TYPE(WorkGroupType),POINTER :: workGroup !Add the work sub-group to the parent group based on the computation requirements (called by user) + SUBROUTINE WorkGroup_Initialise(workGroup,err,error,*) + + !Argument Variables + TYPE(WorkGroupType),POINTER, INTENT(OUT) :: workGroup !Set the character label of a work group \see OpenCMISS::Iron::cmfe_WorkGroup_LabelSet + SUBROUTINE WorkGroup_LabelSetC(workGroup,label,err,error,*) + + !Argument Variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !Set the varying string label of a work group \see OpenCMISS::Iron::cmfe_WorkGroup_LabelSet + SUBROUTINE WorkGroup_LabelSetVS(workGroup,label,err,error,*) + + !Argument Variables + TYPE(WorkGroupType), POINTER, INTENT(IN) :: workGroup !Set the number of group nodes in a work group \see OpenCMISS::Iron::cmfe_WorkGroup_NumberOfGroupNodesSet + SUBROUTINE WorkGroup_NumberOfGroupNodesSet(workGroup,numberOfGroupComputationNodes,err,error,*) + + !Argument Variables + TYPE(WorkGroupType), POINTER, INTENT(OUT) :: workGroup !parentWorkGroup%numberOfAvailableRanks) THEN + localError="The number of group nodes of "//TRIM(NumberToVString(numberOfGroupComputationNodes,"*",err,error))// & + & " is invalid. The number of group nodes should be > 0 and <= "// & + & TRIM(NumberToVString(parentWorkGroup%numberOfAvailableRanks,"*",err,error))//"." + CALL FlagError(localError,err,error,*999) + ENDIF + + workGroup%numberOfGroupComputationNodes=numberOfGroupComputationNodes + + EXITS("WorkGroup_NumberOfGroupNodesSet") + RETURN +999 ERRORSEXITS("WorkGroup_NumberOfGroupNodesSet",err,error) + RETURN 1 + + END SUBROUTINE WorkGroup_NumberOfGroupNodesSet + + ! + !================================================================================================================================= + ! + +END MODULE ComputationRoutines diff --git a/src/computational_environment.f90 b/src/computational_environment.f90 deleted file mode 100755 index fcca2407..00000000 --- a/src/computational_environment.f90 +++ /dev/null @@ -1,821 +0,0 @@ -!> \file -!> \author Chris Bradley -!> \brief This module contains all computational environment variables. -!> -!> \section LICENSE -!> -!> Version: MPL 1.1/GPL 2.0/LGPL 2.1 -!> -!> The contents of this file are subject to the Mozilla Public License -!> Version 1.1 (the "License"); you may not use this file except in -!> compliance with the License. You may obtain a copy of the License at -!> http://www.mozilla.org/MPL/ -!> -!> Software distributed under the License is distributed on an "AS IS" -!> basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the -!> License for the specific language governing rights and limitations -!> under the License. -!> -!> The Original Code is OpenCMISS -!> -!> The Initial Developer of the Original Code is University of Auckland, -!> Auckland, New Zealand, the University of Oxford, Oxford, United -!> Kingdom and King's College, London, United Kingdom. Portions created -!> by the University of Auckland, the University of Oxford and King's -!> College, London are Copyright (C) 2007-2010 by the University of -!> Auckland, the University of Oxford and King's College, London. -!> All Rights Reserved. 
-!> -!> Contributor(s): -!> -!> Alternatively, the contents of this file may be used under the terms of -!> either the GNU General Public License Version 2 or later (the "GPL"), or -!> the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), -!> in which case the provisions of the GPL or the LGPL are applicable instead -!> of those above. If you wish to allow use of your version of this file only -!> under the terms of either the GPL or the LGPL, and not to allow others to -!> use your version of this file under the terms of the MPL, indicate your -!> decision by deleting the provisions above and replace them with the notice -!> and other provisions required by the GPL or the LGPL. If you do not delete -!> the provisions above, a recipient may use your version of this file under -!> the terms of any one of the MPL, the GPL or the LGPL. -!> - -!> This module contains all computational environment variables. - -MODULE ComputationEnvironment - - USE BaseRoutines - USE CmissMPI - USE CmissPetsc - USE Constants - USE Kinds -#ifndef NOMPIMOD - USE MPI -#endif - USE INPUT_OUTPUT - USE ISO_VARYING_STRING - USE Strings - -#include "macros.h" - - IMPLICIT NONE - -#ifdef NOMPIMOD -#include "mpif.h" -#endif - - PRIVATE - - !Module parameters - - !Module types - - !>pointer type to ComputationalWorkGroupType - TYPE :: ComputationalWorkGroupPtrType - TYPE(ComputationalWorkGroupType), POINTER :: ptr - END TYPE ComputationalWorkGroupPtrType - - !>Contains information on logical working groups (added by Robert on 01/04/2010) - TYPE :: ComputationalWorkGroupType - INTEGER(INTG) :: numberOfComputationalNodes !Contains information on a cache heirarchy - TYPE ComputationalCacheType - INTEGER(INTG) :: NUMBER_LEVELS !Contains information on a computational node containing a number of processors - TYPE ComputationalNodeType - INTEGER(INTG) :: NUMBER_PROCESSORS !Contains information on the MPI type to transfer information about a computational node - TYPE MPIComputationalNodeType - INTEGER(INTG) :: MPI_TYPE !Contains information on the computational environment the program is running in. 
- TYPE ComputationalEnvironmentType - LOGICAL :: cmissMPIInitialised !Add the work sub-group to the parent group based on the computational requirements (called by user) - SUBROUTINE COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD(PARENT_WORK_GROUP, numberOfComputationalNodes, & - & ADDED_WORK_GROUP,err,error,*) - - !Argument Variables - TYPE(ComputationalWorkGroupType),POINTER, INTENT(INOUT) :: PARENT_WORK_GROUP - TYPE(ComputationalWorkGroupType),POINTER, INTENT(INOUT) :: ADDED_WORK_GROUP - INTEGER(INTG),INTENT(IN) :: numberOfComputationalNodes - INTEGER(INTG), INTENT(OUT) :: err !PARENT_WORK_GROUP%SUB_WORK_GROUPS(I)%ptr - ENDDO - !SUB_WORK_GROUPS(1:PARENT_WORK_GROUP%numberOfSubWorkGroups)=>PARENT_WORK_GROUP%SUB_WORK_GROUPS(:) - - IF(ALLOCATED(PARENT_WORK_GROUP%SUB_WORK_GROUPS)) THEN - DEALLOCATE(PARENT_WORK_GROUP%SUB_WORK_GROUPS) - ENDIF - SUB_WORK_GROUPS(1+PARENT_WORK_GROUP%numberOfSubWorkGroups)%ptr=>NEW_WORK_GROUP%ptr - ALLOCATE(PARENT_WORK_GROUP%SUB_WORK_GROUPS(SIZE(SUB_WORK_GROUPS,1))) - DO I=1,SIZE(SUB_WORK_GROUPS,1) - PARENT_WORK_GROUP%SUB_WORK_GROUPS(I)%ptr => SUB_WORK_GROUPS(I)%ptr - ENDDO - !PARENT_WORK_GROUP%SUB_WORK_GROUPS(:) => SUB_WORK_GROUPS(:) - - DEALLOCATE(SUB_WORK_GROUPS) - PARENT_WORK_GROUP%numberOfSubWorkGroups = 1+PARENT_WORK_GROUP%numberOfSubWorkGroups - NEW_WORK_GROUP%ptr%PARENT => PARENT_WORK_GROUP - TMP_PARENT_WORK_GROUP => PARENT_WORK_GROUP - DO WHILE(ASSOCIATED(TMP_PARENT_WORK_GROUP)) !Update the computational number of its ancestors - TMP_PARENT_WORK_GROUP%numberOfComputationalNodes = TMP_PARENT_WORK_GROUP%numberOfComputationalNodes & - & + NEW_WORK_GROUP%ptr%numberOfComputationalNodes - TMP_PARENT_WORK_GROUP => TMP_PARENT_WORK_GROUP%PARENT - ENDDO - ELSE !Top level group - CALL FlagError('PARENT_WORK_GROUP is not associated, call COMPUTATIONAL_WORK_GROUP_CREATE_START first',& - & err,error,*999) - ENDIF - ADDED_WORK_GROUP => NEW_WORK_GROUP%ptr - - EXITS("COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD") - RETURN -999 ERRORSEXITS("COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD",err,error) - RETURN 1 - END SUBROUTINE COMPUTATIONAL_WORK_GROUP_SUBGROUP_ADD - - ! - !================================================================================================================================ - ! - - !>Create the highest level work group (Default: GROUP_WORLD) - SUBROUTINE COMPUTATIONAL_WORK_GROUP_CREATE_START(WORLD_WORK_GROUP,numberOfComputationalNodes,err,error,*) - - !Argument Variables - TYPE(ComputationalWorkGroupType),POINTER, INTENT(INOUT) :: WORLD_WORK_GROUP - INTEGER(INTG),INTENT(IN) :: numberOfComputationalNodes - INTEGER(INTG), INTENT(OUT) :: err !NEW_WORK_GROUP%ptr - ENDIF - - EXITS("COMPUTATIONAL_WORK_GROUP_CREATE_START") - RETURN -999 ERRORSEXITS("COMPUTATIONAL_WORK_GROUP_CREATE_START",err,error) - RETURN 1 - END SUBROUTINE COMPUTATIONAL_WORK_GROUP_CREATE_START - - ! - !================================================================================================================================ - ! 
- - !>Generate computational environment for current level work group tree and all it's subgroups recursively - RECURSIVE SUBROUTINE Computational_WorkGroupGenerateCompEnviron(WORK_GROUP,AVAILABLE_RANK_LIST,err,error,*) - - !Argument Variables - TYPE(ComputationalWorkGroupType),POINTER, INTENT(INOUT) :: WORK_GROUP - INTEGER(INTG), ALLOCATABLE, INTENT(INOUT) :: AVAILABLE_RANK_LIST(:) - INTEGER(INTG), INTENT(OUT) :: err !Generate the hierarchy computational environment based on work group tree - SUBROUTINE COMPUTATIONAL_WORK_GROUP_CREATE_FINISH(WORLD_WORK_GROUP,err,error,*) - - !Argument Variables - TYPE(ComputationalWorkGroupType),POINTER,INTENT(INOUT) :: WORLD_WORK_GROUP - INTEGER(INTG),INTENT(OUT) :: err ! computationalEnvironment - WORLD_WORK_GROUP%computationalEnvironmentFinished = .TRUE. - - !generate the communicators for subgroups if any - ALLOCATE(AVAILABLE_RANK_LIST(WORLD_WORK_GROUP%computationalEnvironment%numberOfComputationalNodes)) - DO I=0,SIZE(AVAILABLE_RANK_LIST,1)-1 - AVAILABLE_RANK_LIST(I+1) = I - END DO - DO I=1,WORLD_WORK_GROUP%numberOfSubWorkGroups,1 - CALL Computational_WorkGroupGenerateCompEnviron(WORLD_WORK_GROUP%SUB_WORK_GROUPS(I)%ptr,AVAILABLE_RANK_LIST,err,error,*999) - END DO - - EXITS("COMPUTATIONAL_WORK_GROUP_CREATE_FINISH") - RETURN -999 ERRORSEXITS("COMPUTATIONAL_WORK_GROUP_CREATE_FINISH",err,error) - RETURN 1 - END SUBROUTINE COMPUTATIONAL_WORK_GROUP_CREATE_FINISH - - ! - !================================================================================================================================ - ! - - !>Finalises the computational node data structures and deallocates all memory. - SUBROUTINE COMPUTATIONAL_NODE_FINALISE(COMPUTATIONAL_NODE,err,error,*) - - !Argument Variables - TYPE(ComputationalNodeType),INTENT(INOUT) :: COMPUTATIONAL_NODE !Initialises the computational node data structures. - SUBROUTINE COMPUTATIONAL_NODE_INITIALISE(COMPUTATIONAL_NODE,RANK,err,error,*) - - !Argument Variables - TYPE(ComputationalNodeType), INTENT(OUT) :: COMPUTATIONAL_NODE !Finalises the data structure containing the MPI type information for the ComputationalNodeType. - SUBROUTINE COMPUTATIONAL_NODE_MPI_TYPE_FINALISE(err,error,*) - - !Argument Variables - INTEGER(INTG), INTENT(OUT) :: err !Initialises the data structure containing the MPI type information for the ComputationalNodeType. - SUBROUTINE COMPUTATIONAL_NODE_MPI_TYPE_INITIALISE(COMPUTATIONAL_NODE,err,error,*) - - !Argument Variables - TYPE(ComputationalNodeType), INTENT(IN) :: COMPUTATIONAL_NODE !Finalises the computational environment data structures and deallocates all memory. - SUBROUTINE COMPUTATIONAL_ENVIRONMENT_FINALISE(err,error,*) - - !Argument Variables - INTEGER(INTG), INTENT(OUT) :: err !Initialises the computational environment data structures. - SUBROUTINE COMPUTATIONAL_ENVIRONMENT_INITIALISE(err,error,*) - - !Argument Variables - INTEGER(INTG), INTENT(OUT) :: err !Sets the world communicator to the given on. Note: This routine should be called straight after the main OpenCMISS initialise - !>routine. If it is called after objects have started to be setup then good luck! - SUBROUTINE ComputationalEnvironment_WorldCommunicatorSet(worldCommunicator,err,error,*) - - !Argument Variables - INTEGER(INTG), INTENT(IN) :: worldCommunicator !Gets the current world communicator. - SUBROUTINE ComputationalEnvironment_WorldCommunicatorGet(worldCommunicator,err,error,*) - - !Argument Variables - INTEGER(INTG), INTENT(OUT) :: worldCommunicator !Returns the number/rank of the computational nodes. 
- FUNCTION COMPUTATIONAL_NODE_NUMBER_GET(err,error) - - !Argument Variables - INTEGER(INTG), INTENT(OUT) :: err !Returns the number of computational nodes. - FUNCTION COMPUTATIONAL_NODES_NUMBER_GET(err,error) - - !Argument Variables - INTEGER(INTG), INTENT(OUT) :: err ! Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): Chris Bradley, Kumar Mithraratne, Xiani (Nancy) Yan, Prasad Babarenda Gamage +!> Contributor(s): Tim Wu, Chris Bradley, Kumar Mithraratne, Xiani (Nancy) Yan, Prasad Babarenda Gamage !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -48,7 +48,8 @@ MODULE DataProjectionRoutines USE BasisRoutines USE BasisAccessRoutines USE CmissMPI - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE Constants USE CoordinateSystemAccessRoutines USE DataPointAccessRoutines @@ -1351,7 +1352,7 @@ SUBROUTINE DataProjection_DataPointsProjectionEvaluate(dataProjection,projection TYPE(DOMAIN_MAPPING_TYPE), POINTER :: domainMappingElements TYPE(DOMAIN_MAPPINGS_TYPE), POINTER :: domainMappings TYPE(DOMAIN_TOPOLOGY_TYPE), POINTER :: domainTopology - INTEGER(INTG) :: myComputationalNode,numberOfComputationalNodes !1) THEN + !use MPI if number of computation nodes is greater than 1 + IF(numberOfWorldComputationNodes>1) THEN !Use MPI !Allocate arrays for MPI communication ALLOCATE(globalToLocalNumberOfClosestCandidates(numberOfDataPoints),STAT=err) IF(err/=0) CALL FlagError("Could not allocate global to local number of closest elements.",err,error,*999) - ALLOCATE(globalNumberOfClosestCandidates(numberOfComputationalNodes),STAT=err) + ALLOCATE(globalNumberOfClosestCandidates(numberOfWorldComputationNodes),STAT=err) IF(err/=0) CALL FlagError("Could not allocate global number of closest candidates.",err,error,*999) - ALLOCATE(globalMPIDisplacements(numberOfComputationalNodes),STAT=err) + ALLOCATE(globalMPIDisplacements(numberOfWorldComputationNodes),STAT=err) IF(err/=0) CALL FlagError("Could not allocate global MPI displacements.",err,error,*999) - ALLOCATE(globalNumberOfProjectedPoints(numberOfComputationalNodes),STAT=err) + ALLOCATE(globalNumberOfProjectedPoints(numberOfWorldComputationNodes),STAT=err) IF(err/=0) CALL FlagError("Could not allocate all number of projected points.",err,error,*999) ALLOCATE(projectionExitTag(numberOfDataPoints),STAT=err) IF(err/=0) CALL FlagError("Could not allocate projected.",err,error,*999) @@ -1680,13 +1680,13 @@ SUBROUTINE DataProjection_DataPointsProjectionEvaluate(dataProjection,projection IF(err/=0) CALL FlagError("Could not allocate projected vectors.",err,error,*999) ALLOCATE(sortingIndices2(numberOfDataPoints),STAT=err) IF(err/=0) CALL FlagError("Could not allocate sorting indices 2.",err,error,*999) - !gather and distribute the number of closest elements from all computational nodes + !gather and distribute the number of closest elements from all computation nodes CALL MPI_ALLGATHER(numberOfClosestCandidates,1,MPI_INTEGER,globalNumberOfClosestCandidates,1,MPI_INTEGER, & - & computationalEnvironment%mpiCommunicator,MPIIError) + & worldCommunicator,MPIIError) CALL MPI_ERROR_CHECK("MPI_ALLGATHER",MPIIError,err,error,*999) - !Sum all number of closest candidates from all computational nodes + !Sum all number of closest candidates from all computation nodes totalNumberOfClosestCandidates=SUM(globalNumberOfClosestCandidates,1) - !Allocate arrays to store information 
gathered from all computational node + !Allocate arrays to store information gathered from all computation node !The information for each data point is stored in the corresponding row so they are contiguous in memory for !easy MPI access ALLOCATE(globalClosestDistances(numberOfDataPoints,totalNumberOfClosestCandidates),STAT=err) @@ -1700,27 +1700,27 @@ SUBROUTINE DataProjection_DataPointsProjectionEvaluate(dataProjection,projection CALL MPI_ERROR_CHECK("MPI_TYPE_COMMIT",MPIIError,err,error,*999) !Create displacement vectors for MPI_ALLGATHERV globalMPIDisplacements(1)=0 - DO computationalNodeIdx=1,(numberOfComputationalNodes-1) - globalMPIDisplacements(computationalNodeIdx+1)=globalMPIDisplacements(computationalNodeIdx)+ & - & globalNumberOfClosestCandidates(computationalNodeIdx) - ENDDO !computationalNodeIdx + DO computationNodeIdx=1,(numberOfWorldComputationNodes-1) + globalMPIDisplacements(computationNodeIdx+1)=globalMPIDisplacements(computationNodeIdx)+ & + & globalNumberOfClosestCandidates(computationNodeIdx) + ENDDO !computationNodeIdx !Share closest element distances between all domains CALL MPI_ALLGATHERV(closestDistances(1,1),numberOfClosestCandidates,MPIClosestDistances, & & globalClosestDistances,globalNumberOfClosestCandidates,globalMPIDisplacements, & - & MPIClosestDistances,computationalEnvironment%mpiCommunicator,MPIIError) + & MPIClosestDistances,worldCommunicator,MPIIError) CALL MPI_ERROR_CHECK("MPI_ALLGATHERV",MPIIError,err,error,*999) reducedNumberOfCLosestCandidates=MIN(dataProjection%numberOfClosestElements,totalNumberOfClosestCandidates) - projectedDistance(2,:)=myComputationalNode + projectedDistance(2,:)=myComputationNode !Find the globally closest distance in the current domain DO dataPointIdx=1,numberOfDataPoints CALL Sorting_BubbleIndexSort(globalClosestDistances(dataPointIdx,:),sortingIndices1,err,error,*999) sortingIndices1(1:totalNumberOfClosestCandidates)=sortingIndices1(1:totalNumberOfClosestCandidates)- & - & globalMPIDisplacements(myComputationalNode+1) !shift the index to current computational node + & globalMPIDisplacements(myComputationNode+1) !shift the index to current computation node globalToLocalNumberOfClosestCandidates(dataPointIdx)=0 DO elementIdx=1,reducedNumberOfCLosestCandidates - !Sorted index indicates it is in the current computational domain + !Sorted index indicates it is in the current computation domain IF((sortingIndices1(elementIdx)>=1).AND. 
& - & (sortingIndices1(elementIdx)<=globalNumberOfClosestCandidates(myComputationalNode+1))) & + & (sortingIndices1(elementIdx)<=globalNumberOfClosestCandidates(myComputationNode+1))) & & globalToLocalNumberOfClosestCandidates(dataPointIdx)= & & globalToLocalNumberOfClosestCandidates(dataPointIdx)+1 ENDDO !elementIdx @@ -1814,47 +1814,47 @@ SUBROUTINE DataProjection_DataPointsProjectionEvaluate(dataProjection,projection END SELECT !Find the shortest projected distance in all domains CALL MPI_ALLREDUCE(MPI_IN_PLACE,projectedDistance,numberOfDataPoints,MPI_2DOUBLE_PRECISION,MPI_MINLOC, & - & computationalEnvironment%mpiCommunicator,MPIIError) + & worldCommunicator,MPIIError) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPIIError,err,error,*999) - !Sort the computational node/rank from 0 to number of computational node + !Sort the computation node/rank from 0 to number of computation node CALL Sorting_BubbleIndexSort(projectedDistance(2,:),sortingIndices2,err,error,*999) - DO computationalNodeIdx=0,(numberOfComputationalNodes-1) - globalNumberOfProjectedPoints(computationalNodeIdx+1)=COUNT(ABS(projectedDistance(2,:)- & - & REAL(computationalNodeIdx))1 + ENDIF !numberOfWorldComputationNodes>1 !Compute full elemental xi IF(dataProjection%numberOfXi==dataProjection%numberOfElementXi) THEN DO dataPointIdx=1,numberOfDataPoints @@ -2225,7 +2225,7 @@ SUBROUTINE DataProjection_NewtonElementsEvaluate_1(dataProjection,interpolatedPo TYPE(VARYING_STRING), INTENT(OUT) :: error !=1) THEN - IF(numberOfComputationalNodes>1) THEN - WRITE(analFilename,('(A,A,I0)')) filename(1:filenameLength),".opdataproj.",myComputationalNodeNumber + IF(numberOfWorldComputationNodes>1) THEN + WRITE(analFilename,('(A,A,I0)')) filename(1:filenameLength),".opdataproj.",myWorldComputationNodeNumber ELSE analFilename=filename(1:filenameLength)//".opdataproj" ENDIF diff --git a/src/distributed_matrix_vector.f90 b/src/distributed_matrix_vector.f90 index 927e4a10..0466793b 100755 --- a/src/distributed_matrix_vector.f90 +++ b/src/distributed_matrix_vector.f90 @@ -47,7 +47,8 @@ MODULE DistributedMatrixVector USE BaseRoutines USE CmissMPI USE CmissPetsc - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE DistributedMatrixVectorAccessRoutines USE INPUT_OUTPUT USE ISO_VARYING_STRING @@ -1660,7 +1661,7 @@ END SUBROUTINE DistributedMatrix_DataTypeSet !================================================================================================================================ ! - !>Gets the dimensions of a matrix on this computational node. + !>Gets the dimensions of a matrix on this computation node. 
SUBROUTINE DistributedMatrix_DimensionsGet(distributedMatrix,m,n,err,error,*) !Argument variables @@ -2467,7 +2468,7 @@ SUBROUTINE DistributedMatrix_PETScCreateFinish(petscMatrix,err,error,*) INTEGER(INTG), INTENT(OUT) :: err !0) THEN - myComputationalNodeNumber=ComputationalEnvironment_NodeNumberGet(err,error) - IF(err/=0) GOTO 999 + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myComputationNodeNumber,err,error,*999) IF(distributedVector%ghostingType==DISTRIBUTED_MATRIX_VECTOR_INCLUDE_GHOSTS_TYPE) THEN ALLOCATE(cmissVector%transfers(domainMapping%NUMBER_OF_ADJACENT_DOMAINS),STAT=err) IF(err/=0) CALL FlagError("Could not allocate CMISS distributed vector transfer buffers.",err,error,*999) @@ -5843,11 +5844,11 @@ SUBROUTINE DistributedVector_CMISSCreateFinish(cmissVector,err,error,*) cmissVector%transfers(domainIdx)%receiveBufferSize=domainMapping%ADJACENT_DOMAINS(domainIdx)%NUMBER_OF_RECEIVE_GHOSTS cmissVector%transfers(domainIdx)%dataType=distributedVector%dataType cmissVector%transfers(domainIdx)%sendTagNumber=cmissVector%baseTagNumber+ & - & domainMapping%ADJACENT_DOMAINS_PTR(myComputationalNodeNumber)+domainIdx-1 + & domainMapping%ADJACENT_DOMAINS_PTR(myComputationNodeNumber)+domainIdx-1 domainNumber=domainMapping%ADJACENT_DOMAINS(domainIdx)%DOMAIN_NUMBER found=.FALSE. DO domainIdx2=domainMapping%ADJACENT_DOMAINS_PTR(domainNumber),domainMapping%ADJACENT_DOMAINS_PTR(domainNumber+1)-1 - IF(domainMapping%ADJACENT_DOMAINS_LIST(domainIdx2)==myComputationalNodeNumber) THEN + IF(domainMapping%ADJACENT_DOMAINS_LIST(domainIdx2)==myComputationNodeNumber) THEN found=.TRUE. EXIT ENDIF @@ -6871,7 +6872,7 @@ SUBROUTINE DistributedVector_PETScCreateFinish(petscVector,err,error,*) INTEGER(INTG), INTENT(OUT) :: err !1) THEN CALL DistributedVector_UpdateWaitFinished(distributedVector,err,error,*999) !Copy the receive buffers back to the ghost positions in the data vector @@ -7347,7 +7348,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) INTEGER(INTG), INTENT(OUT) :: err !1) THEN + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfComputationNodes,err,error,*999) + IF(numberOfComputationNodes>1) THEN IF(domainMapping%NUMBER_OF_ADJACENT_DOMAINS>0) THEN !Fill in the send buffers with the send ghost values DO domainIdx=1,domainMapping%NUMBER_OF_ADJACENT_DOMAINS @@ -7409,8 +7410,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & cmissVector%transfers(domainIdx)%receiveBufferSize,MPI_INTEGER, & & domainMapping%ADJACENT_DOMAINS(domainIdx)%DOMAIN_NUMBER, & & cmissVector%transfers(domainIdx)%receiveTagNumber, & - & computationalEnvironment%mpiCommunicator, & - & cmissVector%transfers(domainIdx)%mpiReceiveRequest,mpiIError) + & worldCommunicator,cmissVector%transfers(domainIdx)%mpiReceiveRequest,mpiIError) CALL MPI_ErrorCheck("MPI_IRECV",mpiIError,err,error,*999) IF(diagnostics5) THEN CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE,"MPI IRECV call posted:",err,error,*999) @@ -7421,8 +7421,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & DOMAIN_NUMBER,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive tag = ",cmissVector%transfers(domainIdx)% & & receiveTagNumber,err,error,*999) - CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive comm = ",computationalEnvironment%mpiCommunicator, & - & err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," 
Receive comm = ",worldCommunicator,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive request = ",cmissVector%transfers(domainIdx)% & & mpiReceiveRequest,err,error,*999) ENDIF @@ -7431,8 +7430,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & cmissVector%transfers(domainIdx)%receiveBufferSize,MPI_REAL, & & domainMapping%ADJACENT_DOMAINS(domainIdx)%DOMAIN_NUMBER, & & cmissVector%transfers(domainIdx)%receiveTagNumber, & - & computationalEnvironment%mpiCommunicator, & - & cmissVector%transfers(domainIdx)%mpiReceiveRequest,mpiIError) + & worldCommunicator,cmissVector%transfers(domainIdx)%mpiReceiveRequest,mpiIError) CALL MPI_ErrorCheck("MPI_IRECV",mpiIError,err,error,*999) IF(diagnostics5) THEN CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE,"MPI IRECV call posted:",err,error,*999) @@ -7443,8 +7441,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & DOMAIN_NUMBER,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive tag = ",cmissVector%transfers(domainIdx)% & & receiveTagNumber,err,error,*999) - CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive comm = ",computationalEnvironment%mpiCommunicator, & - & err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive comm = ",worldCommunicator,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive request = ",cmissVector%transfers(domainIdx)% & & mpiReceiveRequest,err,error,*999) ENDIF @@ -7453,8 +7450,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & cmissVector%transfers(domainIdx)%receiveBufferSize,MPI_DOUBLE_PRECISION, & & domainMapping%ADJACENT_DOMAINS(domainIdx)%DOMAIN_NUMBER, & & cmissVector%transfers(domainIdx)%receiveTagNumber, & - & computationalEnvironment%mpiCommunicator, & - & cmissVector%transfers(domainIdx)%mpiReceiveRequest,mpiIError) + & worldCommunicator,cmissVector%transfers(domainIdx)%mpiReceiveRequest,mpiIError) CALL MPI_ErrorCheck("MPI_IRECV",mpiIError,err,error,*999) IF(diagnostics5) THEN CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE,"MPI IRECV call posted:",err,error,*999) @@ -7465,8 +7461,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & DOMAIN_NUMBER,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive tag = ",cmissVector%transfers(domainIdx)%receiveTagNumber, & & err,error,*999) - CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive comm = ",computationalEnvironment%mpiCommunicator, & - & err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive comm = ",worldCommunicator,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive request = ",cmissVector%transfers(domainIdx)% & & mpiReceiveRequest,err,error,*999) ENDIF @@ -7475,8 +7470,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & cmissVector%transfers(domainIdx)%receiveBufferSize,MPI_LOGICAL, & & domainMapping%ADJACENT_DOMAINS(domainIdx)%DOMAIN_NUMBER, & & cmissVector%transfers(domainIdx)%receiveTagNumber, & - & computationalEnvironment%mpiCommunicator, & - & cmissVector%transfers(domainIdx)%mpiReceiveRequest,mpiIError) + & worldCommunicator,cmissVector%transfers(domainIdx)%mpiReceiveRequest,mpiIError) CALL MPI_ErrorCheck("MPI_IRECV",mpiIError,err,error,*999) IF(diagnostics5) THEN CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE,"MPI IRECV call posted:",err,error,*999) @@ -7487,8 +7481,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & DOMAIN_NUMBER,err,error,*999) CALL 
WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive tag = ",cmissVector%transfers(domainIdx)% & & receiveTagNumber,err,error,*999) - CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive comm = ",computationalEnvironment%mpiCommunicator, & - & err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive comm = ",worldCommunicator,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Receive request = ",cmissVector%transfers(domainIdx)% & & mpiReceiveRequest,err,error,*999) ENDIF @@ -7510,8 +7503,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & cmissVector%transfers(domainIdx)%sendBufferSize,MPI_INTEGER, & & domainMapping%ADJACENT_DOMAINS(domainIdx)%DOMAIN_NUMBER, & & cmissVector%transfers(domainIdx)%sendTagNumber, & - & computationalEnvironment%mpiCommunicator, & - & cmissVector%transfers(domainIdx)%mpiSendRequest,mpiIError) + & worldCommunicator,cmissVector%transfers(domainIdx)%mpiSendRequest,mpiIError) CALL MPI_ErrorCheck("MPI_ISEND",mpiIError,err,error,*999) IF(diagnostics5) THEN CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE,"MPI ISEND call posted:",err,error,*999) @@ -7522,8 +7514,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & DOMAIN_NUMBER,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send tag = ",cmissVector%transfers(domainIdx)%sendTagNumber, & & err,error,*999) - CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send comm = ",computationalEnvironment%mpiCommunicator, & - & err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send comm = ",worldCommunicator,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send request = ",cmissVector%transfers(domainIdx)% & & mpiSendRequest,err,error,*999) ENDIF @@ -7532,8 +7523,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & cmissVector%transfers(domainIdx)%sendBufferSize,MPI_REAL, & & domainMapping%ADJACENT_DOMAINS(domainIdx)%DOMAIN_NUMBER, & & cmissVector%transfers(domainIdx)%sendTagNumber, & - & computationalEnvironment%mpiCommunicator, & - & cmissVector%transfers(domainIdx)%mpiSendRequest,mpiIError) + & worldCommunicator,cmissVector%transfers(domainIdx)%mpiSendRequest,mpiIError) CALL MPI_ErrorCheck("MPI_ISEND",mpiIError,err,error,*999) IF(diagnostics5) THEN CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE,"MPI ISEND call posted:",err,error,*999) @@ -7544,8 +7534,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & DOMAIN_NUMBER,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send tag = ",cmissVector%transfers(domainIdx)%sendTagNumber, & & err,error,*999) - CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send comm = ",computationalEnvironment%mpiCommunicator, & - & err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send comm = ",worldCommunicator,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send request = ",cmissVector%transfers(domainIdx)% & & mpiSendRequest,err,error,*999) ENDIF @@ -7554,8 +7543,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & cmissVector%transfers(domainIdx)%sendBufferSize,MPI_DOUBLE_PRECISION, & & domainMapping%ADJACENT_DOMAINS(domainIdx)%DOMAIN_NUMBER, & & cmissVector%transfers(domainIdx)%sendTagNumber, & - & computationalEnvironment%mpiCommunicator, & - & cmissVector%transfers(domainIdx)%mpiSendRequest,mpiIError) + & worldCommunicator,cmissVector%transfers(domainIdx)%mpiSendRequest,mpiIError) CALL MPI_ErrorCheck("MPI_ISEND",mpiIError,err,error,*999) IF(diagnostics5) THEN CALL 
WriteString(DIAGNOSTIC_OUTPUT_TYPE,"MPI ISEND call posted:",err,error,*999) @@ -7566,8 +7554,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & DOMAIN_NUMBER,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send tag = ",cmissVector%transfers(domainIdx)%sendTagNumber, & & err,error,*999) - CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send comm = ",computationalEnvironment%mpiCommunicator, & - & err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send comm = ",worldCommunicator,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send request = ",cmissVector%transfers(domainIdx)%mpiSendRequest, & & err,error,*999) ENDIF @@ -7576,8 +7563,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & cmissVector%transfers(domainIdx)%sendBufferSize,MPI_LOGICAL, & & domainMapping%ADJACENT_DOMAINS(domainIdx)%DOMAIN_NUMBER, & & cmissVector%transfers(domainIdx)%sendTagNumber, & - & computationalEnvironment%mpiCommunicator, & - & cmissVector%transfers(domainIdx)%mpiSendRequest,mpiIError) + & worldCommunicator,cmissVector%transfers(domainIdx)%mpiSendRequest,mpiIError) CALL MPI_ErrorCheck("MPI_ISEND",mpiIError,err,error,*999) IF(diagnostics5) THEN CALL WriteString(DIAGNOSTIC_OUTPUT_TYPE,"MPI ISEND call posted:",err,error,*999) @@ -7588,8 +7574,7 @@ SUBROUTINE DistributedVector_UpdateStart(distributedVector,err,error,*) & DOMAIN_NUMBER,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send tag = ",cmissVector%transfers(domainIdx)%sendTagNumber, & & err,error,*999) - CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send comm = ",computationalEnvironment%mpiCommunicator, & - & err,error,*999) + CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send comm = ",worldCommunicator,err,error,*999) CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," Send request = ",cmissVector%transfers(domainIdx)%mpiSendRequest, & & err,error,*999) ENDIF @@ -7897,7 +7882,7 @@ END SUBROUTINE DistributedVector_DotProductSP ! !>Calculates the dot product of 2 distributed double-precision vectors on this computational node - SUBROUTINE DistributedVector_DotProductDp(distributedVectorA,distributedVectorB,dotProduct,err,error,*) + SUBROUTINE DistributedVector_DotProductDP(distributedVectorA,distributedVectorB,dotProduct,err,error,*) !Argument variables TYPE(DistributedVectorType), INTENT(IN), POINTER :: distributedVectorA ! Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. 
!> -!> Contributor(s): +!> Contributor(s): Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -45,7 +45,8 @@ MODULE DOMAIN_MAPPINGS USE BaseRoutines - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE INPUT_OUTPUT USE ISO_VARYING_STRING USE KINDS @@ -150,6 +151,7 @@ SUBROUTINE DOMAIN_MAPPINGS_GLOBAL_TO_LOCAL_GET(DOMAIN_MAPPING,GLOBAL_NUMBER,LOCA INTEGER(INTG), INTENT(OUT) :: ERR !=1.AND.GLOBAL_NUMBER<=DOMAIN_MAPPING%NUMBER_OF_GLOBAL) THEN - IF(DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(GLOBAL_NUMBER)%DOMAIN_NUMBER(1)== & - & computationalEnvironment%myComputationalNodeNumber) THEN + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999) + IF(DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(GLOBAL_NUMBER)%DOMAIN_NUMBER(1)==myWorldComputationNodeNumber) THEN LOCAL_NUMBER=DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(GLOBAL_NUMBER)%LOCAL_NUMBER(1) LOCAL_EXISTS=.TRUE. ENDIF @@ -192,7 +194,7 @@ SUBROUTINE DOMAIN_MAPPINGS_LOCAL_FROM_GLOBAL_CALCULATE(DOMAIN_MAPPING,ERR,ERROR, TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !0) THEN TOTAL_NUMBER_OF_ADJACENT_DOMAINS=TOTAL_NUMBER_OF_ADJACENT_DOMAINS+1 - IF(domain_no==myComputationalNodeNumber) NUMBER_OF_ADJACENT_DOMAINS=NUMBER_OF_ADJACENT_DOMAINS+1 + IF(domain_no==myWorldComputationNodeNumber) NUMBER_OF_ADJACENT_DOMAINS=NUMBER_OF_ADJACENT_DOMAINS+1 ENDIF ENDIF ENDDO !domain_no2 @@ -375,8 +377,7 @@ SUBROUTINE DOMAIN_MAPPINGS_LOCAL_FROM_GLOBAL_CALCULATE(DOMAIN_MAPPING,ERR,ERROR, ! get number of current adjacent domain domain_no= & - & DOMAIN_MAPPING%ADJACENT_DOMAINS_LIST(DOMAIN_MAPPING%ADJACENT_DOMAINS_PTR(myComputationalNodeNumber)+domain_idx-1) - + & DOMAIN_MAPPING%ADJACENT_DOMAINS_LIST(DOMAIN_MAPPING%ADJACENT_DOMAINS_PTR(myWorldComputationNodeNumber)+domain_idx-1) ! set number in adjacent_domains and adjacent_domain_map DOMAIN_MAPPING%ADJACENT_DOMAINS(domain_idx)%DOMAIN_NUMBER=domain_no ADJACENT_DOMAIN_MAP(domain_no)=domain_idx @@ -430,7 +431,7 @@ SUBROUTINE DOMAIN_MAPPINGS_LOCAL_FROM_GLOBAL_CALCULATE(DOMAIN_MAPPING,ERR,ERROR, local_type=DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(global_number)%LOCAL_TYPE(domain_idx) IF(local_type/=DOMAIN_LOCAL_GHOST) THEN - IF(domain_no==myComputationalNodeNumber) SEND_GLOBAL=.TRUE. + IF(domain_no==myWorldComputationNodeNumber) SEND_GLOBAL=.TRUE. IF(RECEIVE_FROM_DOMAIN==-1) THEN RECEIVE_FROM_DOMAIN=domain_no ELSE @@ -456,7 +457,7 @@ SUBROUTINE DOMAIN_MAPPINGS_LOCAL_FROM_GLOBAL_CALCULATE(DOMAIN_MAPPING,ERR,ERROR, local_number=DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(global_number)%LOCAL_NUMBER(domain_idx) local_type=DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(global_number)%LOCAL_TYPE(domain_idx) - IF(domain_no==myComputationalNodeNumber) THEN + IF(domain_no==myWorldComputationNodeNumber) THEN ! 
set local number DOMAIN_MAPPING%LOCAL_TO_GLOBAL_MAP(local_number)=global_number diff --git a/src/equations_matrices_routines.f90 b/src/equations_matrices_routines.f90 index 6c852925..fe42f634 100755 --- a/src/equations_matrices_routines.f90 +++ b/src/equations_matrices_routines.f90 @@ -3286,9 +3286,9 @@ SUBROUTINE EquationsMatrices_VectorOutput(id,vectorMatrices,err,error,*) CALL DistributedVector_Output(id,sourceVector%vector,err,error,*999) ENDIF - EXITS("EquationsMatrices_Output") + EXITS("EquationsMatrices_VectorOutput") RETURN -999 ERRORSEXITS("EquationsMatrices_Output",err,error) +999 ERRORSEXITS("EquationsMatrices_VectorOutput",err,error) RETURN 1 END SUBROUTINE EquationsMatrices_VectorOutput diff --git a/src/equations_routines.f90 b/src/equations_routines.f90 index 365c8e21..ba77f897 100755 --- a/src/equations_routines.f90 +++ b/src/equations_routines.f90 @@ -362,7 +362,7 @@ SUBROUTINE Equations_EquationTypeGet(equations,equationType,err,error,*) EXITS("Equations_EquationTypeGet") RETURN -999 ERRORSEXITS("Equations_EquaitonTypeGet",err,error) +999 ERRORSEXITS("Equations_EquationTypeGet",err,error) RETURN 1 END SUBROUTINE Equations_EquationTypeGet diff --git a/src/equations_set_routines.f90 b/src/equations_set_routines.f90 index c37c09c7..0b42e9ac 100644 --- a/src/equations_set_routines.f90 +++ b/src/equations_set_routines.f90 @@ -51,7 +51,8 @@ MODULE EQUATIONS_SET_ROUTINES USE BOUNDARY_CONDITIONS_ROUTINES USE CLASSICAL_FIELD_ROUTINES USE CmissMPI - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE Constants USE COORDINATE_ROUTINES USE DistributedMatrixVector @@ -6277,7 +6278,7 @@ SUBROUTINE EQUATIONS_SET_BOUNDARY_CONDITIONS_INCREMENT(EQUATIONS_SET,BOUNDARY_CO TYPE(BOUNDARY_CONDITIONS_DIRICHLET_TYPE), POINTER :: DIRICHLET_BOUNDARY_CONDITIONS TYPE(BOUNDARY_CONDITIONS_PRESSURE_INCREMENTED_TYPE), POINTER :: PRESSURE_INCREMENTED_BOUNDARY_CONDITIONS INTEGER(INTG) :: variable_idx,variable_type,dirichlet_idx,dirichlet_dof_idx,neumann_point_dof - INTEGER(INTG) :: condition_idx, condition_global_dof, condition_local_dof, myComputationalNodeNumber + INTEGER(INTG) :: condition_idx, condition_global_dof, condition_local_dof, myWorldComputationNodeNumber REAL(DP), POINTER :: FULL_LOADS(:),CURRENT_LOADS(:), PREV_LOADS(:) REAL(DP) :: FULL_LOAD, CURRENT_LOAD, NEW_LOAD, PREV_LOAD TYPE(VARYING_STRING) :: localError @@ -6292,9 +6293,9 @@ SUBROUTINE EQUATIONS_SET_BOUNDARY_CONDITIONS_INCREMENT(EQUATIONS_SET,BOUNDARY_CO NULLIFY(PREV_LOADS) NULLIFY(CURRENT_LOADS) - myComputationalNodeNumber=ComputationalEnvironment_NodeNumberGet(err,error) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999) - !Take the stored load, scale it down appropriately then apply to the unknown variables + !Take the stored load, scale it down appropriately then apply to the unknown variables IF(ASSOCIATED(EQUATIONS_SET)) THEN IF(DIAGNOSTICS1) THEN CALL WriteStringValue(DIAGNOSTIC_OUTPUT_TYPE," equations set",EQUATIONS_SET%USER_NUMBER,err,error,*999) @@ -6339,7 +6340,7 @@ SUBROUTINE EQUATIONS_SET_BOUNDARY_CONDITIONS_INCREMENT(EQUATIONS_SET,BOUNDARY_CO & BOUNDARY_CONDITION_MOVED_WALL_INCREMENTED) !Convert dof index to local index IF(DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(dirichlet_dof_idx)%DOMAIN_NUMBER(1)== & - & myComputationalNodeNumber) THEN + & myWorldComputationNodeNumber) THEN dirichlet_dof_idx=DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(dirichlet_dof_idx)%LOCAL_NUMBER(1) IF(0 Auckland, the University of Oxford and King's College, 
London. !> All Rights Reserved. !> -!> Contributor(s): +!> Contributor(s): Heye Zhang !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -50,7 +50,8 @@ MODULE FIELD_IO_ROUTINES USE BasisAccessRoutines USE MESH_ROUTINES USE NODE_ROUTINES - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE COORDINATE_ROUTINES USE ISO_VARYING_STRING USE MACHINE_CONSTANTS @@ -1022,7 +1023,7 @@ END FUNCTION FIELD_IO_ELEMENT_DERIVATIVE_INDEX SUBROUTINE FIELD_IO_CREATE_FIELDS(NAME, REGION, DECOMPOSITION, FIELD_VALUES_SET_TYPE, NUMBER_OF_FIELDS, & !&USER_NODAL_NUMBER_MAP_GLOBAL_NODAL_NUMBER, &MESH_COMPONENTS_OF_FIELD_COMPONENTS, COMPONENTS_IN_FIELDS, NUMBER_OF_EXNODE_FILES, & - &MASTER_COMPUTATIONAL_NUMBER, myComputationalNodeNumber, FIELD_SCALING_TYPE, ERR, ERROR, *) + &MASTER_COMPUTATION_NUMBER, myWorldComputationNodeNumber, FIELD_SCALING_TYPE, ERR, ERROR, *) !Argument variables TYPE(VARYING_STRING), INTENT(IN) :: NAME TYPE(REGION_TYPE), POINTER :: REGION !=NUMBER_OF_EXNODE_FILES) EXIT ENDIF - !IF(MASTER_COMPUTATIONAL_NUMBER/=myComputationalNodeNumber) PRINT * , idx_exnode + !IF(MASTER_COMPUTATION_NUMBER/=myWorldComputationNodeNumber) PRINT * , idx_exnode !goto the start of mesh part - IF(MASTER_COMPUTATIONAL_NUMBER==myComputationalNodeNumber) THEN + IF(MASTER_COMPUTATION_NUMBER==myWorldComputationNodeNumber) THEN IF(FILE_END) THEN FILE_ID=1030+idx_exnode @@ -1301,14 +1303,13 @@ SUBROUTINE FIELD_IO_CREATE_FIELDS(NAME, REGION, DECOMPOSITION, FIELD_VALUES_SET_ NODE_SECTION=.TRUE. ENDDO !idx_field ENDIF !FILE_END==.FALSE..AND.SECTION_START=.TRUE..AND.NODE_SECTION=.FALSE. - ENDIF !MASTER_COMPUTATIONAL_NUMBER + ENDIF !MASTER_COMPUTATION_NUMBER !broadcasting total_number_of_devs - CALL MPI_BCAST(total_number_of_devs,1,MPI_INTEGER,MASTER_COMPUTATIONAL_NUMBER, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_BCAST(total_number_of_devs,1,MPI_INTEGER,MASTER_COMPUTATION_NUMBER,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_BCAST",MPI_IERROR,ERR,ERROR,*999) - IF(MASTER_COMPUTATIONAL_NUMBER/=myComputationalNodeNumber) THEN + IF(MASTER_COMPUTATION_NUMBER/=myWorldComputationNodeNumber) THEN CALL REALLOCATE( LIST_DEV, total_number_of_devs, & & "Could not allocate memory for nodal derivative index in non-master node", ERR, ERROR, *999 ) ENDIF @@ -1317,16 +1318,14 @@ SUBROUTINE FIELD_IO_CREATE_FIELDS(NAME, REGION, DECOMPOSITION, FIELD_VALUES_SET_ & "Could not allocate memory for nodal derivative index in non-master node", ERR, ERROR, *999 ) !broadcasting total_number_of_comps - CALL MPI_BCAST(LIST_DEV_POS,total_number_of_comps,MPI_INTEGER,MASTER_COMPUTATIONAL_NUMBER, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_BCAST(LIST_DEV_POS,total_number_of_comps,MPI_INTEGER,MASTER_COMPUTATION_NUMBER,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_BCAST",MPI_IERROR,ERR,ERROR,*999) !broadcasting total_number_of_devs - CALL MPI_BCAST(LIST_DEV,total_number_of_devs,MPI_INTEGER,MASTER_COMPUTATIONAL_NUMBER, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_BCAST(LIST_DEV,total_number_of_devs,MPI_INTEGER,MASTER_COMPUTATION_NUMBER,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_BCAST",MPI_IERROR,ERR,ERROR,*999) !goto the start of mesh part - IF(MASTER_COMPUTATIONAL_NUMBER==myComputationalNodeNumber) THEN + IF(MASTER_COMPUTATION_NUMBER==myWorldComputationNodeNumber) THEN !have not touched the end 
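!-------------------------------------------------------------------------------
! Illustrative sketch (not part of the patch): the hunks above move
! FIELD_IO_CREATE_FIELDS from computationalEnvironment%mpiCommunicator to a
! worldCommunicator handle, with the master rank reading the exnode data and
! broadcasting it to the other world ranks. A minimal, hedged sketch of that
! master/worker broadcast pattern follows as a hypothetical standalone
! subroutine; the subroutine name, the USE list, the integer buffer and the
! value 42 are assumptions made only for illustration.
SUBROUTINE ExampleMasterBroadcast(masterNumber,bufferValue,err,error,*)
  USE CmissMPI                  !assumed home of MPI_ERROR_CHECK
  USE ComputationAccessRoutines !assumed home of computationEnvironment and its getters
  USE ISO_VARYING_STRING
  USE KINDS
  USE MPI
  INTEGER(INTG), INTENT(IN) :: masterNumber   !<world rank that owns the data
  INTEGER(INTG), INTENT(INOUT) :: bufferValue !<defined on the master, received by all ranks
  INTEGER(INTG), INTENT(OUT) :: err
  TYPE(VARYING_STRING), INTENT(OUT) :: error
  INTEGER(INTG) :: myWorldComputationNodeNumber,worldCommunicator,MPI_IERROR

  !Query this rank's number and the world communicator through the new access routines.
  CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999)
  CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999)
  !Only the master rank defines the value; the broadcast then delivers it to every rank.
  IF(masterNumber==myWorldComputationNodeNumber) bufferValue=42
  CALL MPI_BCAST(bufferValue,1,MPI_INTEGER,masterNumber,worldCommunicator,MPI_IERROR)
  CALL MPI_ERROR_CHECK("MPI_BCAST",MPI_IERROR,err,error,*999)

  RETURN
999 RETURN 1
END SUBROUTINE ExampleMasterBroadcast
! End of sketch; the original file-reading code of FIELD_IO_CREATE_FIELDS continues below.
!-------------------------------------------------------------------------------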
IF((.NOT.FILE_END).AND.SECTION_START.AND.NODE_SECTION) THEN @@ -1378,17 +1377,15 @@ SUBROUTINE FIELD_IO_CREATE_FIELDS(NAME, REGION, DECOMPOSITION, FIELD_VALUES_SET_ IF(VERIFY(CMISS_KEYWORD_NODE, LINE)/=0) NODE_SECTION=.FALSE. ENDIF ENDIF !FILE_END==.FALSE..AND.SECTION_START=.TRUE..AND.NODE_SECTION=.TRUE. - ENDIF !(MASTER_COMPUTATIONAL_NUMBER==myComputationalNodeNumber) + ENDIF !(MASTER_COMPUTATION_NUMBER==myWorldComputationNodeNumber) !broadcasting total_number_of_devs - CALL MPI_BCAST(LIST_DEV_VALUE,total_number_of_devs,MPI_REAL8,MASTER_COMPUTATIONAL_NUMBER, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_BCAST(LIST_DEV_VALUE,total_number_of_devs,MPI_REAL8,MASTER_COMPUTATION_NUMBER,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_BCAST",MPI_IERROR,ERR,ERROR,*999) - CALL MPI_BCAST(NODAL_USER_NUMBER,1,MPI_INTEGER,MASTER_COMPUTATIONAL_NUMBER, & - & computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_BCAST(NODAL_USER_NUMBER,1,MPI_INTEGER,MASTER_COMPUTATION_NUMBER,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_BCAST",MPI_IERROR,ERR,ERROR,*999) - !IF(MASTER_COMPUTATIONAL_NUMBER/=myComputationalNodeNumber) THEN + !IF(MASTER_COMPUTATION_NUMBER/=myWorldComputationNodeNumber) THEN print *, "user number:" print *, NODAL_USER_NUMBER print *, LIST_DEV_VALUE @@ -1506,7 +1503,7 @@ END SUBROUTINE FIELD_IO_CREATE_DECOMPISTION !================================================================================================================================ ! - !>Import fields from files into different computational nodes + !>Import fields from files into different computation nodes SUBROUTINE FIELD_IO_FIELDS_IMPORT(NAME, METHOD, REGION, MESH, MESH_USER_NUMBER, DECOMPOSITION, DECOMPOSITION_USER_NUMBER, & &DECOMPOSITION_METHOD, FIELD_VALUES_SET_TYPE, FIELD_SCALING_TYPE, ERR, ERROR, *) !Argument variables @@ -1524,9 +1521,9 @@ SUBROUTINE FIELD_IO_FIELDS_IMPORT(NAME, METHOD, REGION, MESH, MESH_USER_NUMBER, INTEGER(INTG), INTENT(OUT) :: ERR !Read the global mesh into one computational node first and then broadcasting to others nodes - SUBROUTINE FIELD_IO_IMPORT_GLOBAL_MESH(NAME, REGION, MESH, MESH_USER_NUMBER, MASTER_COMPUTATIONAL_NUMBER, & - & myComputationalNodeNumber, &!USER_NODAL_NUMBER_MAP_GLOBAL_NODAL_NUMBER, + !>Read the global mesh into one computation node first and then broadcasting to others nodes + SUBROUTINE FIELD_IO_IMPORT_GLOBAL_MESH(NAME, REGION, MESH, MESH_USER_NUMBER, MASTER_COMPUTATION_NUMBER, & + & myWorldComputationNodeNumber, &!USER_NODAL_NUMBER_MAP_GLOBAL_NODAL_NUMBER, &MESH_COMPONENTS_OF_FIELD_COMPONENTS, & & COMPONENTS_IN_FIELDS, NUMBER_OF_FIELDS, NUMBER_OF_EXNODE_FILES, ERR, ERROR, *) !Argument variables @@ -1633,8 +1628,8 @@ SUBROUTINE FIELD_IO_IMPORT_GLOBAL_MESH(NAME, REGION, MESH, MESH_USER_NUMBER, MAS TYPE(MESH_TYPE), POINTER :: MESH !Write the header of a group elements using FORTRAN SUBROUTINE FieldIO_ExportElementalGroupHeaderFortran( global_number, MAX_NODE_COMP_INDEX,NUM_OF_SCALING_FACTOR_SETS, & - & LIST_COMP_SCALE, myComputationalNodeNumber, elementalInfoSet, sessionHandle, ERR,ERROR, *) + & LIST_COMP_SCALE, myWorldComputationNodeNumber, elementalInfoSet, sessionHandle, ERR,ERROR, *) !Argument variables INTEGER(INTG), INTENT(IN) :: global_number !elementalInfoSet%COMPONENTS(comp_idx)%PTR%DOMAIN !get the domain index for this variable component according to my own computional node number local_number = FindMyLocalDomainNumber( componentDomain%MAPPINGS%ELEMENTS%GLOBAL_TO_LOCAL_MAP( global_number ),& - & myComputationalNodeNumber 
) + & myWorldComputationNodeNumber ) GROUP_LOCAL_NUMBER(comp_idx)=local_number !use local domain information find the out the maximum number of derivatives DOMAIN_ELEMENTS=>componentDomain%TOPOLOGY%ELEMENTS @@ -3467,13 +3454,13 @@ END SUBROUTINE FieldIO_ExportElementalGroupHeaderFortran ! SUBROUTINE FIELD_IO_EXPORT_ELEMENT_SCALE_FACTORS( sessionHandle, components, componentScales, globalNumber, & - & myComputationalNodeNumber, ERR, ERROR, * ) + & myWorldComputationNodeNumber, ERR, ERROR, * ) !Argument variables INTEGER(INTG) :: sessionHandle TYPE(FIELD_IO_COMPONENT_INFO_SET), INTENT(INOUT) :: components ! component%DOMAIN%TOPOLOGY%ELEMENTS domainNodes => component%DOMAIN%TOPOLOGY%NODES @@ -3584,13 +3571,13 @@ END SUBROUTINE FIELD_IO_EXPORT_ELEMENT_SCALE_FACTORS ! !>Write all the elemental information from LOCAL_PROCESS_NODAL_INFO_SET to exelem files - SUBROUTINE FIELD_IO_EXPORT_ELEMENTS_INTO_LOCAL_FILE(ELEMENTAL_INFO_SET, NAME, myComputationalNodeNumber, & + SUBROUTINE FIELD_IO_EXPORT_ELEMENTS_INTO_LOCAL_FILE(ELEMENTAL_INFO_SET, NAME, myWorldComputationNodeNumber, & & ERR, ERROR, *) - !the reason that myComputationalNodeNumber is used in the argument is for future extension + !the reason that myWorldComputationNodeNumber is used in the argument is for future extension !Argument variables TYPE(FIELD_IO_INFO_SET), INTENT(INOUT) :: ELEMENTAL_INFO_SET !component%DOMAIN%TOPOLOGY%ELEMENTS !get the domain index for this variable component according to my own computional node number local_number = FindMyLocalDomainNumber( DOMAIN_MAPPING_ELEMENTS%GLOBAL_TO_LOCAL_MAP( global_number ), & - & myComputationalNodeNumber ) + & myWorldComputationNodeNumber ) !use local domain information find the out the maximum number of derivatives BASIS => DOMAIN_ELEMENTS%ELEMENTS( local_number )%BASIS @@ -3891,7 +3878,7 @@ SUBROUTINE FIELD_IO_EXPORT_ELEMENTS_INTO_LOCAL_FILE(ELEMENTAL_INFO_SET, NAME, my ENDIF CALL FIELD_IO_EXPORT_ELEMENT_SCALE_FACTORS( sessionHandle, components, & - & LIST_COMP_SCALE, global_number, myComputationalNodeNumber, ERR, ERROR, *999 ) + & LIST_COMP_SCALE, global_number, myWorldComputationNodeNumber, ERR, ERROR, *999 ) ENDDO !elem_idx @@ -3917,10 +3904,10 @@ END SUBROUTINE FIELD_IO_EXPORT_ELEMENTS_INTO_LOCAL_FILE ! !>Sort the Elemental_info_set according to the type of field variable components - SUBROUTINE FIELD_IO_ELEMENTAL_INFO_SET_SORT(ELEMENTAL_INFO_SET, myComputationalNodeNumber, ERR,ERROR,*) + SUBROUTINE FIELD_IO_ELEMENTAL_INFO_SET_SORT(ELEMENTAL_INFO_SET, myWorldComputationNodeNumber, ERR,ERROR,*) !Argument variables TYPE(FIELD_IO_INFO_SET), INTENT(INOUT) :: ELEMENTAL_INFO_SET !& & ELEMENTAL_INFO_SET%COMPONENT_INFO_SET(nn1)%PTR%COMPONENTS(component_idx)%PTR% & & DOMAIN%TOPOLOGY%ELEMENTS @@ -4021,7 +4008,7 @@ SUBROUTINE FIELD_IO_ELEMENTAL_INFO_SET_SORT(ELEMENTAL_INFO_SET, myComputationalN !get the domain index for this variable component according to my own computional node number !local number of nn2'th node in the damain assoicated with component(component_idx) local_number2 = FindMyLocalDomainNumber( DOMAIN_MAPPING_ELEMENTS%GLOBAL_TO_LOCAL_MAP( global_number2 ), & - & myComputationalNodeNumber ) + & myWorldComputationNodeNumber ) DOMAIN_ELEMENTS2=>& & ELEMENTAL_INFO_SET%COMPONENT_INFO_SET(nn2)%PTR%COMPONENTS(component_idx)%PTR% & & DOMAIN%TOPOLOGY%ELEMENTS @@ -4311,23 +4298,23 @@ END SUBROUTINE FieldIO_ElementalInfoSetAttachLocalProcess ! 
TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !Sort nodal information according to the type of field variable component - SUBROUTINE FIELD_IO_NODAL_INFO_SET_SORT(NODAL_INFO_SET, myComputationalNodeNumber, ERR,ERROR,*) + SUBROUTINE FIELD_IO_NODAL_INFO_SET_SORT(NODAL_INFO_SET, myWorldComputationNodeNumber, ERR,ERROR,*) !Argument variables TYPE(FIELD_IO_INFO_SET), INTENT(INOUT) :: NODAL_INFO_SET !Write the header of a group nodes using FORTRAIN !SUBROUTINE FIELD_IO_IMPORT_NODAL_GROUP_HEADER_FORTRAN(NODAL_INFO_SET, LOCAL_NODAL_NUMBER, MAX_NUM_OF_NODAL_DERIVATIVES, & - !&myComputationalNodeNumber, FILE_ID, ERR,ERROR, *) + !&myWorldComputationNodeNumber, FILE_ID, ERR,ERROR, *) ! !Argument variables ! TYPE(FIELD_IO_INFO_SET), INTENT(INOUT) :: NODAL_INFO_SET !NODAL_INFO_SET%COMPONENT_INFO_SET(LOCAL_NODAL_NUMBER)%& ! &COMPONENTS(comp_idx)%PTR%DOMAIN%MAPPINGS%NODES ! !get the domain index for this variable component according to my own computional node number - ! local_number = FindMyLocalDomainNumber( DOMAIN_MAPPING_NODES%GLOBAL_TO_LOCAL_MAP(global_number), myComputationalNodeNumber ) + ! local_number = FindMyLocalDomainNumber( DOMAIN_MAPPING_NODES%GLOBAL_TO_LOCAL_MAP(global_number), myWorldComputationNodeNumber ) ! !use local domain information find the out the maximum number of derivatives ! DOMAIN_NODES=>NODAL_INFO_SET%COMPONENT_INFO_SET(LOCAL_NODAL_NUMBER)%COMPONENTS(comp_idx)%PTR%DOMAIN%TOPOLOGY%NODES ! MAX_NUM_OF_NODAL_DERIVATIVES=MAX(DOMAIN_NODES%NODES(local_number)%NUMBER_OF_DERIVATIVES,MAX_NUM_OF_NODAL_DERIVATIVES) @@ -5121,7 +5108,7 @@ END FUNCTION FIELD_IO_GET_COMPONENT_INFO_LABEL ! DOMAIN_MAPPING_NODES=>NODAL_INFO_SET%COMPONENT_INFO_SET(LOCAL_NODAL_NUMBER)%COMPONENTS(comp_idx)%PTR%& ! &DOMAIN%MAPPINGS%NODES ! !get the domain index for this variable component according to my own computional node number - ! local_number = FindMyLocalDomainNumber( DOMAIN_MAPPING_NODES%GLOBAL_TO_LOCAL_MAP(global_number), myComputationalNodeNumber ) + ! local_number = FindMyLocalDomainNumber( DOMAIN_MAPPING_NODES%GLOBAL_TO_LOCAL_MAP(global_number), myWorldComputationNodeNumber ) ! !use local domain information find the out the maximum number of derivatives ! DOMAIN_NODES=>NODAL_INFO_SET%COMPONENT_INFO_SET(LOCAL_NODAL_NUMBER)%COMPONENTS(comp_idx)%PTR%DOMAIN%TOPOLOGY%NODES ! 
!get the nodal partial derivatives @@ -5167,12 +5154,12 @@ END FUNCTION FIELD_IO_GET_COMPONENT_INFO_LABEL !>Write the header of a group nodes using FORTRAIN SUBROUTINE FIELD_IO_EXPORT_NODAL_GROUP_HEADER_FORTRAN(fieldInfoSet, global_number, MAX_NUM_OF_NODAL_DERIVATIVES, & - &myComputationalNodeNumber, sessionHandle, paddingInfo, ERR,ERROR, *) + &myWorldComputationNodeNumber, sessionHandle, paddingInfo, ERR,ERROR, *) !Argument variables TYPE(FIELD_IO_COMPONENT_INFO_SET), INTENT(IN) :: fieldInfoSet INTEGER(INTG), INTENT(IN) :: global_number INTEGER(INTG), INTENT(INOUT) :: MAX_NUM_OF_NODAL_DERIVATIVES !Write all the nodal information from NODAL_INFO_SET to local exnode files - SUBROUTINE FIELD_IO_EXPORT_NODES_INTO_LOCAL_FILE(NODAL_INFO_SET, NAME, myComputationalNodeNumber,ERR, ERROR, *) - !the reason that myComputationalNodeNumber is used in the argument is for future extension + SUBROUTINE FIELD_IO_EXPORT_NODES_INTO_LOCAL_FILE(NODAL_INFO_SET, NAME, myWorldComputationNodeNumber,ERR, ERROR, *) + !the reason that myWorldComputationNodeNumber is used in the argument is for future extension !Argument variables TYPE(FIELD_IO_INFO_SET), INTENT(INOUT):: NODAL_INFO_SET !Collect nodal information from each MPI process - SUBROUTINE FieldIO_NodelInfoSetAttachLocalProcess(NODAL_INFO_SET, FIELDS, myComputationalNodeNumber, ERR,ERROR,*) + SUBROUTINE FieldIO_NodelInfoSetAttachLocalProcess(NODAL_INFO_SET, FIELDS, myWorldComputationNodeNumber, ERR,ERROR,*) !Argument variables TYPE(FIELD_IO_INFO_SET), INTENT(INOUT):: NODAL_INFO_SET ! Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): +!> Contributor(s): Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -47,7 +47,8 @@ MODULE FIELD_ROUTINES USE BaseRoutines USE BasisRoutines USE BasisAccessRoutines - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE COORDINATE_ROUTINES USE CmissMPI USE DistributedMatrixVector @@ -9892,10 +9893,10 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) & NUMBER_OF_CONSTANT_DOFS,NUMBER_OF_ELEMENT_DOFS,NUMBER_OF_NODE_DOFS,NUMBER_OF_GRID_POINT_DOFS,NUMBER_OF_GAUSS_POINT_DOFS, & & NUMBER_OF_LOCAL_VARIABLE_DOFS,TOTAL_NUMBER_OF_VARIABLE_DOFS,NUMBER_OF_DOMAINS,variable_global_ny, & & variable_local_ny,domain_idx,domain_no,constant_nyy,element_ny,element_nyy,node_ny,node_nyy,grid_point_nyy, & - & Gauss_point_nyy,version_idx,derivative_idx,ny,NUMBER_OF_COMPUTATIONAL_NODES, & - & myComputationalNodeNumber,domain_type_stop,start_idx,stop_idx,element_idx,node_idx,NUMBER_OF_LOCAL, NGP, MAX_NGP, & + & Gauss_point_nyy,version_idx,derivative_idx,ny,NUMBER_OF_COMPUTATION_NODES, & + & myWorldComputationNodeNumber,domain_type_stop,start_idx,stop_idx,element_idx,node_idx,NUMBER_OF_LOCAL, NGP, MAX_NGP, & & gp,MPI_IERROR,NUMBER_OF_GLOBAL_DOFS,gauss_point_idx,NUMBER_OF_DATA_POINT_DOFS,data_point_nyy,dataPointIdx,elementIdx, & - & localDataNumber,globalElementNumber + & localDataNumber,globalElementNumber,worldCommunicator INTEGER(INTG), ALLOCATABLE :: VARIABLE_LOCAL_DOFS_OFFSETS(:),VARIABLE_GHOST_DOFS_OFFSETS(:), & & localDataParamCount(:),ghostDataParamCount(:) TYPE(DECOMPOSITION_TYPE), POINTER :: DECOMPOSITION @@ -9910,10 +9911,9 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) ENTERS("FIELD_MAPPINGS_CALCULATE",ERR,ERROR,*999) IF(ASSOCIATED(FIELD)) THEN - 
NUMBER_OF_COMPUTATIONAL_NODES=ComputationalEnvironment_NumberOfNodesGet(ERR,ERROR) - IF(ERR/=0) GOTO 999 - myComputationalNodeNumber=ComputationalEnvironment_NodeNumberGet(ERR,ERROR) - IF(ERR/=0) GOTO 999 + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,NUMBER_OF_COMPUTATION_NODES,err,error,*999) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999) !Calculate the number of global and local degrees of freedom for the field variables and components. Each field variable !component has a set of DOFs so loop over the components for each variable component and count up the DOFs. DO variable_idx=1,FIELD%NUMBER_OF_VARIABLES @@ -9959,7 +9959,7 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) NGP=BASIS%QUADRATURE%QUADRATURE_SCHEME_MAP(BASIS_DEFAULT_QUADRATURE_SCHEME)%PTR%NUMBER_OF_GAUSS MAX_NGP=MAX(MAX_NGP,NGP) ENDDO !element_idx - CALL MPI_ALLREDUCE(MPI_IN_PLACE,MAX_NGP,1,MPI_INTEGER,MPI_MAX,computationalEnvironment%mpiCommunicator,MPI_IERROR) + CALL MPI_ALLREDUCE(MPI_IN_PLACE,MAX_NGP,1,MPI_INTEGER,MPI_MAX,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLREDUCE",MPI_IERROR,ERR,ERROR,*999) NUMBER_OF_GAUSS_POINT_DOFS=NUMBER_OF_GAUSS_POINT_DOFS+DOMAIN_TOPOLOGY%ELEMENTS%TOTAL_NUMBER_OF_ELEMENTS*MAX_NGP NUMBER_OF_LOCAL_VARIABLE_DOFS=NUMBER_OF_LOCAL_VARIABLE_DOFS+DOMAIN_TOPOLOGY%ELEMENTS%NUMBER_OF_ELEMENTS*MAX_NGP @@ -10028,7 +10028,7 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) IF(ERR/=0) CALL FlagError("Could not allocate variable ghost dofs offsets.",ERR,ERROR,*999) !We want to ensure that the ghost DOFs are at the end so loop over the DOFs in two passes. The first pass will process !the local DOFs for each variable component and the second pass will process the ghost DOFs for each variable component. - IF(NUMBER_OF_COMPUTATIONAL_NODES==1) THEN + IF(NUMBER_OF_COMPUTATION_NODES==1) THEN domain_type_stop=1 !Local only ELSE domain_type_stop=2 !Local+Ghosts @@ -10074,7 +10074,7 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) variable_global_ny=1+VARIABLE_GLOBAL_DOFS_OFFSET CALL DOMAIN_MAPPINGS_MAPPING_GLOBAL_INITIALISE(FIELD_VARIABLE_DOFS_MAPPING% & & GLOBAL_TO_LOCAL_MAP(variable_global_ny),ERR,ERROR,*999) - NUMBER_OF_DOMAINS=NUMBER_OF_COMPUTATIONAL_NODES !Constant is in all domains + NUMBER_OF_DOMAINS=NUMBER_OF_COMPUTATION_NODES !Constant is in all domains ALLOCATE(FIELD_VARIABLE_DOFS_MAPPING%GLOBAL_TO_LOCAL_MAP(variable_global_ny)%LOCAL_NUMBER(NUMBER_OF_DOMAINS), & & STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate field variable dofs global to local map local number.", & @@ -10454,7 +10454,7 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) decompositionTopology=>domain%DECOMPOSITION%TOPOLOGY IF(domain_type_idx==1) THEN ! domain_type_idx==1 -> non ghosts !Allocate parameter to dof map for this field variable component - !including both local and ghost data points on this computational domain. + !including both local and ghost data points on this computation domain. 
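!-------------------------------------------------------------------------------
! Illustrative sketch (not part of the patch): the hunk above shows the recurring
! migration in this patch from the old function-style getters, e.g.
!   NUMBER_OF_COMPUTATIONAL_NODES=ComputationalEnvironment_NumberOfNodesGet(ERR,ERROR)
! to subroutine-style access routines that take the computationEnvironment object
! and an alternate-return error label. A minimal, hedged sketch of the new calling
! pattern follows as a hypothetical standalone subroutine; the subroutine name and
! the USE list are assumptions made only for illustration.
SUBROUTINE ExampleComputationEnvironmentQuery(numberOfNodes,myNodeNumber,worldCommunicator,err,error,*)
  USE ComputationAccessRoutines !assumed home of computationEnvironment and its getters
  USE ISO_VARYING_STRING
  USE KINDS
  INTEGER(INTG), INTENT(OUT) :: numberOfNodes     !<number of ranks in the world work group
  INTEGER(INTG), INTENT(OUT) :: myNodeNumber      !<this rank's number in the world work group
  INTEGER(INTG), INTENT(OUT) :: worldCommunicator !<MPI communicator of the world work group
  INTEGER(INTG), INTENT(OUT) :: err
  TYPE(VARYING_STRING), INTENT(OUT) :: error

  CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999)
  CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfNodes,err,error,*999)
  CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myNodeNumber,err,error,*999)

  RETURN
999 RETURN 1
END SUBROUTINE ExampleComputationEnvironmentQuery
! End of sketch; the allocation described by the comment above follows.
!-------------------------------------------------------------------------------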
ALLOCATE(FIELD_COMPONENT%PARAM_TO_DOF_MAP%DATA_POINT_PARAM2DOF_MAP%DATA_POINTS(decompositionTopology% & & dataPoints%totalNumberOfDataPoints),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate field component parameter to dof data point map.",ERR,ERROR,*999) @@ -10602,13 +10602,13 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) CASE(FIELD_CONSTANT_INTERPOLATION) DO component_idx=1,FIELD%VARIABLES(variable_idx)%NUMBER_OF_COMPONENTS FIELD_COMPONENT=>FIELD%VARIABLES(variable_idx)%COMPONENTS(component_idx) - variable_local_ny=1+VARIABLE_LOCAL_DOFS_OFFSETS(myComputationalNodeNumber) + variable_local_ny=1+VARIABLE_LOCAL_DOFS_OFFSETS(myWorldComputationNodeNumber) !Allocate and set up global to local domain map for variable mapping IF(ASSOCIATED(FIELD_VARIABLE_DOFS_MAPPING)) THEN variable_global_ny=1+VARIABLE_GLOBAL_DOFS_OFFSET CALL DOMAIN_MAPPINGS_MAPPING_GLOBAL_INITIALISE(FIELD_VARIABLE_DOFS_MAPPING% & & GLOBAL_TO_LOCAL_MAP(variable_global_ny),ERR,ERROR,*999) - NUMBER_OF_DOMAINS=NUMBER_OF_COMPUTATIONAL_NODES !Constant is in all domains + NUMBER_OF_DOMAINS=NUMBER_OF_COMPUTATION_NODES !Constant is in all domains ALLOCATE(FIELD_VARIABLE_DOFS_MAPPING%GLOBAL_TO_LOCAL_MAP(variable_global_ny)%LOCAL_NUMBER(NUMBER_OF_DOMAINS), & & STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate field variable dofs global to local map local number.", & @@ -10710,7 +10710,7 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) DO component_idx=1,FIELD%VARIABLES(variable_idx)%NUMBER_OF_COMPONENTS FIELD_COMPONENT=>FIELD%VARIABLES(variable_idx)%COMPONENTS(component_idx) element_ny=element_ny+1 - variable_local_ny=element_ny+VARIABLE_LOCAL_DOFS_OFFSETS(myComputationalNodeNumber) + variable_local_ny=element_ny+VARIABLE_LOCAL_DOFS_OFFSETS(myWorldComputationNodeNumber) element_nyy=element_nyy+1 !Setup dof to parameter map FIELD%VARIABLES(variable_idx)%DOF_TO_PARAM_MAP%DOF_TYPE(1,variable_local_ny)=FIELD_ELEMENT_DOF_TYPE @@ -10817,7 +10817,7 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) FIELD_COMPONENT=>FIELD%VARIABLES(variable_idx)%COMPONENTS(component_idx) DOMAIN=>FIELD_COMPONENT%DOMAIN node_ny=node_ny+1 - variable_local_ny=node_ny+VARIABLE_LOCAL_DOFS_OFFSETS(myComputationalNodeNumber) + variable_local_ny=node_ny+VARIABLE_LOCAL_DOFS_OFFSETS(myWorldComputationNodeNumber) node_nyy=node_nyy+1 version_idx=DOMAIN%TOPOLOGY%DOFS%DOF_INDEX(1,ny) derivative_idx=DOMAIN%TOPOLOGY%DOFS%DOF_INDEX(2,ny) @@ -10918,7 +10918,7 @@ SUBROUTINE FIELD_MAPPINGS_CALCULATE(FIELD,ERR,ERROR,*) FIELD_COMPONENT=>FIELD%VARIABLES(variable_idx)%COMPONENTS(component_idx) DOMAIN=>FIELD_COMPONENT%DOMAIN element_ny=element_ny+1 - variable_local_ny=element_ny+VARIABLE_LOCAL_DOFS_OFFSETS(myComputationalNodeNumber) + variable_local_ny=element_ny+VARIABLE_LOCAL_DOFS_OFFSETS(myWorldComputationNodeNumber) node_nyy=node_nyy+1 !Setup dof to parameter map FIELD%VARIABLES(variable_idx)%DOF_TO_PARAM_MAP%DOF_TYPE(1,variable_local_ny)=FIELD_GAUSS_POINT_DOF_TYPE @@ -23674,7 +23674,7 @@ SUBROUTINE Field_ParameterSetUpdateElementDataPointDP(field,variableType,fieldSe IF(ASSOCIATED(decompositionTopology)) THEN dataProjection=>field%dataProjection IF(ASSOCIATED(dataProjection)) THEN - ! Use element topology to check if data point is on current computational node + ! 
Use element topology to check if data point is on current computation node CALL DECOMPOSITION_TOPOLOGY_ELEMENT_CHECK_EXISTS(decompositionTopology,userElementNumber, & & userElementExists,decompositionLocalElementNumber,ghostElement,err,error,*999) IF(userElementExists) THEN diff --git a/src/fieldml_input_routines.f90 b/src/fieldml_input_routines.f90 index 27b1f081..5ce715a3 100755 --- a/src/fieldml_input_routines.f90 +++ b/src/fieldml_input_routines.f90 @@ -26,7 +26,7 @@ !> Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): +!> Contributor(s): Caton Little !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -50,7 +50,8 @@ MODULE FIELDML_INPUT_ROUTINES USE BasisAccessRoutines USE CMISS USE CONSTANTS - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE COORDINATE_ROUTINES USE FIELD_ROUTINES USE FieldAccessRoutines @@ -1262,7 +1263,7 @@ SUBROUTINE FieldmlInput_FieldNodalParametersUpdate( FIELDML_INFO, EVALUATOR_NAME INTEGER(INTG), TARGET :: OFFSETS(2), SIZES(2) REAL(C_DOUBLE), ALLOCATABLE, TARGET :: BUFFER(:) INTEGER(INTG) :: READER - INTEGER(INTG) :: myComputationalNodeNumber,nodeDomain,meshComponentNumber + INTEGER(INTG) :: myWorldComputationNodeNumber,nodeDomain,meshComponentNumber ENTERS( "FieldmlInput_FieldNodalParametersUpdate", ERR, ERROR, *999 ) @@ -1313,10 +1314,10 @@ SUBROUTINE FieldmlInput_FieldNodalParametersUpdate( FIELDML_INFO, EVALUATOR_NAME !Default to version 1 of each node derivative (value hardcoded in loop) VERSION_NUMBER = 1 - myComputationalNodeNumber = ComputationalEnvironment_NodeNumberGet(err,error) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999) CALL DECOMPOSITION_MESH_COMPONENT_NUMBER_GET(FIELD%DECOMPOSITION,meshComponentNumber,err,error,*999) CALL DECOMPOSITION_NODE_DOMAIN_GET(FIELD%DECOMPOSITION,NODE_NUMBER,meshComponentNumber,nodeDomain,err,error,*999) - IF(nodeDomain==myComputationalNodeNumber) THEN + IF(nodeDomain==myWorldComputationNodeNumber) THEN CALL FIELD_PARAMETER_SET_UPDATE_NODE( FIELD, VARIABLE_TYPE, SET_TYPE, VERSION_NUMBER, & & NO_GLOBAL_DERIV, NODE_NUMBER, COMPONENT_NUMBER, BUFFER( COMPONENT_NUMBER ), ERR, ERROR, *999 ) ENDIF diff --git a/src/fieldml_output_routines.f90 b/src/fieldml_output_routines.f90 index ed104d35..af7126b8 100755 --- a/src/fieldml_output_routines.f90 +++ b/src/fieldml_output_routines.f90 @@ -49,7 +49,7 @@ MODULE FIELDML_OUTPUT_ROUTINES USE BasisRoutines USE COORDINATE_ROUTINES USE CONSTANTS - USE ComputationEnvironment + USE ComputationAccessRoutines USE FIELD_ROUTINES USE FieldAccessRoutines USE FIELDML_API @@ -1533,7 +1533,7 @@ SUBROUTINE FIELDML_OUTPUT_ADD_FIELD_NODE_DOFS( FIELDML_INFO, BASE_NAME, DOF_FORM LOGICAL, ALLOCATABLE :: IS_NODE_BASED(:) TYPE(C_PTR) :: SIZE_POINTER TYPE(VARYING_STRING) :: ARRAY_LOCATION - INTEGER(INTG) :: myComputationalNodeNumber,nodeDomain,meshComponentNumber + INTEGER(INTG) :: myWorldComputationNodeNumber,nodeDomain,meshComponentNumber ENTERS( "FIELDML_OUTPUT_ADD_FIELD_NODE_DOFS", ERR, ERROR, *999 ) @@ -1630,10 +1630,10 @@ SUBROUTINE FIELDML_OUTPUT_ADD_FIELD_NODE_DOFS( FIELDML_INFO, BASE_NAME, DOF_FORM !Default to version 1 of each node derivative (value hardcoded in loop) VERSION_NUMBER = 1 - myComputationalNodeNumber = ComputationalEnvironment_NodeNumberGet(err,error) + CALL 
ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999) CALL DECOMPOSITION_MESH_COMPONENT_NUMBER_GET(FIELD%DECOMPOSITION,meshComponentNumber,err,error,*999) CALL DECOMPOSITION_NODE_DOMAIN_GET(FIELD%DECOMPOSITION,I,meshComponentNumber,nodeDomain,err,error,*999) - IF(nodeDomain==myComputationalNodeNumber) THEN + IF(nodeDomain==myWorldComputationNodeNumber) THEN CALL FIELD_PARAMETER_SET_GET_NODE( FIELD, VARIABLE_TYPE, SET_TYPE, VERSION_NUMBER, & & NO_GLOBAL_DERIV, I, FIELD_COMPONENT_NUMBERS(J), DVALUE, ERR, ERROR, *999 ) ENDIF diff --git a/src/finite_elasticity_routines.f90 b/src/finite_elasticity_routines.f90 index ab1ebcf6..c097d216 100644 --- a/src/finite_elasticity_routines.f90 +++ b/src/finite_elasticity_routines.f90 @@ -26,7 +26,7 @@ !> Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): Kumar Mithraratne, Jack Lee, Alice Hung, Sander Arens +!> Contributor(s): Chris Bradley, Kumar Mithraratne, Jack Lee, Alice Hung, Sander Arens !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -48,7 +48,8 @@ MODULE FINITE_ELASTICITY_ROUTINES USE BasisRoutines USE BasisAccessRoutines USE BOUNDARY_CONDITIONS_ROUTINES - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE Constants USE CONTROL_LOOP_ROUTINES USE ControlLoopAccessRoutines @@ -164,7 +165,7 @@ SUBROUTINE FiniteElasticity_BoundaryConditionsAnalyticCalculate(EQUATIONS_SET,BO TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !Evaluates the functions f(J) and f\'(J); - !> Eq.(21) in Chapelle, Gerbeau, Sainte-Marie, Vignon-Clementel, Computational Mechanics (2010) + !> Eq.(21) in Chapelle, Gerbeau, Sainte-Marie, Vignon-Clementel, Computational Mechanics (2010) SUBROUTINE EVALUATE_CHAPELLE_FUNCTION(Jznu,ffact,dfdJfact,err,error,*) !Argument variables diff --git a/src/generated_mesh_routines.f90 b/src/generated_mesh_routines.f90 index 589f2246..eeba9d63 100755 --- a/src/generated_mesh_routines.f90 +++ b/src/generated_mesh_routines.f90 @@ -46,20 +46,21 @@ MODULE GENERATED_MESH_ROUTINES USE BaseRoutines USE BasisRoutines - USE ComputationEnvironment - USE CONSTANTS + USE ComputationRoutines + USE ComputationAccessRoutines + USE Constants USE COORDINATE_ROUTINES USE FIELD_ROUTINES USE FieldAccessRoutines USE GeneratedMeshAccessRoutines USE INPUT_OUTPUT USE ISO_VARYING_STRING - USE KINDS - USE MATHS + USE Kinds + USE Maths USE MESH_ROUTINES USE NODE_ROUTINES - USE STRINGS - USE TYPES + USE Strings + USE Types #include "macros.h" @@ -3305,7 +3306,7 @@ SUBROUTINE GeneratedMesh_RegularGeometricParametersCalculate(REGULAR_MESH,FIELD, END IF ENDIF END SELECT - !Update geometric parameters in this computational domain only + !Update geometric parameters in this computation domain only DOMAIN=>FIELD_VARIABLE_COMPONENT%DOMAIN DOMAIN_NODES=>DOMAIN%TOPOLOGY%NODES DO component_node=1,TOTAL_NUMBER_OF_NODES_XI(1)*TOTAL_NUMBER_OF_NODES_XI(2)*TOTAL_NUMBER_OF_NODES_XI(3) @@ -3570,7 +3571,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI TYPE(DOMAIN_NODES_TYPE), POINTER :: DOMAIN_NODES TYPE(FIELD_VARIABLE_TYPE), POINTER :: FIELD_VARIABLE TYPE(FIELD_VARIABLE_COMPONENT_TYPE), POINTER :: FIELD_VARIABLE_COMPONENT - INTEGER(INTG) :: MY_COMPUTATIONAL_NODE,DOMAIN_NUMBER,MESH_COMPONENT,basis_idx INTEGER(INTG) :: 
NUMBER_ELEMENTS_XI(3),NUMBER_OF_NODES_XIC(3) INTEGER(INTG) :: TOTAL_NUMBER_NODES_XI(3),INTERPOLATION_TYPES(3) INTEGER(INTG) :: component_idx,xi_idx @@ -3586,7 +3587,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI ENTERS("GeneratedMesh_EllipsoidGeometricParametersCalculate",ERR,ERROR,*999) - MY_COMPUTATIONAL_NODE=ComputationalEnvironment_NodeNumberGet(ERR,ERROR) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,MY_COMPUTATION_NODE,err,error,*999) ! assign to the field np=0 @@ -3649,7 +3650,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=1 npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN RECT_COORDS(1)=0 RECT_COORDS(2)=0 RECT_COORDS(3)=-ELLIPSOID_EXTENT(1) @@ -3676,7 +3677,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=np+1 npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN DO component_idx=1,FIELD_VARIABLE%NUMBER_OF_COMPONENTS CALL FIELD_PARAMETER_SET_UPDATE_NODE(FIELD,FIELD_U_VARIABLE_TYPE,FIELD_VALUES_SET_TYPE,1,1,npg, & & component_idx,RECT_COORDS(component_idx),ERR,ERROR,*999) @@ -3699,7 +3700,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=np+1 npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN DO component_idx=1,FIELD_VARIABLE%NUMBER_OF_COMPONENTS CALL FIELD_PARAMETER_SET_UPDATE_NODE(FIELD,FIELD_U_VARIABLE_TYPE,FIELD_VALUES_SET_TYPE,1,1,npg, & & component_idx,RECT_COORDS(component_idx),ERR,ERROR,*999) @@ -3730,7 +3731,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=np+1 npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN DO component_idx=1,FIELD_VARIABLE%NUMBER_OF_COMPONENTS CALL FIELD_PARAMETER_SET_UPDATE_NODE(FIELD,FIELD_U_VARIABLE_TYPE,FIELD_VALUES_SET_TYPE,1,1,npg, & & component_idx,RECT_COORDS(component_idx),ERR,ERROR,*999) @@ -3757,7 +3758,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=np+1 npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN DO component_idx=1,FIELD_VARIABLE%NUMBER_OF_COMPONENTS CALL FIELD_PARAMETER_SET_UPDATE_NODE(FIELD,FIELD_U_VARIABLE_TYPE,FIELD_VALUES_SET_TYPE,1,1,npg, & & component_idx,RECT_COORDS(component_idx),ERR,ERROR,*999) @@ -3780,7 +3781,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=np+1 
npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN DO component_idx=1,FIELD_VARIABLE%NUMBER_OF_COMPONENTS CALL FIELD_PARAMETER_SET_UPDATE_NODE(FIELD,FIELD_U_VARIABLE_TYPE,FIELD_VALUES_SET_TYPE,1,1,npg, & & component_idx,RECT_COORDS(component_idx),ERR,ERROR,*999) @@ -3807,7 +3808,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=1 npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN RECT_COORDS(1)=0 RECT_COORDS(2)=0 RECT_COORDS(3)=-ELLIPSOID_EXTENT(1) @@ -3833,7 +3834,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=np+1 npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN DO component_idx=1,FIELD_VARIABLE%NUMBER_OF_COMPONENTS CALL FIELD_PARAMETER_SET_UPDATE_NODE(FIELD,FIELD_U_VARIABLE_TYPE,FIELD_VALUES_SET_TYPE,1,1,npg, & & component_idx,RECT_COORDS(component_idx),ERR,ERROR,*999) @@ -3856,7 +3857,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=np+1 npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN DO component_idx=1,FIELD_VARIABLE%NUMBER_OF_COMPONENTS CALL FIELD_PARAMETER_SET_UPDATE_NODE(FIELD,FIELD_U_VARIABLE_TYPE,FIELD_VALUES_SET_TYPE,1,1,npg, & & component_idx,RECT_COORDS(component_idx),ERR,ERROR,*999) @@ -3887,7 +3888,7 @@ SUBROUTINE GeneratedMesh_EllipsoidGeometricParametersCalculate(ELLIPSOID_MESH,FI np=np+1 npg=COMPONENT_NODE_TO_USER_NUMBER(ELLIPSOID_MESH%GENERATED_MESH,basis_idx,np,ERR,ERROR) CALL DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,npg,MESH_COMPONENT,DOMAIN_NUMBER,ERR,ERROR,*999) - IF(DOMAIN_NUMBER==MY_COMPUTATIONAL_NODE) THEN + IF(DOMAIN_NUMBER==MY_COMPUTATION_NODE) THEN DO component_idx=1,FIELD_VARIABLE%NUMBER_OF_COMPONENTS CALL FIELD_PARAMETER_SET_UPDATE_NODE(FIELD,FIELD_U_VARIABLE_TYPE,FIELD_VALUES_SET_TYPE,1,1,npg, & & component_idx,RECT_COORDS(component_idx),ERR,ERROR,*999) diff --git a/src/mesh_routines.f90 b/src/mesh_routines.f90 index e5d467f7..2fa8fa6d 100644 --- a/src/mesh_routines.f90 +++ b/src/mesh_routines.f90 @@ -26,7 +26,7 @@ !> Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. 
!> -!> Contributor(s): +!> Contributor(s): Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -49,7 +49,8 @@ MODULE MESH_ROUTINES USE BasisAccessRoutines USE CmissMPI USE CMISS_PARMETIS - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE COORDINATE_ROUTINES USE DataProjectionAccessRoutines USE DOMAIN_MAPPINGS @@ -154,6 +155,8 @@ MODULE MESH_ROUTINES PUBLIC DECOMPOSITION_NUMBER_OF_DOMAINS_GET,DECOMPOSITION_NUMBER_OF_DOMAINS_SET + PUBLIC Decomposition_WorkGroupSet + PUBLIC DecompositionTopology_DataPointCheckExists PUBLIC DecompositionTopology_DataProjectionCalculate @@ -282,7 +285,7 @@ END SUBROUTINE DECOMPOSITION_ADJACENT_ELEMENT_INITIALISE !================================================================================================================================ ! - !>Finishes the creation of a domain decomposition on a given mesh. \see OPENCMISS::Iron::cmfe_DecompositionCreateFinish + !>Finishes the creation of a domain decomposition on a given mesh. \see OpenCMISS::Iron::cmfe_Decomposition_CreateFinish SUBROUTINE DECOMPOSITION_CREATE_FINISH(DECOMPOSITION,ERR,ERROR,*) !Argument variables @@ -300,7 +303,7 @@ SUBROUTINE DECOMPOSITION_CREATE_FINISH(DECOMPOSITION,ERR,ERROR,*) CALL DECOMPOSITION_ELEMENT_DOMAIN_CALCULATE(DECOMPOSITION,ERR,ERROR,*999) !Initialise the topology information for this decomposition CALL DECOMPOSITION_TOPOLOGY_INITIALISE(DECOMPOSITION,ERR,ERROR,*999) - !Initialise the domain for this computational node + !Initialise the domain for this computation node CALL DOMAIN_INITIALISE(DECOMPOSITION,ERR,ERROR,*999) !Calculate the decomposition topology CALL DECOMPOSITION_TOPOLOGY_CALCULATE(DECOMPOSITION,ERR,ERROR,*999) @@ -332,13 +335,14 @@ SUBROUTINE DECOMPOSITION_CREATE_FINISH(DECOMPOSITION,ERR,ERROR,*) RETURN 999 ERRORSEXITS("DECOMPOSITION_CREATE_FINISH",ERR,ERROR) RETURN 1 + END SUBROUTINE DECOMPOSITION_CREATE_FINISH ! !================================================================================================================================ ! - !>Starts the creation of a domain decomposition for a given mesh. \see OPENCMISS::Iron::cmfe_DecompositionCreateStart + !>Starts the creation of a domain decomposition for a given mesh. \see OpenCMISS::Iron::cmfe_Decomposition_CreateStart SUBROUTINE DECOMPOSITION_CREATE_START(USER_NUMBER,MESH,DECOMPOSITION,ERR,ERROR,*) !Argument variables @@ -349,9 +353,10 @@ SUBROUTINE DECOMPOSITION_CREATE_START(USER_NUMBER,MESH,DECOMPOSITION,ERR,ERROR,* TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !worldWorkGroup newDecomposition%NUMBER_OF_DOMAINS=1 newDecomposition%numberOfElements=mesh%NUMBER_OF_ELEMENTS ALLOCATE(newDecomposition%ELEMENT_DOMAIN(MESH%NUMBER_OF_ELEMENTS),STAT=ERR) @@ -433,7 +441,7 @@ END SUBROUTINE DECOMPOSITION_CREATE_START !================================================================================================================================ ! - !>Destroys a domain decomposition identified by a user number and deallocates all memory. \see OPENCMISS::Iron::cmfe_DecompositionDestroy + !>Destroys a domain decomposition identified by a user number and deallocates all memory. 
\see OpenCMISS::Iron::cmfe_Decomposition_Destroy SUBROUTINE DECOMPOSITION_DESTROY_NUMBER(USER_NUMBER,MESH,ERR,ERROR,*) !Argument variables @@ -514,7 +522,7 @@ END SUBROUTINE DECOMPOSITION_DESTROY_NUMBER !================================================================================================================================ ! - !>Destroys a domain decomposition identified by an object and deallocates all memory. \see OPENCMISS::Iron::cmfe_DecompositionDestroy + !>Destroys a domain decomposition identified by an object and deallocates all memory. \see OpenCMISS::Iron::cmfe_Decomposition_Destroy SUBROUTINE DECOMPOSITION_DESTROY(DECOMPOSITION,ERR,ERROR,*) !Argument variables @@ -587,7 +595,7 @@ END SUBROUTINE DECOMPOSITION_DESTROY !================================================================================================================================ ! - !>Calculates the element domains for a decomposition of a mesh. \see OPENCMISS::Iron::cmfe_DecompositionElementDomainCalculate + !>Calculates the element domains for a decomposition of a mesh. \see OpenCMISS::Iron::cmfe_Decomposition_ElementDomainCalculate SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_CALCULATE(DECOMPOSITION,ERR,ERROR,*) !Argument variables @@ -595,13 +603,14 @@ SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_CALCULATE(DECOMPOSITION,ERR,ERROR,*) INTEGER(INTG), INTENT(OUT) :: ERR !(MESH%NUMBER_OF_ELEMENTS-ELEMENT_STOP)) & - & ELEMENT_STOP=MESH%NUMBER_OF_ELEMENTS-(number_computational_nodes-1-no_computational_node) + IF((number_computation_nodes-1-no_computation_node)>(MESH%NUMBER_OF_ELEMENTS-ELEMENT_STOP)) & + & ELEMENT_STOP=MESH%NUMBER_OF_ELEMENTS-(number_computation_nodes-1-no_computation_node) IF(ELEMENT_START>MESH%NUMBER_OF_ELEMENTS) ELEMENT_START=MESH%NUMBER_OF_ELEMENTS IF(ELEMENT_STOP>MESH%NUMBER_OF_ELEMENTS) ELEMENT_STOP=MESH%NUMBER_OF_ELEMENTS - DISPLACEMENTS(no_computational_node)=ELEMENT_START-1 - ELEMENT_DISTANCE(no_computational_node+1)=ELEMENT_STOP !C numbering + DISPLACEMENTS(no_computation_node)=ELEMENT_START-1 + ELEMENT_DISTANCE(no_computation_node+1)=ELEMENT_STOP !C numbering NUMBER_OF_ELEMENTS=ELEMENT_STOP-ELEMENT_START+1 - RECEIVE_COUNTS(no_computational_node)=NUMBER_OF_ELEMENTS + RECEIVE_COUNTS(no_computation_node)=NUMBER_OF_ELEMENTS IF(NUMBER_OF_ELEMENTS>MAX_NUMBER_ELEMENTS_PER_NODE) MAX_NUMBER_ELEMENTS_PER_NODE=NUMBER_OF_ELEMENTS - IF(no_computational_node==my_computational_node_number) THEN + IF(no_computation_node==my_computation_node_number) THEN MY_ELEMENT_START=ELEMENT_START MY_ELEMENT_STOP=ELEMENT_STOP MY_NUMBER_OF_ELEMENTS=ELEMENT_STOP-ELEMENT_START+1 @@ -675,7 +680,7 @@ SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_CALCULATE(DECOMPOSITION,ERR,ERROR,*) number_elem_indicies=number_elem_indicies+BASIS%NUMBER_OF_NODES ENDDO !ne ENDIF - ENDDO !no_computational_node + ENDDO !no_computation_node ALLOCATE(ELEMENT_PTR(0:MY_NUMBER_OF_ELEMENTS),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate element pointer list.",ERR,ERROR,*999) @@ -702,7 +707,7 @@ SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_CALCULATE(DECOMPOSITION,ERR,ERROR,*) !Set up ParMETIS variables WEIGHT_FLAG=0 !No weights ELEMENT_WEIGHT(1)=1 !Isn't used due to weight flag - NUMBER_FLAG=0 !C Numbering as there is a bug with Fortran numbering + NUMBER_FLAG=0 !C Numbering as there is a bug with Fortran numbering NUMBER_OF_CONSTRAINTS=1 IF(minNumberXi==1) THEN NUMBER_OF_COMMON_NODES=1 @@ -721,14 +726,14 @@ SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_CALCULATE(DECOMPOSITION,ERR,ERROR,*) !Call ParMETIS to calculate the partitioning of the mesh graph. 
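!-------------------------------------------------------------------------------
! Worked example (illustration only, not part of the patch): at this point each
! computation node has been assigned a contiguous block of mesh elements, and the
! per-node counts and offsets are held in RECEIVE_COUNTS, DISPLACEMENTS and
! ELEMENT_DISTANCE (the ParMETIS element-distribution array, in C numbering). As
! a hedged illustration, with 100 mesh elements on 4 world computation nodes the
! initial blocks would be roughly elements 1-25, 26-50, 51-75 and 76-100, giving
! ELEMENT_DISTANCE = (/0,25,50,75,100/); the exact split is whatever the
! block-partitioning code above computes. The ParMETIS call below then
! repartitions this initial distribution over worldCommunicator (obtained from
! ComputationEnvironment_WorldCommunicatorGet rather than the old
! computationalEnvironment%mpiCommunicator), and the MPI_ALLGATHERV that follows
! gives every rank the complete element-to-domain map.
!-------------------------------------------------------------------------------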
CALL PARMETIS_PARTMESHKWAY(ELEMENT_DISTANCE,ELEMENT_PTR,ELEMENT_INDICIES,ELEMENT_WEIGHT,WEIGHT_FLAG,NUMBER_FLAG, & & NUMBER_OF_CONSTRAINTS,NUMBER_OF_COMMON_NODES,DECOMPOSITION%NUMBER_OF_DOMAINS,TPWGTS,UBVEC,PARMETIS_OPTIONS, & - & DECOMPOSITION%NUMBER_OF_EDGES_CUT,DECOMPOSITION%ELEMENT_DOMAIN(DISPLACEMENTS(my_computational_node_number)+1:), & - & computationalEnvironment%mpiCommunicator,ERR,ERROR,*999) + & DECOMPOSITION%NUMBER_OF_EDGES_CUT,DECOMPOSITION%ELEMENT_DOMAIN(DISPLACEMENTS(my_computation_node_number)+1:), & + & worldCommunicator,ERR,ERROR,*999) - !Transfer all the element domain information to the other computational nodes so that each rank has all the info - IF(number_computational_nodes>1) THEN + !Transfer all the element domain information to the other computation nodes so that each rank has all the info + IF(number_computation_nodes>1) THEN !This should work on a single processor but doesn't for mpich2 under windows. Maybe a bug? Avoid for now. CALL MPI_ALLGATHERV(MPI_IN_PLACE,MAX_NUMBER_ELEMENTS_PER_NODE,MPI_INTEGER,DECOMPOSITION%ELEMENT_DOMAIN, & - & RECEIVE_COUNTS,DISPLACEMENTS,MPI_INTEGER,computationalEnvironment%mpiCommunicator,MPI_IERROR) + & RECEIVE_COUNTS,DISPLACEMENTS,MPI_INTEGER,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLGATHERV",MPI_IERROR,ERR,ERROR,*999) ENDIF @@ -748,28 +753,28 @@ SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_CALCULATE(DECOMPOSITION,ERR,ERROR,*) END SELECT !Check decomposition and check that each domain has an element in it. - ALLOCATE(ELEMENT_COUNT(0:number_computational_nodes-1),STAT=ERR) + ALLOCATE(ELEMENT_COUNT(0:number_computation_nodes-1),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate element count.",ERR,ERROR,*999) ELEMENT_COUNT=0 DO elem_index=1,MESH%NUMBER_OF_ELEMENTS - no_computational_node=DECOMPOSITION%ELEMENT_DOMAIN(elem_index) - IF(no_computational_node>=0.AND.no_computational_node=0.AND.no_computation_nodeGets the domain for a given element in a decomposition of a mesh. \todo should be able to specify lists of elements. \see OPENCMISS::Iron::cmfe_DecompositionElementDomainGet + !>Gets the domain for a given element in a decomposition of a mesh. \todo should be able to specify lists of elements. \see OpenCMISS::Iron::cmfe_Decomposition_ElementDomainGet SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_GET(DECOMPOSITION,USER_ELEMENT_NUMBER,DOMAIN_NUMBER,ERR,ERROR,*) !Argument variables @@ -892,7 +897,7 @@ END SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_GET !================================================================================================================================ ! - !>Sets the domain for a given element in a decomposition of a mesh. \todo move to user number, should be able to specify lists of elements. \see OPENCMISS::Iron::cmfe_DecompositionElementDomainSet + !>Sets the domain for a given element in a decomposition of a mesh. \todo move to user number, should be able to specify lists of elements. 
\see OpenCMISS::Iron::cmfe_Decomposition_ElementDomainSet SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_SET(DECOMPOSITION,GLOBAL_ELEMENT_NUMBER,DOMAIN_NUMBER,ERR,ERROR,*) !Argument variables @@ -902,7 +907,7 @@ SUBROUTINE DECOMPOSITION_ELEMENT_DOMAIN_SET(DECOMPOSITION,GLOBAL_ELEMENT_NUMBER, INTEGER(INTG), INTENT(OUT) :: ERR !MESH%TOPOLOGY(DECOMPOSITION%MESH_COMPONENT_NUMBER)%PTR IF(ASSOCIATED(MESH_TOPOLOGY)) THEN IF(GLOBAL_ELEMENT_NUMBER>0.AND.GLOBAL_ELEMENT_NUMBER<=MESH_TOPOLOGY%ELEMENTS%NUMBER_OF_ELEMENTS) THEN - number_computational_nodes=ComputationalEnvironment_NumberOfNodesGet(ERR,ERROR) - IF(ERR/=0) GOTO 999 - IF(DOMAIN_NUMBER>=0.AND.DOMAIN_NUMBER=0.AND.DOMAIN_NUMBERGets the mesh component number which will be used for the decomposition of a mesh. \see OPENCMISS::Iron::cmfe_DecompositionMeshComponentGet + !>Gets the mesh component number which will be used for the decomposition of a mesh. \see OpenCMISS::Iron::cmfe_DecompositionMeshComponentGet SUBROUTINE DECOMPOSITION_MESH_COMPONENT_NUMBER_GET(DECOMPOSITION,MESH_COMPONENT_NUMBER,ERR,ERROR,*) !Argument variables @@ -1075,7 +1081,7 @@ END SUBROUTINE DECOMPOSITION_MESH_COMPONENT_NUMBER_GET !================================================================================================================================ ! - !>Sets/changes the mesh component number which will be used for the decomposition of a mesh. \see OPENCMISS::Iron::cmfe_DecompositionMeshComponentSet + !>Sets/changes the mesh component number which will be used for the decomposition of a mesh. \see OpenCMISS::Iron::cmfe_DecompositionMeshComponentSet SUBROUTINE DECOMPOSITION_MESH_COMPONENT_NUMBER_SET(DECOMPOSITION,MESH_COMPONENT_NUMBER,ERR,ERROR,*) !Argument variables @@ -1121,7 +1127,7 @@ END SUBROUTINE DECOMPOSITION_MESH_COMPONENT_NUMBER_SET !!MERGE: ditto - !>Gets the number of domains for a decomposition. \see OPENCMISS::Iron::cmfe_DecompositionNumberOfDomainsGet + !>Gets the number of domains for a decomposition. \see OpenCMISS::Iron::cmfe_DecompositionNumberOfDomainsGet SUBROUTINE DECOMPOSITION_NUMBER_OF_DOMAINS_GET(DECOMPOSITION,NUMBER_OF_DOMAINS,ERR,ERROR,*) !Argument variables @@ -1154,7 +1160,7 @@ END SUBROUTINE DECOMPOSITION_NUMBER_OF_DOMAINS_GET !================================================================================================================================ ! - !>Sets/changes the number of domains for a decomposition. \see OPENCMISS::Iron::cmfe_DecompositionNumberOfDomainsSet + !>Sets/changes the number of domains for a decomposition. \see OpenCMISS::Iron::cmfe_DecompositionNumberOfDomainsSet SUBROUTINE DECOMPOSITION_NUMBER_OF_DOMAINS_SET(DECOMPOSITION,NUMBER_OF_DOMAINS,ERR,ERROR,*) !Argument variables @@ -1163,7 +1169,7 @@ SUBROUTINE DECOMPOSITION_NUMBER_OF_DOMAINS_SET(DECOMPOSITION,NUMBER_OF_DOMAINS,E INTEGER(INTG), INTENT(OUT) :: ERR !=1) THEN !wolfye???<=? IF(NUMBER_OF_DOMAINS<=DECOMPOSITION%numberOfElements) THEN - !Get the number of computational nodes - numberOfComputationalNodes=ComputationalEnvironment_NumberOfNodesGet(ERR,ERROR) - IF(ERR/=0) GOTO 999 + !Get the number of computation nodes + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes, & + & err,error,*999) !!TODO: relax this later - !IF(NUMBER_OF_DOMAINS==numberOfComputationalNodes) THEN + !IF(NUMBER_OF_DOMAINS==numberOfWorldComputationNodes) THEN DECOMPOSITION%NUMBER_OF_DOMAINS=NUMBER_OF_DOMAINS !ELSE ! LOCAL_ERROR="The number of domains ("//TRIM(NUMBER_TO_VSTRING(NUMBER_OF_DOMAINSS,"*",ERR,ERROR))// & - ! 
& ") is not equal to the number of computational nodes ("// & - ! & TRIM(NUMBER_TO_VSTRING(numberOfComputationalNodes,"*",ERR,ERROR))//")" + ! & ") is not equal to the number of computation nodes ("// & + ! & TRIM(NUMBER_TO_VSTRING(numberOfWorldComputationNodes,"*",ERR,ERROR))//")" ! CALL FlagError(LOCAL_ERROR,ERR,ERROR,*999) !ENDIF ELSE @@ -1224,6 +1230,38 @@ END SUBROUTINE DECOMPOSITION_NUMBER_OF_DOMAINS_SET !================================================================================================================================ ! + !>Sets the workgroup to use for a decomposition on a given mesh. \see OpenCMISS::Iron::cmfe_Decomposition_WorkGroupSet + SUBROUTINE Decomposition_WorkGroupSet(decomposition,workGroup,err,error,*) + + !Argument variables + TYPE(DECOMPOSITION_TYPE), POINTER :: decomposition !workGroup + + EXITS("Decomposition_WorkGroupSet") + RETURN +999 ERRORSEXITS("Decomposition_WorkGroupSet",err,error) + RETURN 1 + + END SUBROUTINE Decomposition_WorkGroupSet + + ! + !================================================================================================================================ + ! + !>Calculates the topology for a decomposition. SUBROUTINE DECOMPOSITION_TOPOLOGY_CALCULATE(DECOMPOSITION,ERR,ERROR,*) @@ -1274,8 +1312,8 @@ SUBROUTINE DecompositionTopology_DataPointsCalculate(TOPOLOGY,ERR,ERROR,*) INTEGER(INTG), INTENT(OUT) :: ERR !decompositionData%DECOMPOSITION IF(ASSOCIATED(decomposition)) THEN - decompositionElements=>TOPOLOGY%ELEMENTS - IF(ASSOCIATED(decompositionElements)) THEN - elementsMapping=>decomposition%DOMAIN(decomposition%MESH_COMPONENT_NUMBER)%PTR%MAPPINGS%ELEMENTS - IF(ASSOCIATED(elementsMapping)) THEN + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) + decompositionElements=>TOPOLOGY%ELEMENTS + IF(ASSOCIATED(decompositionElements)) THEN + elementsMapping=>decomposition%DOMAIN(decomposition%MESH_COMPONENT_NUMBER)%PTR%MAPPINGS%ELEMENTS + IF(ASSOCIATED(elementsMapping)) THEN meshComponentNumber=decomposition%MESH_COMPONENT_NUMBER meshData=>decomposition%MESH%TOPOLOGY(meshComponentNumber)%PTR%dataPoints IF(ASSOCIATED(meshData)) THEN - NUMBER_OF_COMPUTATIONAL_NODES=ComputationalEnvironment_NumberOfNodesGet(ERR,ERROR) - IF(ERR/=0) GOTO 999 - myComputationalNodeNumber=ComputationalEnvironment_NodeNumberGet(ERR,ERROR) - IF(ERR/=0) GOTO 999 - ALLOCATE(decompositionData%numberOfDomainLocal(0:NUMBER_OF_COMPUTATIONAL_NODES-1),STAT=ERR) - ALLOCATE(decompositionData%numberOfDomainGhost(0:NUMBER_OF_COMPUTATIONAL_NODES-1),STAT=ERR) + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,NUMBER_OF_COMPUTATION_NODES, & + & err,error,*999) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber, & + & err,error,*999) + ALLOCATE(decompositionData%numberOfDomainLocal(0:NUMBER_OF_COMPUTATION_NODES-1),STAT=ERR) + ALLOCATE(decompositionData%numberOfDomainGhost(0:NUMBER_OF_COMPUTATION_NODES-1),STAT=ERR) ALLOCATE(decompositionData%numberOfElementDataPoints(decompositionElements%NUMBER_OF_GLOBAL_ELEMENTS),STAT=ERR) ALLOCATE(decompositionData%elementDataPoint(decompositionElements%TOTAL_NUMBER_OF_ELEMENTS),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate decomposition element data points.",ERR,ERROR,*999) @@ -1340,16 +1379,16 @@ SUBROUTINE DecompositionTopology_DataPointsCalculate(TOPOLOGY,ERR,ERROR,*) & INSERT_STATUS,ERR,ERROR,*999) ENDDO !dataPointIdx ENDDO !localElement - !Calculate number of ghost data points on the current 
computational domain + !Calculate number of ghost data points on the current computation domain NUMBER_OF_LOCAL_DATA=decompositionData%numberOfDataPoints NUMBER_OF_GHOST_DATA=decompositionData%totalNumberOfDataPoints-decompositionData%numberOfDataPoints - !Gather number of local data points on all computational nodes + !Gather number of local data points on all computation nodes CALL MPI_ALLGATHER(NUMBER_OF_LOCAL_DATA,1,MPI_INTEGER,decompositionData% & - & numberOfDomainLocal,1,MPI_INTEGER,computationalEnvironment%mpiCommunicator,MPI_IERROR) + & numberOfDomainLocal,1,MPI_INTEGER,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLGATHER",MPI_IERROR,ERR,ERROR,*999) - !Gather number of ghost data points on all computational nodes + !Gather number of ghost data points on all computation nodes CALL MPI_ALLGATHER(NUMBER_OF_GHOST_DATA,1,MPI_INTEGER,decompositionData% & - & numberOfDomainGhost,1,MPI_INTEGER,computationalEnvironment%mpiCommunicator,MPI_IERROR) + & numberOfDomainGhost,1,MPI_INTEGER,worldCommunicator,MPI_IERROR) CALL MPI_ERROR_CHECK("MPI_ALLGATHER",MPI_IERROR,ERR,ERROR,*999) ELSE CALL FlagError("Mesh data points topology is not associated.",ERR,ERROR,*999) @@ -1374,6 +1413,7 @@ SUBROUTINE DecompositionTopology_DataPointsCalculate(TOPOLOGY,ERR,ERROR,*) RETURN 999 ERRORSEXITS("DecompositionTopology_DataPointsCalculate",ERR,ERROR) RETURN 1 + END SUBROUTINE DecompositionTopology_DataPointsCalculate ! @@ -3696,7 +3736,7 @@ END SUBROUTINE DECOMPOSITION_TOPOLOGY_FACES_INITIALISE !================================================================================================================================ ! - !>Gets the decomposition type for a decomposition. \see OPENCMISS::Iron::cmfe_DecompositionTypeGet + !>Gets the decomposition type for a decomposition. \see OpenCMISS::Iron::cmfe_DecompositionTypeGet SUBROUTINE DECOMPOSITION_TYPE_GET(DECOMPOSITION,TYPE,ERR,ERROR,*) !Argument variables @@ -3728,7 +3768,7 @@ END SUBROUTINE DECOMPOSITION_TYPE_GET !================================================================================================================================ ! - !>Sets/changes the decomposition type for a decomposition. \see OPENCMISS::Iron::cmfe_DecompositionTypeSet + !>Sets/changes the decomposition type for a decomposition. \see OpenCMISS::Iron::cmfe_DecompositionTypeSet SUBROUTINE DECOMPOSITION_TYPE_SET(DECOMPOSITION,TYPE,ERR,ERROR,*) !Argument variables @@ -3772,7 +3812,7 @@ END SUBROUTINE DECOMPOSITION_TYPE_SET !================================================================================================================================ ! - !>Sets/changes whether lines should be calculated in the the decomposition. \see OPENCMISS::Iron::cmfe_DecompositionCalculateLinesSet + !>Sets/changes whether lines should be calculated in the the decomposition. \see OpenCMISS::Iron::cmfe_DecompositionCalculateLinesSet SUBROUTINE DECOMPOSITION_CALCULATE_LINES_SET(DECOMPOSITION,CALCULATE_LINES_FLAG,ERR,ERROR,*) !Argument variables @@ -3803,7 +3843,7 @@ END SUBROUTINE DECOMPOSITION_CALCULATE_LINES_SET !================================================================================================================================ ! - !>Sets/changes whether faces should be calculated in the the decomposition. \see OPENCMISS::Iron::cmfe_DecompositionCalculateFacesSet + !>Sets/changes whether faces should be calculated in the the decomposition. 
\see OpenCMISS::Iron::cmfe_DecompositionCalculateFacesSet SUBROUTINE DECOMPOSITION_CALCULATE_FACES_SET(DECOMPOSITION,CALCULATE_FACES_FLAG,ERR,ERROR,*) !Argument variables @@ -4070,7 +4110,7 @@ SUBROUTINE DOMAIN_MAPPINGS_ELEMENTS_CALCULATE(DOMAIN,ERR,ERROR,*) TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !DOMAIN%MESH component_idx=DOMAIN%MESH_COMPONENT_NUMBER - myComputationalNodeNumber=ComputationalEnvironment_NodeNumberGet(ERR,ERROR) - IF(ERR/=0) GOTO 999 - !Calculate the local and global numbers and set up the mappings + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber, & + & err,error,*999) + + !Calculate the local and global numbers and set up the mappings ALLOCATE(ELEMENTS_MAPPING%GLOBAL_TO_LOCAL_MAP(MESH%NUMBER_OF_ELEMENTS),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate element mapping global to local map.",ERR,ERROR,*999) ELEMENTS_MAPPING%NUMBER_OF_GLOBAL=MESH%TOPOLOGY(component_idx)%PTR%ELEMENTS%NUMBER_OF_ELEMENTS @@ -4117,7 +4158,7 @@ SUBROUTINE DOMAIN_MAPPINGS_ELEMENTS_CALCULATE(DOMAIN,ERR,ERROR,*) !Calculate the local numbers domain_no=DECOMPOSITION%ELEMENT_DOMAIN(ne) LOCAL_ELEMENT_NUMBERS(domain_no)=LOCAL_ELEMENT_NUMBERS(domain_no)+1 - !Calculate the adjacent elements to the computational domains and the adjacent domain numbers themselves + !Calculate the adjacent elements to the computation domains and the adjacent domain numbers themselves BASIS=>MESH%TOPOLOGY(component_idx)%PTR%ELEMENTS%ELEMENTS(ne)%BASIS NULLIFY(ADJACENT_DOMAINS_LIST) CALL LIST_CREATE_START(ADJACENT_DOMAINS_LIST,ERR,ERROR,*999) @@ -4152,7 +4193,7 @@ SUBROUTINE DOMAIN_MAPPINGS_ELEMENTS_CALCULATE(DOMAIN,ERR,ERROR,*) !Element is an internal element ELEMENTS_MAPPING%GLOBAL_TO_LOCAL_MAP(ne)%LOCAL_TYPE(1)=DOMAIN_LOCAL_INTERNAL ELSE - !Element is on the boundary of computational domains + !Element is on the boundary of computation domains ELEMENTS_MAPPING%GLOBAL_TO_LOCAL_MAP(ne)%LOCAL_TYPE(1)=DOMAIN_LOCAL_BOUNDARY ENDIF ENDDO !ne @@ -4427,9 +4468,9 @@ SUBROUTINE DOMAIN_MAPPINGS_NODES_DOFS_CALCULATE(DOMAIN,ERR,ERROR,*) INTEGER(INTG), INTENT(OUT) :: ERR !DOMAIN%MESH component_idx=DOMAIN%MESH_COMPONENT_NUMBER MESH_TOPOLOGY=>MESH%TOPOLOGY(component_idx)%PTR - - numberOfComputationalNodes=ComputationalEnvironment_NumberOfNodesGet(ERR,ERROR) - IF(ERR/=0) GOTO 999 - myComputationalNodeNumber=ComputationalEnvironment_NodeNumberGet(ERR,ERROR) - IF(ERR/=0) GOTO 999 - + + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes, & + & err,error,*999) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber, & + & err,error,*999) + !Calculate the local and global numbers and set up the mappings ALLOCATE(NODES_MAPPING%GLOBAL_TO_LOCAL_MAP(MESH_TOPOLOGY%NODES%numberOfNodes),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate node mapping global to local map.",ERR,ERROR,*999) @@ -4571,7 +4612,7 @@ SUBROUTINE DOMAIN_MAPPINGS_NODES_DOFS_CALCULATE(DOMAIN,ERR,ERROR,*) ENDDO !version_idx ENDDO !derivative_idx ELSE - !Node is on the boundary of computational domains + !Node is on the boundary of computation domains NODES_MAPPING%GLOBAL_TO_LOCAL_MAP(node_idx)%NUMBER_OF_DOMAINS=NUMBER_OF_DOMAINS DO derivative_idx=1,MESH_TOPOLOGY%NODES%NODES(node_idx)%numberOfDerivatives DO version_idx=1,MESH_TOPOLOGY%NODES%NODES(node_idx)%DERIVATIVES(derivative_idx)%numberOfVersions @@ -4697,29 +4738,29 @@ SUBROUTINE DOMAIN_MAPPINGS_NODES_DOFS_CALCULATE(DOMAIN,ERR,ERROR,*) ENDDO !domain_idx !Check 
decomposition and check that each domain has a node in it. - ALLOCATE(NODE_COUNT(0:numberOfComputationalNodes-1),STAT=ERR) + ALLOCATE(NODE_COUNT(0:numberOfWorldComputationNodes-1),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate node count.",ERR,ERROR,*999) NODE_COUNT=0 DO node_idx=1,MESH_TOPOLOGY%NODES%numberOfNodes - no_computational_node=DOMAIN%NODE_DOMAIN(node_idx) - IF(no_computational_node>=0.AND.no_computational_node=0.AND.no_computation_nodeFinishes the process of creating a mesh. \see OPENCMISS::Iron::cmfe_MeshCreateFinish + !>Finishes the process of creating a mesh. \see OpenCMISS::Iron::cmfe_MeshCreateFinish SUBROUTINE MESH_CREATE_FINISH(MESH,ERR,ERROR,*) !Argument variables @@ -6311,7 +6352,7 @@ END SUBROUTINE MESH_CREATE_START_GENERIC !================================================================================================================================ ! - !>Starts the process of creating a mesh defined by a user number with the specified NUMBER_OF_DIMENSIONS in an interface. \see OPENCMISS::Iron::cmfe_MeshCreateStart + !>Starts the process of creating a mesh defined by a user number with the specified NUMBER_OF_DIMENSIONS in an interface. \see OpenCMISS::Iron::cmfe_MeshCreateStart !>Default values set for the MESH's attributes are: !>- NUMBER_OF_COMPONENTS: 1 SUBROUTINE MESH_CREATE_START_INTERFACE(USER_NUMBER,INTERFACE,NUMBER_OF_DIMENSIONS,MESH,ERR,ERROR,*) @@ -6390,7 +6431,7 @@ END SUBROUTINE MESH_CREATE_START_INTERFACE !================================================================================================================================ ! - !>Starts the process of creating a mesh defined by a user number with the specified NUMBER_OF_DIMENSIONS in the region identified by REGION. \see OPENCMISS::Iron::cmfe_MeshCreateStart + !>Starts the process of creating a mesh defined by a user number with the specified NUMBER_OF_DIMENSIONS in the region identified by REGION. \see OpenCMISS::Iron::cmfe_MeshCreateStart !>Default values set for the MESH's attributes are: !>- NUMBER_OF_COMPONENTS: 1 SUBROUTINE MESH_CREATE_START_REGION(USER_NUMBER,REGION,NUMBER_OF_DIMENSIONS,MESH,ERR,ERROR,*) @@ -6460,7 +6501,7 @@ END SUBROUTINE MESH_CREATE_START_REGION !================================================================================================================================ ! - !>Destroys the mesh identified by a user number on the given region and deallocates all memory. \see OPENCMISS::Iron::cmfe_MeshDestroy + !>Destroys the mesh identified by a user number on the given region and deallocates all memory. \see OpenCMISS::Iron::cmfe_MeshDestroy SUBROUTINE MESH_DESTROY_NUMBER(USER_NUMBER,REGION,ERR,ERROR,*) !Argument variables @@ -6543,7 +6584,7 @@ END SUBROUTINE MESH_DESTROY_NUMBER !================================================================================================================================ ! - !>Destroys the mesh and deallocates all memory. \see OPENCMISS::Iron::cmfe_MeshDestroy + !>Destroys the mesh and deallocates all memory. \see OpenCMISS::Iron::cmfe_MeshDestroy SUBROUTINE MESH_DESTROY(MESH,ERR,ERROR,*) !Argument variables @@ -6735,7 +6776,7 @@ END SUBROUTINE MESH_INITIALISE !================================================================================================================================ ! - !>Gets the number of mesh components for a mesh identified by a pointer. \see OPENCMISS::Iron::cmfe_MeshNumberOfComponentsGet + !>Gets the number of mesh components for a mesh identified by a pointer. 
\see OpenCMISS::Iron::cmfe_MeshNumberOfComponentsGet SUBROUTINE MESH_NUMBER_OF_COMPONENTS_GET(MESH,NUMBER_OF_COMPONENTS,ERR,ERROR,*) !Argument variables @@ -6767,7 +6808,7 @@ END SUBROUTINE MESH_NUMBER_OF_COMPONENTS_GET !================================================================================================================================ ! - !>Changes/sets the number of mesh components for a mesh. \see OPENCMISS::Iron::cmfe_MeshNumberOfComponentsSet + !>Changes/sets the number of mesh components for a mesh. \see OpenCMISS::Iron::cmfe_MeshNumberOfComponentsSet SUBROUTINE MESH_NUMBER_OF_COMPONENTS_SET(MESH,NUMBER_OF_COMPONENTS,ERR,ERROR,*) !Argument variables @@ -6843,7 +6884,7 @@ END SUBROUTINE MESH_NUMBER_OF_COMPONENTS_SET !================================================================================================================================ ! - !>Gets the number of elements for a mesh identified by a pointer. \see OPENCMISS::Iron::cmfe_MeshNumberOfElementsGet + !>Gets the number of elements for a mesh identified by a pointer. \see OpenCMISS::Iron::cmfe_MeshNumberOfElementsGet SUBROUTINE MESH_NUMBER_OF_ELEMENTS_GET(MESH,NUMBER_OF_ELEMENTS,ERR,ERROR,*) !Argument variables @@ -6875,7 +6916,7 @@ END SUBROUTINE MESH_NUMBER_OF_ELEMENTS_GET !================================================================================================================================ ! - !>Changes/sets the number of elements for a mesh. \see OPENCMISS::Iron::cmfe_MeshNumberOfElementsSet + !>Changes/sets the number of elements for a mesh. \see OpenCMISS::Iron::cmfe_MeshNumberOfElementsSet SUBROUTINE MESH_NUMBER_OF_ELEMENTS_SET(MESH,NUMBER_OF_ELEMENTS,ERR,ERROR,*) !Argument variables @@ -6934,7 +6975,7 @@ END SUBROUTINE MESH_NUMBER_OF_ELEMENTS_SET !================================================================================================================================ ! - !>Changes/sets the surrounding elements calculate flag. \see OPENCMISS::Iron::cmfe_MeshSurroundingElementsCalculateSet + !>Changes/sets the surrounding elements calculate flag. \see OpenCMISS::Iron::cmfe_MeshSurroundingElementsCalculateSet SUBROUTINE MESH_SURROUNDING_ELEMENTS_CALCULATE_SET(MESH,SURROUNDING_ELEMENTS_CALCULATE_FLAG,ERR,ERROR,*) !Argument variables @@ -7234,7 +7275,7 @@ END SUBROUTINE MeshTopology_DofsInitialise !================================================================================================================================ ! - !>Finishes the process of creating elements for a specified mesh component in a mesh topology. \see OPENCMISS::Iron::cmfe_MeshElementsCreateFinish + !>Finishes the process of creating elements for a specified mesh component in a mesh topology. \see OpenCMISS::Iron::cmfe_MeshElementsCreateFinish SUBROUTINE MESH_TOPOLOGY_ELEMENTS_CREATE_FINISH(ELEMENTS,ERR,ERROR,*) !Argument variables @@ -7311,7 +7352,7 @@ END SUBROUTINE MESH_TOPOLOGY_ELEMENTS_CREATE_FINISH !================================================================================================================================ ! - !>Starts the process of creating elements in the mesh component identified by MESH and component_idx. The elements will be created with a default basis of BASIS. ELEMENTS is the returned pointer to the MESH_ELEMENTS data structure. \see OPENCMISS::Iron::cmfe_MeshElementsCreateStart + !>Starts the process of creating elements in the mesh component identified by MESH and component_idx. The elements will be created with a default basis of BASIS. 
ELEMENTS is the returned pointer to the MESH_ELEMENTS data structure. \see OpenCMISS::Iron::cmfe_MeshElementsCreateStart SUBROUTINE MESH_TOPOLOGY_ELEMENTS_CREATE_START(MESH,MESH_COMPONENT_NUMBER,BASIS,ELEMENTS,ERR,ERROR,*) !Argument variables @@ -7487,7 +7528,7 @@ END SUBROUTINE MESH_TOPOLOGY_ELEMENT_INITIALISE !!MERGE: Take user number - !>Gets the basis for a mesh element identified by a given global number. \todo should take user number \see OPENCMISS::Iron::cmfe_MeshElementsBasisGet + !>Gets the basis for a mesh element identified by a given global number. \todo should take user number \see OpenCMISS::Iron::cmfe_MeshElementsBasisGet SUBROUTINE MESH_TOPOLOGY_ELEMENTS_ELEMENT_BASIS_GET(GLOBAL_NUMBER,ELEMENTS,BASIS,ERR,ERROR,*) !Argument variables @@ -7608,7 +7649,7 @@ END SUBROUTINE MESH_TOPOLOGY_ELEMENTS_ELEMENT_BASIS_SET !================================================================================================================================ ! - !>Returns the adjacent element number for a mesh element identified by a global number. \todo specify by user number not global number \see OPENCMISS::Iron::cmfe_MeshElementsNo + !>Returns the adjacent element number for a mesh element identified by a global number. \todo specify by user number not global number \see OpenCMISS::Iron::cmfe_MeshElementsNo SUBROUTINE MESH_TOPOLOGY_ELEMENTS_ADJACENT_ELEMENT_GET(GLOBAL_NUMBER,ELEMENTS,ADJACENT_ELEMENT_XI,ADJACENT_ELEMENT_NUMBER, & & ERR,ERROR,*) @@ -7664,7 +7705,7 @@ END SUBROUTINE MESH_TOPOLOGY_ELEMENTS_ADJACENT_ELEMENT_GET !================================================================================================================================ ! - !>Gets the element nodes for a mesh element identified by a given global number. \todo specify by user number not global number \see OPENCMISS::Iron::cmfe_MeshElementsNodesGet + !>Gets the element nodes for a mesh element identified by a given global number. \todo specify by user number not global number \see OpenCMISS::Iron::cmfe_MeshElementsNodesGet SUBROUTINE MESH_TOPOLOGY_ELEMENTS_ELEMENT_NODES_GET(GLOBAL_NUMBER,ELEMENTS,USER_ELEMENT_NODES,ERR,ERROR,*) !Argument variables @@ -7712,7 +7753,7 @@ END SUBROUTINE MESH_TOPOLOGY_ELEMENTS_ELEMENT_NODES_GET !================================================================================================================================ ! - !>Changes/sets the element nodes for a mesh element identified by a given global number. \todo specify by user number not global number \see OPENCMISS::Iron::cmfe_MeshElementsNodesSet + !>Changes/sets the element nodes for a mesh element identified by a given global number. \todo specify by user number not global number \see OpenCMISS::Iron::cmfe_MeshElementsNodesSet SUBROUTINE MESH_TOPOLOGY_ELEMENTS_ELEMENT_NODES_SET(GLOBAL_NUMBER,ELEMENTS,USER_ELEMENT_NODES,ERR,ERROR,*) !Argument variables @@ -7843,7 +7884,7 @@ END SUBROUTINE MESH_TOPOLOGY_ELEMENTS_ELEMENT_NODES_SET !================================================================================================================================ ! - !>Changes/sets an element node's version for a mesh element identified by a given global number. \todo specify by user number not global number \see OPENCMISS::Iron::cmfe_MeshElementsNodesSet + !>Changes/sets an element node's version for a mesh element identified by a given global number. 
\todo specify by user number not global number \see OpenCMISS::Iron::cmfe_MeshElementsNodesSet SUBROUTINE MeshElements_ElementNodeVersionSet(GLOBAL_NUMBER,ELEMENTS,VERSION_NUMBER,DERIVATIVE_NUMBER, & & USER_ELEMENT_NODE_INDEX,ERR,ERROR,*) @@ -8326,7 +8367,7 @@ END SUBROUTINE MESH_TOPOLOGY_DATA_POINTS_INITIALISE !!MERGE: ditto. - !>Gets the user number for a global element identified by a given global number. \todo Check that the user number doesn't already exist. \see OPENCMISS::Iron::cmfe_MeshElementsUserNumberGet + !>Gets the user number for a global element identified by a given global number. \todo Check that the user number doesn't already exist. \see OpenCMISS::Iron::cmfe_MeshElementsUserNumberGet SUBROUTINE MESH_TOPOLOGY_ELEMENTS_NUMBER_GET(GLOBAL_NUMBER,USER_NUMBER,ELEMENTS,ERR,ERROR,*) !Argument variables @@ -8367,7 +8408,7 @@ END SUBROUTINE MESH_TOPOLOGY_ELEMENTS_NUMBER_GET !================================================================================================================================ ! - !>Returns the user number for a global element identified by a given global number. \see OPENCMISS::Iron::cmfe_MeshElementsUserNumberGet + !>Returns the user number for a global element identified by a given global number. \see OpenCMISS::Iron::cmfe_MeshElementsUserNumberGet SUBROUTINE MeshElements_ElementUserNumberGet(GLOBAL_NUMBER,USER_NUMBER,ELEMENTS,ERR,ERROR,*) !Argument variables @@ -8409,7 +8450,7 @@ END SUBROUTINE MeshElements_ElementUserNumberGet !================================================================================================================================ ! - !>Changes/sets the user number for a global element identified by a given global number. \see OPENCMISS::Iron::cmfe_MeshElementsUserNumberSet + !>Changes/sets the user number for a global element identified by a given global number. \see OpenCMISS::Iron::cmfe_MeshElementsUserNumberSet SUBROUTINE MeshElements_ElementUserNumberSet(GLOBAL_NUMBER,USER_NUMBER,ELEMENTS,ERR,ERROR,*) !Argument variables @@ -10127,7 +10168,7 @@ END SUBROUTINE MESHES_INITIALISE_REGION !================================================================================================================================ ! - !>Gets the domain for a given node in a decomposition of a mesh. \todo should be able to specify lists of elements. \see OPENCMISS::Iron::cmfe_DecompositionNodeDomainGet + !>Gets the domain for a given node in a decomposition of a mesh. \todo should be able to specify lists of elements. \see OpenCMISS::Iron::cmfe_Decomposition_NodeDomainGet SUBROUTINE DECOMPOSITION_NODE_DOMAIN_GET(DECOMPOSITION,USER_NODE_NUMBER,MESH_COMPONENT_NUMBER,DOMAIN_NUMBER,ERR,ERROR,*) !Argument variables @@ -10366,3 +10407,4 @@ END SUBROUTINE MESH_EMBEDDING_SET_GAUSS_POINT_DATA END MODULE MESH_ROUTINES + diff --git a/src/opencmiss_iron.f90 b/src/opencmiss_iron.f90 index b5fb6cf4..a4a31cb1 100644 --- a/src/opencmiss_iron.f90 +++ b/src/opencmiss_iron.f90 @@ -32,7 +32,7 @@ !> Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. 
!> -!> Contributor(s): +!> Contributor(s): Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -64,7 +64,8 @@ MODULE OpenCMISS_Iron USE Cmiss USE CmissPetsc USE CMISS_CELLML - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE Constants USE CONTROL_LOOP_ROUTINES USE ControlLoopAccessRoutines @@ -152,6 +153,12 @@ MODULE OpenCMISS_Iron TYPE(CELLML_EQUATIONS_TYPE), POINTER :: cellmlEquations END TYPE cmfe_CellMLEquationsType + !>Contains information on a computation environment + TYPE cmfe_ComputationEnvironmentType + PRIVATE + TYPE(ComputationEnvironmentType), POINTER :: computationEnvironment_ + END TYPE cmfe_ComputationEnvironmentType + !>Contains information on a control loop. TYPE cmfe_ControlLoopType PRIVATE @@ -248,14 +255,14 @@ MODULE OpenCMISS_Iron TYPE(InterfacePointsConnectivityType), POINTER :: pointsConnectivity END TYPE cmfe_InterfacePointsConnectivityType - !>A matrix that may be distributed across multiple computational nodes + !>A matrix that may be distributed across multiple computation nodes !>and may use sparse or full storage. TYPE cmfe_DistributedMatrixType PRIVATE TYPE(DistributedMatrixType), POINTER :: distributedMatrix END TYPE cmfe_DistributedMatrixType - !>A vector that may be distributed across multiple computational nodes + !>A vector that may be distributed across multiple computation nodes TYPE cmfe_DistributedVectorType PRIVATE TYPE(DistributedVectorType), POINTER :: distributedVector @@ -321,20 +328,16 @@ MODULE OpenCMISS_Iron TYPE(SOLVER_EQUATIONS_TYPE), POINTER :: solverEquations END TYPE cmfe_SolverEquationsType - !>Contains information on a computational work group - TYPE cmfe_ComputationalWorkGroupType + !>Contains information on a work group + TYPE cmfe_WorkGroupType PRIVATE - TYPE(ComputationalWorkGroupType), POINTER :: computationalWorkGroup - END TYPE cmfe_ComputationalWorkGroupType + TYPE(WorkGroupType), POINTER :: workGroup + END TYPE cmfe_WorkGroupType !Module variables TYPE(VARYING_STRING) :: error - !INTERFACE cmfe_Finalise_ - ! 
MODULE PROCEDURE cmfe_Finalise - !END INTERFACE cmfe_Finalise_ - INTERFACE cmfe_Initialise MODULE PROCEDURE cmfe_InitialiseNumber MODULE PROCEDURE cmfe_InitialiseObj @@ -345,7 +348,6 @@ MODULE OpenCMISS_Iron MODULE PROCEDURE cmfe_Fields_CreateRegion END INTERFACE cmfe_Fields_Create - !PUBLIC cmfe_Finalise,cmfe_Initialise PUBLIC cmfe_Finalise,cmfe_Initialise PUBLIC cmfe_WorkingRealPrecisionGet @@ -360,7 +362,7 @@ MODULE OpenCMISS_Iron PUBLIC cmfe_CellMLEquationsType,cmfe_CellMLEquations_Finalise,cmfe_CellMLEquations_Initialise - PUBLIC cmfe_ComputationalWorkGroupType,cmfe_ComputationalWorkGroup_Initialise + PUBLIC cmfe_ComputationEnvironmentType,cmfe_ComputationEnvironment_Initialise,cmfe_ComputationEnvironment_Finalise PUBLIC cmfe_ControlLoopType,cmfe_ControlLoop_Finalise,cmfe_ControlLoop_Initialise,cmfe_ControlLoop_LoadOutputSet @@ -374,6 +376,10 @@ MODULE OpenCMISS_Iron PUBLIC cmfe_Decomposition_CalculateFacesSet,cmfe_Decomposition_CalculateLinesSet + PUBLIC cmfe_DistributedMatrixType,cmfe_DistributedVectorType + + PUBLIC cmfe_DistributedMatrix_Initialise,cmfe_DistributedVector_Initialise + PUBLIC cmfe_EquationsType,cmfe_Equations_Finalise,cmfe_Equations_Initialise PUBLIC cmfe_EquationsSetType,cmfe_EquationsSet_Finalise,cmfe_EquationsSet_Initialise @@ -398,10 +404,6 @@ MODULE OpenCMISS_Iron PUBLIC cmfe_InterfacePointsConnectivityType,cmfe_InterfacePointsConnectivity_Initialise, & & cmfe_InterfacePointsConnectivity_Finalise - PUBLIC cmfe_DistributedMatrixType,cmfe_DistributedVectorType - - PUBLIC cmfe_DistributedMatrix_Initialise,cmfe_DistributedVector_Initialise - PUBLIC cmfe_MeshType,cmfe_Mesh_Finalise,cmfe_Mesh_Initialise PUBLIC cmfe_MeshElementsType,cmfe_MeshElements_Finalise,cmfe_MeshElements_Initialise @@ -420,6 +422,8 @@ MODULE OpenCMISS_Iron PUBLIC cmfe_SolverEquationsType,cmfe_SolverEquations_Finalise,cmfe_SolverEquations_Initialise + PUBLIC cmfe_WorkGroupType,cmfe_WorkGroup_Initialise,cmfe_WorkGroup_Finalise + !!================================================================================================================================== !! !! ANALYTIC_ANALYSIS_ROUTINES @@ -1257,7 +1261,7 @@ MODULE OpenCMISS_Iron !!================================================================================================================================== !! -!! ComputationalEnvironment +!! Computation !! 
!!================================================================================================================================== @@ -1269,23 +1273,89 @@ MODULE OpenCMISS_Iron !Interfaces - PUBLIC cmfe_ComputationalWorldCommunicatorGet,cmfe_ComputationalWorldCommunicatorSet + !>Starts the creation of a work group + INTERFACE cmfe_WorkGroup_CreateStart + MODULE PROCEDURE cmfe_WorkGroup_CreateStartNumber + MODULE PROCEDURE cmfe_WorkGroup_CreateStartObj + END INTERFACE cmfe_WorkGroup_CreateStart + + !>Finishes the creation of a work group + INTERFACE cmfe_WorkGroup_CreateFinish + MODULE PROCEDURE cmfe_WorkGroup_CreateFinishNumber + MODULE PROCEDURE cmfe_WorkGroup_CreateFinishObj + END INTERFACE cmfe_WorkGroup_CreateFinish + + !>Destroys a work group + INTERFACE cmfe_WorkGroup_Destroy + MODULE PROCEDURE cmfe_WorkGroup_DestroyNumber + MODULE PROCEDURE cmfe_WorkGroup_DestroyObj + END INTERFACE cmfe_WorkGroup_Destroy + + !>Gets the group communicator for a work group + INTERFACE cmfe_WorkGroup_GroupCommunicatorGet + MODULE PROCEDURE cmfe_WorkGroup_GroupCommunicatorGetNumber + MODULE PROCEDURE cmfe_WorkGroup_GroupCommunicatorGetObj + END INTERFACE cmfe_WorkGroup_GroupCommunicatorGet + + !>Returns the label of a work group. + INTERFACE cmfe_WorkGroup_LabelGet + MODULE PROCEDURE cmfe_WorkGroup_LabelGetCNumber + MODULE PROCEDURE cmfe_WorkGroup_LabelGetCObj + MODULE PROCEDURE cmfe_WorkGroup_LabelGetVSNumber + MODULE PROCEDURE cmfe_WorkGroup_LabelGetVSObj + END INTERFACE cmfe_WorkGroup_LabelGet + + !>Sets/changes the label of a work group. + INTERFACE cmfe_WorkGroup_LabelSet + MODULE PROCEDURE cmfe_WorkGroup_LabelSetCNumber + MODULE PROCEDURE cmfe_WorkGroup_LabelSetCObj + MODULE PROCEDURE cmfe_WorkGroup_LabelSetVSNumber + MODULE PROCEDURE cmfe_WorkGroup_LabelSetVSObj + END INTERFACE cmfe_WorkGroup_LabelSet + + !>Gets the group node number in a work group + INTERFACE cmfe_WorkGroup_GroupNodeNumberGet + MODULE PROCEDURE cmfe_WorkGroup_GroupNodeNumberGetNumber + MODULE PROCEDURE cmfe_WorkGroup_GroupNodeNumberGetObj + END INTERFACE cmfe_WorkGroup_GroupNodeNumberGet + + !>Gets the number of group nodes in a work group + INTERFACE cmfe_WorkGroup_NumberOfGroupNodesGet + MODULE PROCEDURE cmfe_WorkGroup_NumberOfGroupNodesGetNumber + MODULE PROCEDURE cmfe_WorkGroup_NumberOfGroupNodesGetObj + END INTERFACE cmfe_WorkGroup_NumberOfGroupNodesGet + + !>Sets/changes the number of group nodes in a work group + INTERFACE cmfe_WorkGroup_NumberOfGroupNodesSet + MODULE PROCEDURE cmfe_WorkGroup_NumberOfGroupNodesSetNumber + MODULE PROCEDURE cmfe_WorkGroup_NumberOfGroupNodesSetObj + END INTERFACE cmfe_WorkGroup_NumberOfGroupNodesSet + + PUBLIC cmfe_ComputationEnvironment_NumberOfWorldNodesGet + + PUBLIC cmfe_ComputationEnvironment_WorldCommunicatorGet + + PUBLIC cmfe_ComputationEnvironment_WorldNodeNumberGet - PUBLIC cmfe_ComputationalNodeNumberGet + PUBLIC cmfe_ComputationEnvironment_WorldWorkGroupGet - PUBLIC cmfe_ComputationalNumberOfNodesGet + PUBLIC cmfe_WorkGroup_CreateStart - PUBLIC cmfe_ComputationalWorkGroup_CreateStart + PUBLIC cmfe_WorkGroup_CreateFinish - PUBLIC cmfe_ComputationalWorkGroup_CreateFinish + PUBLIC cmfe_WorkGroup_Destroy - PUBLIC cmfe_ComputationalWorkGroup_SubgroupAdd + PUBLIC cmfe_WorkGroup_GroupCommunicatorGet - PUBLIC cmfe_Decomposition_WorldWorkGroupSet + PUBLIC cmfe_WorkGroup_GroupNodeNumberGet + PUBLIC cmfe_WorkGroup_LabelGet,cmfe_WorkGroup_LabelSet + + PUBLIC cmfe_WorkGroup_NumberOfGroupNodesGet,cmfe_WorkGroup_NumberOfGroupNodesSet + 
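A minimal usage sketch of the work group API declared above (illustrative only, not part of the patch: the user number 1 and the node split are arbitrary, the decomposition and problem objects are assumed to already exist in the caller, and the generic names are assumed to resolve to the *Obj module procedures listed in the interfaces):

  TYPE(cmfe_ComputationEnvironmentType) :: computationEnvironment
  TYPE(cmfe_WorkGroupType) :: worldWorkGroup,subGroup
  INTEGER(INTG) :: numberOfWorldNodes,err

  CALL cmfe_ComputationEnvironment_Initialise(computationEnvironment,err)
  CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldNodes,err)
  CALL cmfe_WorkGroup_Initialise(worldWorkGroup,err)
  CALL cmfe_ComputationEnvironment_WorldWorkGroupGet(computationEnvironment,worldWorkGroup,err)
  !Create a sub work group under the world work group (user number 1 is illustrative)
  CALL cmfe_WorkGroup_Initialise(subGroup,err)
  CALL cmfe_WorkGroup_CreateStart(1,worldWorkGroup,subGroup,err)
  CALL cmfe_WorkGroup_NumberOfGroupNodesSet(subGroup,numberOfWorldNodes/2,err) !illustrative split
  CALL cmfe_WorkGroup_CreateFinish(subGroup,err)
  !The group can then be attached to a decomposition and/or a problem
  CALL cmfe_Decomposition_WorkGroupSet(decomposition,subGroup,err)
  CALL cmfe_Problem_WorkGroupSet(problem,subGroup,err)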
!!================================================================================================================================== !! -!! CONSTANTS +!! Constants !! !!================================================================================================================================== @@ -5280,6 +5350,12 @@ MODULE OpenCMISS_Iron MODULE PROCEDURE cmfe_Decomposition_TypeSetObj END INTERFACE cmfe_Decomposition_TypeSet + !>Sets/changes the work group for a decomposition. + INTERFACE cmfe_Decomposition_WorkGroupSet + MODULE PROCEDURE cmfe_Decomposition_WorkGroupSetNumber + MODULE PROCEDURE cmfe_Decomposition_WorkGroupSetObj + END INTERFACE cmfe_Decomposition_WorkGroupSet + !>Sets/changes whether lines should be calculated for the decomposition. INTERFACE cmfe_Decomposition_CalculateLinesSet MODULE PROCEDURE cmfe_Decomposition_CalculateLinesSetNumber @@ -5532,6 +5608,8 @@ MODULE OpenCMISS_Iron PUBLIC cmfe_Decomposition_TypeGet,cmfe_Decomposition_TypeSet + PUBLIC cmfe_Decomposition_WorkGroupSet + PUBLIC cmfe_Decomposition_NodeDomainGet PUBLIC cmfe_Mesh_CreateFinish,cmfe_Mesh_CreateStart @@ -5630,7 +5708,7 @@ MODULE OpenCMISS_Iron MODULE PROCEDURE cmfe_DistributedMatrix_DataTypeGetObj END INTERFACE cmfe_DistributedMatrix_DataTypeGet - !>Get the dimensions for a distributed matrix on this computational node + !>Get the dimensions for a distributed matrix on this computation node INTERFACE cmfe_DistributedMatrix_DimensionsGet MODULE PROCEDURE cmfe_DistributedMatrix_DimensionsGetObj END INTERFACE cmfe_DistributedMatrix_DimensionsGet @@ -5640,7 +5718,7 @@ MODULE OpenCMISS_Iron MODULE PROCEDURE cmfe_DistributedMatrix_StorageLocationsGetObj END INTERFACE cmfe_DistributedMatrix_StorageLocationsGet - !>Get the data array for this matrix on this computational node + !>Get the data array for this matrix on this computation node INTERFACE cmfe_DistributedMatrix_DataGet MODULE PROCEDURE cmfe_DistributedMatrix_DataGetIntgObj MODULE PROCEDURE cmfe_DistributedMatrix_DataGetDPObj @@ -5661,7 +5739,7 @@ MODULE OpenCMISS_Iron MODULE PROCEDURE cmfe_DistributedVector_DataTypeGetObj END INTERFACE cmfe_DistributedVector_DataTypeGet - !>Get the data array for this vector on this computational node + !>Get the data array for this vector on this computation node INTERFACE cmfe_DistributedVector_DataGet MODULE PROCEDURE cmfe_DistributedVector_DataGetIntgObj MODULE PROCEDURE cmfe_DistributedVector_DataGetDPObj @@ -6285,6 +6363,12 @@ MODULE OpenCMISS_Iron MODULE PROCEDURE cmfe_Problem_SpecificationSizeGetObj END INTERFACE cmfe_Problem_SpecificationSizeGet + !>Sets/changes the work group for a problem. + INTERFACE cmfe_Problem_WorkGroupSet + MODULE PROCEDURE cmfe_Problem_WorkGroupSetNumber + MODULE PROCEDURE cmfe_Problem_WorkGroupSetObj + END INTERFACE cmfe_Problem_WorkGroupSet + PUBLIC cmfe_Problem_CellMLEquationsCreateFinish,cmfe_Problem_CellMLEquationsCreateStart PUBLIC cmfe_Problem_CellMLEquationsGet @@ -6317,6 +6401,8 @@ MODULE OpenCMISS_Iron PUBLIC cmfe_Problem_SpecificationGet,cmfe_Problem_SpecificationSizeGet + PUBLIC cmfe_Problem_WorkGroupSet + !!================================================================================================================================== !! !! REGION_ROUTINES @@ -8110,26 +8196,51 @@ END SUBROUTINE cmfe_CellMLEquations_Initialise !================================================================================================================================ ! - !>Initialises a cmfe_ComputationalWorkGroupType object. 
- SUBROUTINE cmfe_ComputationalWorkGroup_Initialise(cmfe_ComputationalWorkGroup,err) - !DLLEXPORT(cmfe_ComputationalWorkGroup_Initialise) + !>Finalises a cmfe_ComputationEnvironmentType object. + SUBROUTINE cmfe_ComputationEnvironment_Finalise(cmfe_ComputationEnvironment,err) + !DLLEXPORT(cmfe_ComputationEnvironment_Finalise) !Argument variables - TYPE(cmfe_ComputationalWorkGroupType), INTENT(OUT) :: cmfe_ComputationalWorkGroup !Initialises a cmfe_ComputationEnvironmentType object. + SUBROUTINE cmfe_ComputationEnvironment_Initialise(cmfe_ComputationEnvironment,err) + !DLLEXPORT(cmfe_ComputationEnvironment_Initialise) + + !Argument variables + TYPE(cmfe_ComputationEnvironmentType), INTENT(OUT) :: cmfe_ComputationEnvironment !computationEnvironment + + EXITS("cmfe_ComputationEnvironment_Initialise") + RETURN +999 ERRORSEXITS("cmfe_ComputationEnvironment_Initialise",err,error) + CALL cmfe_HandleError(err,error) + RETURN + + END SUBROUTINE cmfe_ComputationEnvironment_Initialise ! !================================================================================================================================ @@ -8156,7 +8267,8 @@ SUBROUTINE cmfe_ControlLoop_Finalise(cmfe_ControlLoop,err) RETURN END SUBROUTINE cmfe_ControlLoop_Finalise - ! + + ! !================================================================================================================================ ! @@ -8388,6 +8500,56 @@ END SUBROUTINE cmfe_Decomposition_Initialise !================================================================================================================================ ! + !>Initialises a cmfe_DistributedMatrixType object. + SUBROUTINE cmfe_DistributedMatrix_Initialise(cmfe_DistributedMatrix,err) + !DLLEXPORT(cmfe_DistributedMatrix_Initialise) + + !Argument variables + TYPE(cmfe_DistributedMatrixType), INTENT(OUT) :: cmfe_DistributedMatrix !Initialises a cmfe_DistributedVectorType object. + SUBROUTINE cmfe_DistributedVector_Initialise(cmfe_DistributedVector,err) + !DLLEXPORT(cmfe_DistributedVector_Initialise) + + !Argument variables + TYPE(cmfe_DistributedVectorType), INTENT(OUT) :: cmfe_DistributedVector !Finalises a cmfe_EquationsType object. SUBROUTINE cmfe_Equations_Finalise(cmfe_Equations,err) !DLLEXPORT(cmfe_Equations_Finalise) @@ -9023,56 +9185,6 @@ END SUBROUTINE cmfe_History_Initialise !================================================================================================================================ ! - !>Initialises a cmfe_DistributedMatrixType object. - SUBROUTINE cmfe_DistributedMatrix_Initialise(cmfe_DistributedMatrix,err) - !DLLEXPORT(cmfe_DistributedMatrix_Initialise) - - !Argument variables - TYPE(cmfe_DistributedMatrixType), INTENT(OUT) :: cmfe_DistributedMatrix !Initialises a cmfe_DistributedVectorType object. - SUBROUTINE cmfe_DistributedVector_Initialise(cmfe_DistributedVector,err) - !DLLEXPORT(cmfe_DistributedVector_Initialise) - - !Argument variables - TYPE(cmfe_DistributedVectorType), INTENT(OUT) :: cmfe_DistributedVector !Finalises a cmfe_MeshType object. SUBROUTINE cmfe_Mesh_Finalise(cmfe_Mesh,err) !DLLEXPORT(cmfe_Mesh_Finalise) @@ -9525,6 +9637,56 @@ SUBROUTINE cmfe_SolverEquations_Initialise(cmfe_SolverEquations,err) END SUBROUTINE cmfe_SolverEquations_Initialise + ! + !================================================================================================================================ + ! + + !>Finalises a cmfe_WorkGroupType object. 
+ SUBROUTINE cmfe_WorkGroup_Finalise(cmfe_WorkGroup,err) + !DLLEXPORT(cmfe_WorkGroup_Finalise) + + !Argument variables + TYPE(cmfe_WorkGroupType), INTENT(OUT) :: cmfe_WorkGroup !Initialises a cmfe_WorkGroupType object. + SUBROUTINE cmfe_WorkGroup_Initialise(cmfe_WorkGroup,err) + !DLLEXPORT(cmfe_WorkGroup_Initialise) + + !Argument variables + TYPE(cmfe_WorkGroupType), INTENT(OUT) :: cmfe_WorkGroup !Returns the current world communicator. - SUBROUTINE cmfe_ComputationalWorldCommunicatorGet(worldCommunicator,err) - !DLLEXPORT(cmfe_ComputationalWorldCommunicatorGet) + !>Returns the number of computation nodes in the world communicator. + SUBROUTINE cmfe_ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldNodes,err) + !DLLEXPORT(cmfe_ComputationEnvironment_NumberOfWorldNodesGet) !Argument variables - INTEGER(INTG), INTENT(OUT) :: worldCommunicator !Sets/changes the current world communicator. - SUBROUTINE cmfe_ComputationalWorldCommunicatorSet(worldCommunicator,err) - !DLLEXPORT(cmfe_ComputationalWorldCommunicatorSet) + !>Returns the current world communicator for the computation environment. + SUBROUTINE cmfe_ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err) + !DLLEXPORT(cmfe_ComputationEnvironment_WorldCommunicatorGet) !Argument variables - INTEGER(INTG), INTENT(IN) :: worldCommunicator !Returns the computational node number of the running process. - SUBROUTINE cmfe_ComputationalNodeNumberGet(nodeNumber,err) - !DLLEXPORT(cmfe_ComputationalNodeNumberGet) + !>Returns the computation node number in the world communicator. + SUBROUTINE cmfe_ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,worldNodeNumber,err) + !DLLEXPORT(cmfe_ComputationEnvironment_WorldNodeNumberGet) !Argument variables - INTEGER(INTG), INTENT(OUT) :: nodeNumber !Returns the number of computational nodes for the running process. - SUBROUTINE cmfe_ComputationalNumberOfNodesGet(numberOfNodes,err) - !DLLEXPORT(cmfe_ComputationalNumberOfNodesGet) + !>Returns the world work group for the computation environment. + SUBROUTINE cmfe_ComputationEnvironment_WorldWorkGroupGet(computationEnvironment,worldWorkGroup,err) + !DLLEXPORT(cmfe_ComputationEnvironment_WorldWorkGroupGet) !Argument variables - INTEGER(INTG), INTENT(OUT) :: numberOfNodes !CREATE THE HIGHEST LEVEL WORK GROUP (DEFAULT: GROUP_WORLD) - SUBROUTINE cmfe_ComputationalWorkGroup_CreateStart(worldWorkGroup, numberComputationalNodes, err) - !DLLEXPORT(cmfe_ComputationalWorkGroup_CreateStart) + !>Start the creation of a computation work group specified by number. + SUBROUTINE cmfe_WorkGroup_CreateStartNumber(workGroupUserNumber,parentWorkGroupUserNumber,err) + !DLLEXPORT(cmfe_WorkGroup_CreateStartNumber) !Argument Variables - TYPE(cmfe_ComputationalWorkGroupType), INTENT(INOUT) :: worldWorkGroup - INTEGER(INTG),INTENT(IN) :: numberComputationalNodes + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Start the creation of a computation work group specified by object under a parent work group + SUBROUTINE cmfe_WorkGroup_CreateStartObj(userNumber,parentWorkGroup,workGroup,err) + !DLLEXPORT(cmfe_WorkGroup_CreateStartObj) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: userNumber !Finish the creation of a computation work group specified by number. 
+ SUBROUTINE cmfe_WorkGroup_CreateFinishNumber(workGroupUserNumber,err) + !DLLEXPORT(cmfe_WorkGroup_CreateFinishNumber) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Finish the creation of a computation work group specified by object. + SUBROUTINE cmfe_WorkGroup_CreateFinishObj(workGroup,err) + !DLLEXPORT(cmfe_WorkGroup_CreateFinishObj) + !Argument Variables + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !Destroy a work group specified by number. + SUBROUTINE cmfe_WorkGroup_DestroyNumber(workGroupUserNumber,err) + !DLLEXPORT(cmfe_WorkGroup_DestroyNumber) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Destroy a work group specified by object. + SUBROUTINE cmfe_WorkGroup_DestroyObj(workGroup,err) + !DLLEXPORT(cmfe_WorkGroup_DestroyObj) + !Argument Variables + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !Returns the group communicator for a work group specified by a user number. + SUBROUTINE cmfe_WorkGroup_GroupCommunicatorGetNumber(workGroupUserNumber,groupCommunicator,err) + !DLLEXPORT(cmfe_WorkGroup_GroupCommunicatorGetNumber) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Returns the group communicator for a work group specified by an object. + SUBROUTINE cmfe_WorkGroup_GroupCommunicatorGetObj(workGroup,groupCommunicator,err) + !DLLEXPORT(cmfe_WorkGroup_GroupCommunicatorGetObj) + !Argument Variables + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !Returns the group node number for a work group specified by a user number. + SUBROUTINE cmfe_WorkGroup_GroupNodeNumberGetNumber(workGroupUserNumber,groupNodeNumber,err) + !DLLEXPORT(cmfe_WorkGroup_GroupNodeNumberGetNumber) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Returns the group node number for a work group specified by an object. + SUBROUTINE cmfe_WorkGroup_GroupNodeNumberGetObj(workGroup,groupNodeNumber,err) + !DLLEXPORT(cmfe_WorkGroup_GroupNodeNumberGetObj) + !Argument Variables + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !Returns the character label for a work group specified by a user number. + SUBROUTINE cmfe_WorkGroup_LabelGetCNumber(workGroupUserNumber,label,err) + !DLLEXPORT(cmfe_WorkGroup_LabelGetCNumber) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Returns the character label for a work group specified by an object. + SUBROUTINE cmfe_WorkGroup_LabelGetCObj(workGroup,label,err) + !DLLEXPORT(cmfe_WorkGroup_LabelGetCObj) + !Argument Variables + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !Returns the varying string label for a work group specified by a user number. + SUBROUTINE cmfe_WorkGroup_LabelGetVSNumber(workGroupUserNumber,label,err) + !DLLEXPORT(cmfe_WorkGroup_LabelGetVSNumber) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Returns the varying string label for a work group specified by an object. + SUBROUTINE cmfe_WorkGroup_LabelGetVSObj(workGroup,label,err) + !DLLEXPORT(cmfe_WorkGroup_LabelGetVSObj) + !Argument Variables + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !Sets/changes the character label for a work group specified by a user number. + SUBROUTINE cmfe_WorkGroup_LabelSetCNumber(workGroupUserNumber,label,err) + !DLLEXPORT(cmfe_WorkGroup_LabelSetCNumber) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Sets/changes the character label for a work group specified by an object.
+ SUBROUTINE cmfe_WorkGroup_LabelSetCObj(workGroup,label,err) + !DLLEXPORT(cmfe_WorkGroup_LabelSetCObj) + !Argument Variables + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !Sets/changes the varying string label for a work group specified by a user number. + SUBROUTINE cmfe_WorkGroup_LabelSetVSNumber(workGroupUserNumber,label,err) + !DLLEXPORT(cmfe_WorkGroup_LabelSetVSNumber) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !GENERATE THE HIERARCHY COMPUTATIONAL ENVIRONMENT BASED ON WORK GROUP TREE - SUBROUTINE cmfe_ComputationalWorkGroup_CreateFinish(worldWorkGroup, err) - !DLLEXPORT(cmfe_ComputationalWorkGroup_CreateFinish) + !>Sets/changes the varying string label for a work group specified by an object. + SUBROUTINE cmfe_WorkGroup_LabelSetVSObj(workGroup,label,err) + !DLLEXPORT(cmfe_WorkGroup_LabelSetVSObj) !Argument Variables - TYPE(cmfe_ComputationalWorkGroupType), INTENT(INOUT) :: worldWorkGroup + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !ADD WORK SUB-GROUP TO THE PARENT GROUP BASED ON THE COMPUTATIONAL REQUIREMENTS (CALLED BY THE USER) - SUBROUTINE cmfe_ComputationalWorkGroup_SubgroupAdd(parentWorkGroup, numberComputationalNodes,addedWorkGroup, err) - !DLLEXPORT(cmfe_ComputationalWorkGroup_SubgroupAdd) + !>Returns the number of group nodes in a work group specified by a user number. + SUBROUTINE cmfe_WorkGroup_NumberOfGroupNodesGetNumber(workGroupUserNumber,numberOfGroupNodes,err) + !DLLEXPORT(cmfe_WorkGroup_NumberOfGroupNodesGetNumber) !Argument Variables - TYPE(cmfe_ComputationalWorkGroupType), INTENT(INOUT) :: parentWorkGroup - TYPE(cmfe_ComputationalWorkGroupType), INTENT(INOUT) :: addedWorkGroup - INTEGER(INTG),INTENT(IN) :: numberComputationalNodes + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Set the working group tree in order to performe mesh decomposition - SUBROUTINE cmfe_Decomposition_WorldWorkGroupSet(decomposition, worldWorkGroup, err) - !DLLEXPORT(cmfe_Decomposition_WorldWorkGroupSet) + !>Returns the number of group nodes for a work group specified by an object. + SUBROUTINE cmfe_WorkGroup_NumberOfGroupNodesGetObj(workGroup,numberOfGroupNodes,err) + !DLLEXPORT(cmfe_WorkGroup_NumberOfGroupNodesGetObj) !Argument Variables - TYPE(cmfe_DecompositionType), INTENT(INOUT) :: decomposition - TYPE(cmfe_ComputationalWorkGroupType),INTENT(IN) :: worldWorkGroup + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !Sets/changes the number of group nodes for a work group specified by a user number. + SUBROUTINE cmfe_WorkGroup_NumberOfGroupNodesSetNumber(workGroupUserNumber,numberOfGroupNodes,err) + !DLLEXPORT(cmfe_WorkGroup_NumberOfGroupNodesSetNumber) + !Argument Variables + INTEGER(INTG), INTENT(IN) :: workGroupUserNumber !Sets/changes the number of group nodes for a work group specified by an object. + SUBROUTINE cmfe_WorkGroup_NumberOfGroupNodesSetObj(workGroup,numberOfGroupNodes,err) + !DLLEXPORT(cmfe_WorkGroup_NumberOfGroupNodesSetObj) + !Argument Variables + TYPE(cmfe_WorkGroupType), INTENT(INOUT) :: workGroup !Sets/changes the work group of a decomposition identified by a user number. + SUBROUTINE cmfe_Decomposition_WorkGroupSetNumber(regionUserNumber,meshUserNumber,decompositionUserNumber,workGroupUserNumber,err) + !DLLEXPORT(cmfe_Decomposition_WorkGroupSetNumber) + + !Argument variables + INTEGER(INTG), INTENT(IN) :: regionUserNumber !Sets/changes the work group for a decomposition identified by an object.
+ SUBROUTINE cmfe_Decomposition_WorkGroupSetObj(decomposition,workGroup,err) + !DLLEXPORT(cmfe_Decomposition_WorkGroupSetObj) + + !Argument variables + TYPE(cmfe_DecompositionType), INTENT(IN) :: decomposition !Sets whether lines should be calculated SUBROUTINE cmfe_Decomposition_CalculateLinesSetNumber(regionUserNumber,meshUserNumber,& & decompositionUserNumber,calculateLinesFlag,err) @@ -43398,7 +44111,7 @@ SUBROUTINE cmfe_Decomposition_NodeDomainGetNumber(regionUserNumber,meshUserNumbe INTEGER(INTG), INTENT(IN) :: decompositionUserNumber !Get the dimensions of a distributed matrix on this computational node + !>Get the dimensions of a distributed matrix on this computation node SUBROUTINE cmfe_DistributedMatrix_DimensionsGetObj(matrix,m,n,err) !DLLEXPORT(cmfe_DistributedMatrix_DimensionsGetObj) !Argument variables TYPE(cmfe_DistributedMatrixType), INTENT(IN) :: matrix !Get the data array for this matrix on this computational node + !>Get the data array for this matrix on this computation node SUBROUTINE cmfe_DistributedMatrix_DataGetIntgObj(matrix,data,err) !DLLEXPORT(cmfe_DistributedMatrix_DataGetIntgObj) @@ -45752,7 +46465,7 @@ END SUBROUTINE cmfe_DistributedMatrix_DataRestoreIntgObj !================================================================================================================================ ! - !>Get the data array for this matrix on this computational node + !>Get the data array for this matrix on this computation node SUBROUTINE cmfe_DistributedMatrix_DataGetDPObj(matrix,data,err) !DLLEXPORT(cmfe_DistributedMatrix_DataGetDPObj) @@ -45804,7 +46517,7 @@ END SUBROUTINE cmfe_DistributedMatrix_DataRestoreDPObj !================================================================================================================================ ! - !>Get the data array for this matrix on this computational node + !>Get the data array for this matrix on this computation node SUBROUTINE cmfe_DistributedMatrix_DataGetSPObj(matrix,data,err) !DLLEXPORT(cmfe_DistributedMatrix_DataGetSPObj) @@ -45856,7 +46569,7 @@ END SUBROUTINE cmfe_DistributedMatrix_DataRestoreSPObj !================================================================================================================================ ! - !>Get the data array for this matrix on this computational node + !>Get the data array for this matrix on this computation node SUBROUTINE cmfe_DistributedMatrix_DataGetLObj(matrix,data,err) !DLLEXPORT(cmfe_DistributedMatrix_DataGetLObj) @@ -45934,7 +46647,7 @@ END SUBROUTINE cmfe_DistributedVector_DataTypeGetObj !================================================================================================================================ ! - !>Get the data array for this vector on this computational node + !>Get the data array for this vector on this computation node SUBROUTINE cmfe_DistributedVector_DataGetIntgObj(vector,data,err) !DLLEXPORT(cmfe_DistributedVector_DataGetIntgObj) @@ -45986,7 +46699,7 @@ END SUBROUTINE cmfe_DistributedVector_DataRestoreIntgObj !================================================================================================================================ ! 
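The "data array ... on this computation node" routines above and below follow a get/use/restore pattern. A minimal sketch of the double-precision vector case (illustrative, not from the patch; it assumes the data argument comes back as a rank-1 pointer to the local array, that the vector has already been associated with an actual Iron distributed vector, and that cmfe_DistributedVector_DataRestore is the generic name for the *DataRestore* procedures):

  TYPE(cmfe_DistributedVectorType) :: aVector
  REAL(DP), POINTER :: localData(:)
  INTEGER(INTG) :: err

  NULLIFY(localData)
  CALL cmfe_DistributedVector_Initialise(aVector,err)
  !...associate aVector with a distributed vector from the problem being solved...
  CALL cmfe_DistributedVector_DataGet(aVector,localData,err)     !local part on this computation node
  !...read or modify localData(:) here...
  CALL cmfe_DistributedVector_DataRestore(aVector,localData,err) !hand the array back when done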
- !>Get the data array for this vector on this computational node + !>Get the data array for this vector on this computation node SUBROUTINE cmfe_DistributedVector_DataGetDPObj(vector,data,err) !DLLEXPORT(cmfe_DistributedVector_DataGetDPObj) @@ -46038,7 +46751,7 @@ END SUBROUTINE cmfe_DistributedVector_DataRestoreDPObj !================================================================================================================================ ! - !>Get the data array for this vector on this computational node + !>Get the data array for this vector on this computation node SUBROUTINE cmfe_DistributedVector_DataGetSPObj(vector,data,err) !DLLEXPORT(cmfe_DistributedVector_DataGetSPObj) @@ -46090,7 +46803,7 @@ END SUBROUTINE cmfe_DistributedVector_DataRestoreSPObj !================================================================================================================================ ! - !>Get the data array for this vector on this computational node + !>Get the data array for this vector on this computation node SUBROUTINE cmfe_DistributedVector_DataGetLObj(vector,data,err) !DLLEXPORT(cmfe_DistributedVector_DataGetLObj) @@ -48412,6 +49125,65 @@ SUBROUTINE cmfe_Problem_SpecificationSizeGetObj(problem,specificationSize,err) RETURN END SUBROUTINE cmfe_Problem_SpecificationSizeGetObj + + ! + !================================================================================================================================ + ! + + !>Sets/changes the work group of a problem identified by a user number. + SUBROUTINE cmfe_Problem_WorkGroupSetNumber(problemUserNumber,workGroupUserNumber,err) + !DLLEXPORT(cmfe_Problem_WorkGroupSetNumber) + + !Argument variables + INTEGER(INTG), INTENT(IN) :: problemUserNumber !Sets/changes the work group for a problem identified by an object. + SUBROUTINE cmfe_Problem_WorkGroupSetObj(problem,workGroup,err) + !DLLEXPORT(cmfe_Problem_WorkGroupSetObj) + + !Argument variables + TYPE(cmfe_ProblemType), INTENT(INOUT) :: problem !PROBLEMS + NULLIFY(worldWorkGroup) + CALL ComputationEnvironment_WorldWorkGroupGet(computationEnvironment,worldWorkGroup,err,error,*999) + NEW_PROBLEM%workGroup=>worldWorkGroup !Set problem specification CALL Problem_SpecificationSet(NEW_PROBLEM,PROBLEM_SPECIFICATION,err,error,*999) !For compatibility with old code, set class, type and subtype @@ -938,6 +944,7 @@ SUBROUTINE PROBLEM_INITIALISE(PROBLEM,err,error,*) PROBLEM%GLOBAL_NUMBER=0 PROBLEM%PROBLEM_FINISHED=.FALSE. NULLIFY(PROBLEM%PROBLEMS) + NULLIFY(problem%workGroup) NULLIFY(PROBLEM%CONTROL_LOOP) ELSE CALL FlagError("Problem is not associated.",err,error,*999) @@ -3743,6 +3750,36 @@ END SUBROUTINE Problem_SpecificationSizeGet !================================================================================================================================ ! + !>Sets the work group for a problem. \see OpenCMISS::cmfe_Problem_WorkGroupGet + SUBROUTINE Problem_WorkGroupSet(problem,workGroup,err,error,*) + + !Argument variables + TYPE(PROBLEM_TYPE), POINTER, INTENT(INOUT) :: problem !workGroup + + EXITS("Problem_WorkGroupSet") + RETURN +999 ERRORSEXITS("Problem_WorkGroupSet",err,error) + RETURN 1 + + END SUBROUTINE Problem_WorkGroupSet + + ! + !================================================================================================================================ + ! + !>Finalises all problems and deallocates all memory. 
SUBROUTINE PROBLEMS_FINALISE(err,error,*) diff --git a/src/reaction_diffusion_IO_routines.f90 b/src/reaction_diffusion_IO_routines.f90 index eb8a574f..384c5f49 100755 --- a/src/reaction_diffusion_IO_routines.f90 +++ b/src/reaction_diffusion_IO_routines.f90 @@ -27,7 +27,7 @@ !> Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): +!> Contributor(s): Vijay Rajagopal !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -47,7 +47,8 @@ MODULE REACTION_DIFFUSION_IO_ROUTINES USE BaseRoutines - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE EquationsSetConstants USE FIELD_ROUTINES USE FieldAccessRoutines @@ -91,13 +92,13 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER !Local Variables TYPE(EQUATIONS_SET_TYPE), POINTER :: EQUATIONS_SET - TYPE(DOMAIN_TYPE), POINTER :: COMPUTATIONAL_DOMAIN + TYPE(DOMAIN_TYPE), POINTER :: COMPUTATION_DOMAIN TYPE(FIELD_TYPE), POINTER :: SOURCE_FIELD REAL(DP) :: NodeXValue,NodeYValue,NodeZValue,NodeUValue - INTEGER(INTG):: myComputationalNodeNumber,NumberOfOutputFields,NumberOfDimensions,NumberOfElements,NumberOfNodes + INTEGER(INTG):: myWorldComputationNodeNumber,NumberOfOutputFields,NumberOfDimensions,NumberOfElements,NumberOfNodes INTEGER(INTG):: NumberOfVariableComponents,NumberOfSourceComponents,I,J,K,ValueIndex,NODE_GLOBAL_NUMBER INTEGER(INTG) :: NodesInMeshComponent,BasisType,MaxNodesPerElement,NumberOfFieldComponents(3),ELEMENT_GLOBAL_NUMBER - INTEGER(INTG) :: NODE_LOCAL_NUMBER + INTEGER(INTG) :: NODE_LOCAL_NUMBER,numberOfWorldComputationNodes INTEGER(INTG),ALLOCATABLE :: ElementNodes(:,:),SimplexOutputHelp(:) REAL(DP), ALLOCATABLE :: ElementNodesScales(:,:) LOGICAL :: OUTPUT_SOURCE @@ -107,18 +108,18 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER ENTERS("REACTION_DIFFUSION_IO_WRITE_CMGUI",ERR,ERROR,*999) - myComputationalNodeNumber = ComputationalEnvironment_NodeNumberGet(err,error) + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber,err,error,*999) EQUATIONS_SET => REGION%equations_sets%equations_sets(EQUATIONS_SET_GLOBAL_NUMBER)%ptr NULLIFY(SOURCE_FIELD) - COMPUTATIONAL_DOMAIN=>REGION%MESHES%MESHES(1) & + COMPUTATION_DOMAIN=>REGION%MESHES%MESHES(1) & & %ptr%DECOMPOSITIONS%DECOMPOSITIONS(1)%ptr%DOMAIN(1)%ptr - myComputationalNodeNumber = ComputationalEnvironment_NodeNumberGet(ERR,ERROR) - NumberOfDimensions = COMPUTATIONAL_DOMAIN%NUMBER_OF_DIMENSIONS - NumberOfNodes = COMPUTATIONAL_DOMAIN%TOPOLOGY%NODES%NUMBER_OF_NODES + NumberOfDimensions = COMPUTATION_DOMAIN%NUMBER_OF_DIMENSIONS + NumberOfNodes = COMPUTATION_DOMAIN%TOPOLOGY%NODES%NUMBER_OF_NODES NodesInMeshComponent = REGION%meshes%meshes(1)%ptr%topology(1)%ptr%nodes%numberOfNodes - NumberOfElements = COMPUTATIONAL_DOMAIN%TOPOLOGY%ELEMENTS%NUMBER_OF_ELEMENTS + NumberOfElements = COMPUTATION_DOMAIN%TOPOLOGY%ELEMENTS%NUMBER_OF_ELEMENTS NumberOfVariableComponents=REGION%equations_sets%equations_sets(EQUATIONS_SET_GLOBAL_NUMBER)%ptr%dependent%dependent_field% & & variables(1)%number_of_components NumberOfOutputFields=2 @@ -143,50 +144,50 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER FILENAME="./output/"//TRIM(NAME)//".exnode" - 
OPEN(UNIT=myComputationalNodeNumber, FILE=CHAR(FILENAME),STATUS='unknown') + OPEN(UNIT=myWorldComputationNodeNumber, FILE=CHAR(FILENAME),STATUS='unknown') ! WRITING HEADER INFORMATION - WRITE(myComputationalNodeNumber,*) 'Group name: Cell' + WRITE(myWorldComputationNodeNumber,*) 'Group name: Cell' WRITE(INTG_STRING,'(I0)') NumberOfOutputFields - WRITE(myComputationalNodeNumber,*) '#Fields=',TRIM(INTG_STRING) + WRITE(myWorldComputationNodeNumber,*) '#Fields=',TRIM(INTG_STRING) ValueIndex=1 WRITE(INTG_STRING,'(I0)') NumberOfDimensions - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' 1) coordinates, coordinate, rectangular cartesian, #Components=',TRIM(INTG_STRING) DO I=1,NumberOfDimensions IF(I==1) THEN WRITE(INTG_STRING,'(I0)') ValueIndex - WRITE(myComputationalNodeNumber,*) ' x. Value index= ',TRIM(INTG_STRING),', #Derivatives= 0' + WRITE(myWorldComputationNodeNumber,*) ' x. Value index= ',TRIM(INTG_STRING),', #Derivatives= 0' ELSE IF(I==2) THEN WRITE(INTG_STRING,'(I0)') ValueIndex - WRITE(myComputationalNodeNumber,*) ' y. Value index= ',TRIM(INTG_STRING),', #Derivatives= 0' + WRITE(myWorldComputationNodeNumber,*) ' y. Value index= ',TRIM(INTG_STRING),', #Derivatives= 0' ELSE WRITE(INTG_STRING,'(I0)') ValueIndex - WRITE(myComputationalNodeNumber,*) ' z. Value index= ',TRIM(INTG_STRING),', #Derivatives= 0' + WRITE(myWorldComputationNodeNumber,*) ' z. Value index= ',TRIM(INTG_STRING),', #Derivatives= 0' END IF ValueIndex=ValueIndex+1 END DO WRITE(INTG_STRING,'(I0)') NumberOfVariableComponents - WRITE(myComputationalNodeNumber,*) ' 2) dependent, field, rectangular cartesian, #Components=', & + WRITE(myWorldComputationNodeNumber,*) ' 2) dependent, field, rectangular cartesian, #Components=', & & TRIM(INTG_STRING) DO I=1,NumberOfVariableComponents WRITE(INTG_STRING,'(I0)') ValueIndex WRITE(INTG_STRING2,'(I0)') I - WRITE(myComputationalNodeNumber,*) ' ',TRIM(INTG_STRING2),'. Value index= ',TRIM(INTG_STRING), & + WRITE(myWorldComputationNodeNumber,*) ' ',TRIM(INTG_STRING2),'. Value index= ',TRIM(INTG_STRING), & & ', #Derivatives= 0' ValueIndex=ValueIndex+1 END DO IF( OUTPUT_SOURCE ) THEN !Watch out that no numbering conflict occurs with Analytic: 4.) WRITE(INTG_STRING,'(I0)') NumberOfSourceComponents - WRITE(myComputationalNodeNumber,*) ' 3) source, field, rectangular cartesian, #Components=', & + WRITE(myWorldComputationNodeNumber,*) ' 3) source, field, rectangular cartesian, #Components=', & & TRIM(INTG_STRING) DO I=1,NumberOfSourceComponents WRITE(INTG_STRING,'(I0)') ValueIndex WRITE(INTG_STRING2,'(I0)') I - WRITE(myComputationalNodeNumber,*) ' ',TRIM(INTG_STRING2),'. Value index= ', & + WRITE(myWorldComputationNodeNumber,*) ' ',TRIM(INTG_STRING2),'. Value index= ', & & TRIM(INTG_STRING),', #Derivatives= 0' ValueIndex=ValueIndex+1 END DO @@ -194,7 +195,7 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER !WRITE OUT NODE VALUES DO I = 1,NumberOfNodes - NODE_GLOBAL_NUMBER = COMPUTATIONAL_DOMAIN%TOPOLOGY%NODES%NODES(I)%GLOBAL_NUMBER + NODE_GLOBAL_NUMBER = COMPUTATION_DOMAIN%TOPOLOGY%NODES%NODES(I)%GLOBAL_NUMBER NodeXValue = REGION%equations_sets%equations_sets(EQUATIONS_SET_GLOBAL_NUMBER)%ptr%geometry%geometric_field%variables(1) & & %parameter_sets%parameter_sets(1)%ptr%parameters%cmiss%dataDP(I) IF(NumberOfDimensions==2 .OR. 
NumberOfDimensions==3) THEN @@ -208,17 +209,17 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER NodeUValue=REGION%equations_sets%equations_sets(EQUATIONS_SET_GLOBAL_NUMBER)%ptr%dependent%dependent_field% & & variables(1)%parameter_sets%parameter_sets(1)%ptr%parameters%cmiss%dataDP(I) - WRITE(myComputationalNodeNumber,*) ' Node: ',NODE_GLOBAL_NUMBER - WRITE(myComputationalNodeNumber,'(" ", es25.16 )')NodeXValue + WRITE(myWorldComputationNodeNumber,*) ' Node: ',NODE_GLOBAL_NUMBER + WRITE(myWorldComputationNodeNumber,'(" ", es25.16 )')NodeXValue IF(NumberOfDimensions==2 .OR. NumberOfDimensions==3) THEN - WRITE(myComputationalNodeNumber,'(" ", es25.16 )')NodeYValue + WRITE(myWorldComputationNodeNumber,'(" ", es25.16 )')NodeYValue END IF IF(NumberOfDimensions==3) THEN - WRITE(myComputationalNodeNumber,'(" ", es25.16 )')NodeZValue + WRITE(myWorldComputationNodeNumber,'(" ", es25.16 )')NodeZValue END IF - WRITE(myComputationalNodeNumber,'(" ", es25.16 )')NodeUValue + WRITE(myWorldComputationNodeNumber,'(" ", es25.16 )')NodeUValue IF( (EQUATIONS_SET%SPECIFICATION(1)==EQUATIONS_SET_CLASSICAL_FIELD_CLASS) & & .AND.(EQUATIONS_SET%SPECIFICATION(2)==EQUATIONS_SET_REACTION_DIFFUSION_EQUATION_TYPE) & @@ -226,14 +227,14 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER !source field IF( OUTPUT_SOURCE ) THEN !NodeSourceValue = SOURCE_INTERPOLATED_POINT(FIELD_U_VARIABLE_TYPE)%ptr%VALUES(1,1) - !WRITE(myComputationalNodeNumber,'(" ", es25.16 )')NodeSourceValue + !WRITE(myWorldComputationNodeNumber,'(" ", es25.16 )')NodeSourceValue END IF END IF END DO !nodes I - CLOSE(myComputationalNodeNumber) + CLOSE(myWorldComputationNodeNumber) !OUTPUT ELEMENTS IN CURRENT DOMAIN - MaxNodesPerElement=COMPUTATIONAL_DOMAIN%TOPOLOGY%ELEMENTS%ELEMENTS(1)%basis%number_of_element_parameters + MaxNodesPerElement=COMPUTATION_DOMAIN%TOPOLOGY%ELEMENTS%ELEMENTS(1)%basis%number_of_element_parameters BasisType = 1 IF(NumberOfDimensions==2) THEN IF(MaxNodesPerElement==4.OR.MaxNodesPerElement==9.OR.MaxNodesPerElement==16) THEN @@ -249,110 +250,110 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER IF(exportExelem) THEN CALL WRITE_STRING(GENERAL_OUTPUT_TYPE,"Writing Elements...",ERR,ERROR,*999) FILENAME="./output/"//TRIM(NAME)//".exelem" - OPEN(UNIT=myComputationalNodeNumber, FILE=CHAR(FILENAME),STATUS='unknown') - WRITE(myComputationalNodeNumber,*) 'Group name: Cell' + OPEN(UNIT=myWorldComputationNodeNumber, FILE=CHAR(FILENAME),STATUS='unknown') + WRITE(myWorldComputationNodeNumber,*) 'Group name: Cell' IF (BasisType==1) THEN !lagrange basis in 1 and 2D WRITE(INTG_STRING,'(I0)') NumberOfDimensions - WRITE(myComputationalNodeNumber,*) 'Shape. Dimension= ',TRIM(INTG_STRING) - WRITE(myComputationalNodeNumber,*) '#Scale factor sets= 1' + WRITE(myWorldComputationNodeNumber,*) 'Shape. 
Dimension= ',TRIM(INTG_STRING) + WRITE(myWorldComputationNodeNumber,*) '#Scale factor sets= 1' IF(NumberOfDimensions==1) THEN WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) 'q.Lagrange, #Scale factors=',TRIM(INTG_STRING) + WRITE(myWorldComputationNodeNumber,*) 'q.Lagrange, #Scale factors=',TRIM(INTG_STRING) ELSE IF (NumberOfDimensions==2) THEN IF(MaxNodesPerElement==4) THEN WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & 'l.Lagrange*l.Lagrange, #Scale factors=',TRIM(INTG_STRING) !linear lagrange ELSE IF(MaxNodesPerElement==9) THEN WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & 'q.Lagrange*q.Lagrange, #Scale factors=',TRIM(INTG_STRING) !quadratic lagrange ELSE IF(MaxNodesPerElement==16) THEN WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & 'c.Lagrange*c.Lagrange, #Scale factors=',TRIM(INTG_STRING) !cubic lagrange END IF ELSE !three dimensions IF(MaxNodesPerElement==8) THEN WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & 'l.Lagrange*l.Lagrange*l.Lagrange, #Scale factors=',TRIM(INTG_STRING) ELSE IF(MaxNodesPerElement==27) THEN WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & 'q.Lagrange*q.Lagrange*q.Lagrange, #Scale factors=',TRIM(INTG_STRING) ELSE IF(MaxNodesPerElement==64) THEN WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & 'c.Lagrange*c.Lagrange*c.Lagrange, #Scale factors=',TRIM(INTG_STRING) END IF END IF ELSEIF(BasisType==2) THEN IF(NumberOfDimensions==2) THEN - WRITE(myComputationalNodeNumber,*) 'Shape. Dimension=', & + WRITE(myWorldComputationNodeNumber,*) 'Shape. Dimension=', & & NumberOfDimensions,', simplex(2)*simplex' IF(MaxNodesPerElement==3) THEN - WRITE(myComputationalNodeNumber,*) '#Scale factor sets= 1' + WRITE(myWorldComputationNodeNumber,*) '#Scale factor sets= 1' WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' l.simplex(2)*l.simplex, #Scale factors= ', TRIM(INTG_STRING) ELSE IF(MaxNodesPerElement==6) THEN - WRITE(myComputationalNodeNumber,*) '#Scale factor sets= 1' + WRITE(myWorldComputationNodeNumber,*) '#Scale factor sets= 1' WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' l.simplex(2)*l.simplex, #Scale factors= ', TRIM(INTG_STRING) ELSE IF (MaxNodesPerElement== 10 ) THEN - WRITE(myComputationalNodeNumber,*) '#Scale factor sets= 1' + WRITE(myWorldComputationNodeNumber,*) '#Scale factor sets= 1' WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' q.simplex(2)*q.simplex, #Scale factors= ', TRIM(INTG_STRING) ENDIF ELSE IF(NumberOfDimensions==3) THEN WRITE(INTG_STRING2,'(I0)') NumberOfDimensions - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & 'Shape. 
Dimension=',TRIM(INTG_STRING2),', simplex(2;3)*simplex*simplex' IF(MaxNodesPerElement==4) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & '#Scale factor sets= 1' WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' l.simplex(2;3)*l.simplex*l.simplex, #Scale factors= ', TRIM(INTG_STRING) ELSE IF (MaxNodesPerElement== 10 ) THEN - WRITE(myComputationalNodeNumber,*) '#Scale factor sets= 1' + WRITE(myWorldComputationNodeNumber,*) '#Scale factor sets= 1' WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' q.simplex(2;3)*q.simplex*q.simplex, #Scale factors= ', TRIM(INTG_STRING) ELSE IF(MaxNodesPerElement==20) THEN - WRITE(myComputationalNodeNumber,*) '#Scale factor sets= 1' + WRITE(myWorldComputationNodeNumber,*) '#Scale factor sets= 1' WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' q.simplex(2;3)*q.simplex*q.simplex, #Scale factors= ', TRIM(INTG_STRING) ENDIF ELSE - WRITE(myComputationalNodeNumber,*) '#Scale factor sets= 0' + WRITE(myWorldComputationNodeNumber,*) '#Scale factor sets= 0' END IF END IF WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) '#Nodes= ',TRIM(INTG_STRING) + WRITE(myWorldComputationNodeNumber,*) '#Nodes= ',TRIM(INTG_STRING) WRITE(INTG_STRING,'(I0)') NumberOfOutputFields - WRITE(myComputationalNodeNumber,*) '#Fields= ',TRIM(INTG_STRING) + WRITE(myWorldComputationNodeNumber,*) '#Fields= ',TRIM(INTG_STRING) NumberOfFieldComponents(1) = NumberOfDimensions NumberOfFieldComponents(2) = NumberOfVariableComponents NumberOfFieldComponents(3) = NumberOfSourceComponents DO I=1,NumberOfOutputFields IF(I==1)THEN WRITE(INTG_STRING,'(I0)') NumberOfDimensions - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' 1) coordinates, coordinate, rectangular cartesian, #Components= ',TRIM(INTG_STRING) ELSE IF(I==2) THEN WRITE(INTG_STRING,'(I0)') NumberOfVariableComponents - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' 2) dependent, field, rectangular cartesian, #Components= ',TRIM(INTG_STRING) ELSE IF(I==3) THEN WRITE(INTG_STRING,'(I0)') NumberOfSourceComponents - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' 3) source, field, rectangular cartesian, #Components= ',TRIM(INTG_STRING) END IF @@ -360,98 +361,98 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER IF(NumberOfDimensions==1) THEN IF(I==1)THEN IF(J==1) THEN - WRITE(myComputationalNodeNumber,*)' x. l.Lagrange, no modify, standard node based.' + WRITE(myWorldComputationNodeNumber,*)' x. l.Lagrange, no modify, standard node based.' ELSE IF(J==2) THEN - WRITE(myComputationalNodeNumber,*)' y. l.Lagrange, no modify, standard node based.' + WRITE(myWorldComputationNodeNumber,*)' y. l.Lagrange, no modify, standard node based.' ELSE IF(J==3) THEN - WRITE(myComputationalNodeNumber,*)' z. l.Lagrange, no modify, standard node based.' + WRITE(myWorldComputationNodeNumber,*)' z. l.Lagrange, no modify, standard node based.' END IF ELSE - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. l.Lagrange, no modify, standard node based.' 
END IF ELSE IF(NumberOfDimensions==2) THEN IF(I==1)THEN IF(J==1) THEN IF(MaxNodesPerElement==4)THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. l.Lagrange*l.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==9) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. q.Lagrange*q.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==16) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. c.Lagrange*c.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==3) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. l.simplex(2)*l.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==6) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. q.simplex(2)*q.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==10) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. c.simplex(2)*c.simplex, no modify, standard node based.' END IF ELSE IF(J==2) THEN IF(MaxNodesPerElement==4) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. l.Lagrange*l.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==9) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. q.Lagrange*q.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==16) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. c.Lagrange*c.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==3) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. l.simplex(2)*l.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==6) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. q.simplex(2)*q.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==10) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. c.simplex(2)*c.simplex, no modify, standard node based.' END IF ELSE IF(J==3) THEN IF(MaxNodesPerElement==4) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. l.Lagrange*l.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==9) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. q.Lagrange*q.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==16) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. c.Lagrange*c.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==3) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. l.simplex(2)*l.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==6) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. q.simplex(2)*q.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==10) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. c.simplex(2)*c.simplex, no modify, standard node based.' END IF END IF ELSE IF(MaxNodesPerElement==4) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. 
l.Lagrange*l.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==9) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. q.Lagrange*q.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==16) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. c.Lagrange*c.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==3) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. l.simplex(2)*l.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==6) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. q.simplex(2)*q.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==10) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. c.simplex(2)*c.simplex, no modify, standard node based.' END IF END IF @@ -459,105 +460,105 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER IF(I==1)THEN IF(J==1) THEN IF(MaxNodesPerElement==8) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. l.Lagrange*l.Lagrange*l.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==27) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. q.Lagrange*q.Lagrange*q.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==64) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. c.Lagrange*c.Lagrange*c.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==4) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. l.simplex(2;3)*l.simplex*l.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==10) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. q.simplex(2;3)*q.simplex*q.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==20) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' x. c.simplex(2;3)*c.simplex*c.simplex, no modify, standard node based.' END IF ELSE IF(J==2) THEN IF(MaxNodesPerElement==8) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. l.Lagrange*l.Lagrange*l.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==27) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. q.Lagrange*q.Lagrange*q.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==64) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. c.Lagrange*c.Lagrange*c.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==4) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. l.simplex(2;3)*l.simplex*l.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==10) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. q.simplex(2;3)*q.simplex*q.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==20) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' y. c.simplex(2;3)*c.simplex*c.simplex, no modify, standard node based.' 
END IF ELSE IF(J==3) THEN IF(MaxNodesPerElement==8) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. l.Lagrange*l.Lagrange*l.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==27) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. q.Lagrange*q.Lagrange*q.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==64) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. c.Lagrange*c.Lagrange*c.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==4) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. l.simplex(2;3)*l.simplex*l.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==10) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. q.simplex(2;3)*q.simplex*q.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==20) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' z. c.simplex(2;3)*c.simplex*c.simplex, no modify, standard node based.' END IF END IF ELSE IF(MaxNodesPerElement==8) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. l.Lagrange*l.Lagrange*l.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==27) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. q.Lagrange*q.Lagrange*q.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==64) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. c.Lagrange*c.Lagrange*c.Lagrange, no modify, standard node based.' ELSE IF(MaxNodesPerElement==4) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. l.simplex(2;3)*l.simplex*l.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==10) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. q.simplex(2;3)*q.simplex*q.simplex, no modify, standard node based.' ELSE IF(MaxNodesPerElement==20) THEN - WRITE(myComputationalNodeNumber,*) & + WRITE(myWorldComputationNodeNumber,*) & & ' ',J,'. c.simplex(2;3)*c.simplex*c.simplex, no modify, standard node based.' END IF END IF END IF WRITE(INTG_STRING,'(I0)') MaxNodesPerElement - WRITE(myComputationalNodeNumber,*) ' #Nodes= ',TRIM(INTG_STRING) + WRITE(myWorldComputationNodeNumber,*) ' #Nodes= ',TRIM(INTG_STRING) DO K = 1,MaxNodesPerElement WRITE(INTG_STRING,'(I0)') K - WRITE(myComputationalNodeNumber,*) ' ',TRIM(INTG_STRING),'. #Values=1' - WRITE(myComputationalNodeNumber,*) ' Value indices: 1' - WRITE(myComputationalNodeNumber,*) ' Scale factor indices: ',TRIM(INTG_STRING) + WRITE(myWorldComputationNodeNumber,*) ' ',TRIM(INTG_STRING),'. 
#Values=1' + WRITE(myWorldComputationNodeNumber,*) ' Value indices: 1' + WRITE(myWorldComputationNodeNumber,*) ' Scale factor indices: ',TRIM(INTG_STRING) END DO END DO !J loop END DO !I loop IF(.NOT.ALLOCATED(ElementNodes)) ALLOCATE(ElementNodes(NumberOfElements,MaxNodesPerElement)) IF(.NOT.ALLOCATED(ElementNodesScales)) ALLOCATE(ElementNodesScales(NumberOfElements,MaxNodesPerElement)) DO I=1,NumberOfElements - ELEMENT_GLOBAL_NUMBER=COMPUTATIONAL_DOMAIN%DECOMPOSITION%TOPOLOGY%ELEMENTS%ELEMENTS(K)%GLOBAL_NUMBER + ELEMENT_GLOBAL_NUMBER=COMPUTATION_DOMAIN%DECOMPOSITION%TOPOLOGY%ELEMENTS%ELEMENTS(K)%GLOBAL_NUMBER DO J=1,MaxNodesPerElement - NODE_LOCAL_NUMBER=COMPUTATIONAL_DOMAIN%TOPOLOGY%ELEMENTS%ELEMENTS(I)%ELEMENT_NODES(J) - NODE_GLOBAL_NUMBER=COMPUTATIONAL_DOMAIN%MAPPINGS%NODES%LOCAL_TO_GLOBAL_MAP(NODE_LOCAL_NUMBER) + NODE_LOCAL_NUMBER=COMPUTATION_DOMAIN%TOPOLOGY%ELEMENTS%ELEMENTS(I)%ELEMENT_NODES(J) + NODE_GLOBAL_NUMBER=COMPUTATION_DOMAIN%MAPPINGS%NODES%LOCAL_TO_GLOBAL_MAP(NODE_LOCAL_NUMBER) ElementNodes(I,J)=NODE_GLOBAL_NUMBER ElementNodesScales(I,J)=1.0000000000000000E+00 END DO @@ -565,14 +566,14 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER DO K=1,NumberOfElements - ELEMENT_GLOBAL_NUMBER=COMPUTATIONAL_DOMAIN%DECOMPOSITION%TOPOLOGY%ELEMENTS%ELEMENTS(K)%GLOBAL_NUMBER + ELEMENT_GLOBAL_NUMBER=COMPUTATION_DOMAIN%DECOMPOSITION%TOPOLOGY%ELEMENTS%ELEMENTS(K)%GLOBAL_NUMBER IF (BasisType==1) THEN WRITE(INTG_STRING,'(I0)') ELEMENT_GLOBAL_NUMBER - WRITE(myComputationalNodeNumber,*) 'Element: ', TRIM(INTG_STRING),' 0 0' - WRITE(myComputationalNodeNumber,*) ' Nodes:' - WRITE(myComputationalNodeNumber,*) ' ', ElementNodes(K,1:MaxNodesPerElement) - WRITE(myComputationalNodeNumber,*) ' Scale factors:' - WRITE(myComputationalNodeNumber,*) ' ',ElementNodesScales(K,1:MaxNodesPerElement) + WRITE(myWorldComputationNodeNumber,*) 'Element: ', TRIM(INTG_STRING),' 0 0' + WRITE(myWorldComputationNodeNumber,*) ' Nodes:' + WRITE(myWorldComputationNodeNumber,*) ' ', ElementNodes(K,1:MaxNodesPerElement) + WRITE(myWorldComputationNodeNumber,*) ' Scale factors:' + WRITE(myWorldComputationNodeNumber,*) ' ',ElementNodesScales(K,1:MaxNodesPerElement) ELSEIF(BasisType==2) THEN IF(.NOT.ALLOCATED(SimplexOutputHelp)) ALLOCATE(SimplexOutputHelp(MaxNodesPerElement)) @@ -587,14 +588,14 @@ SUBROUTINE REACTION_DIFFUSION_IO_WRITE_CMGUI(REGION, EQUATIONS_SET_GLOBAL_NUMBER SimplexOutputHelp(4)=ElementNodes(K,3) END IF WRITE(INTG_STRING,'(I0)') ELEMENT_GLOBAL_NUMBER - WRITE(myComputationalNodeNumber,*) 'Element: ', TRIM(INTG_STRING),' 0 0' - WRITE(myComputationalNodeNumber,*) ' Nodes:' - WRITE(myComputationalNodeNumber,*) ' ', SimplexOutputHelp - WRITE(myComputationalNodeNumber,*) ' Scale factors:' - WRITE(myComputationalNodeNumber,*) ' ',ElementNodesScales(K,1:MaxNodesPerElement) + WRITE(myWorldComputationNodeNumber,*) 'Element: ', TRIM(INTG_STRING),' 0 0' + WRITE(myWorldComputationNodeNumber,*) ' Nodes:' + WRITE(myWorldComputationNodeNumber,*) ' ', SimplexOutputHelp + WRITE(myWorldComputationNodeNumber,*) ' Scale factors:' + WRITE(myWorldComputationNodeNumber,*) ' ',ElementNodesScales(K,1:MaxNodesPerElement) END IF ENDDO - CLOSE(myComputationalNodeNumber) + CLOSE(myWorldComputationNodeNumber) END IF !exportExelem flag check EXITS("REACTION_DIFFUSION_IO_WRITE_CMGUI") diff --git a/src/reaction_diffusion_equation_routines.f90 b/src/reaction_diffusion_equation_routines.f90 index 2f2af265..0f30f1a5 100755 --- a/src/reaction_diffusion_equation_routines.f90 +++ 
b/src/reaction_diffusion_equation_routines.f90 @@ -26,7 +26,7 @@ !> Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): Chris Bradley +!> Contributor(s): Vijay Rajagopal,Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -48,7 +48,8 @@ MODULE REACTION_DIFFUSION_EQUATION_ROUTINES USE BasisRoutines USE BasisAccessRoutines USE BOUNDARY_CONDITIONS_ROUTINES - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE Constants USE CONTROL_LOOP_ROUTINES USE ControlLoopAccessRoutines @@ -1513,8 +1514,8 @@ SUBROUTINE REACTION_DIFFUSION_POST_SOLVE_OUTPUT_DATA(CONTROL_LOOP,SOLVER,err,err REAL(DP) :: CURRENT_TIME,TIME_INCREMENT INTEGER(INTG) :: EQUATIONS_SET_IDX,CURRENT_LOOP_ITERATION,OUTPUT_FREQUENCY,MAX_DIGITS - INTEGER(INTG) :: myComputationalNodeNumber - LOGICAL :: exportExelem + INTEGER(INTG) :: myWorldComputationNodeNumber + LOGICAL :: exportExelem CHARACTER(30) :: FILE CHARACTER(30) :: OUTPUT_FILE @@ -1543,7 +1544,8 @@ SUBROUTINE REACTION_DIFFUSION_POST_SOLVE_OUTPUT_DATA(CONTROL_LOOP,SOLVER,err,err EQUATIONS_SET=>SOLVER_MAPPING%EQUATIONS_SETS(equations_set_idx)%PTR CURRENT_LOOP_ITERATION=CONTROL_LOOP%TIME_LOOP%ITERATION_NUMBER OUTPUT_FREQUENCY=CONTROL_LOOP%TIME_LOOP%OUTPUT_NUMBER - myComputationalNodeNumber = ComputationalEnvironment_NodeNumberGet(err,error) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myWorldComputationNodeNumber, & + & err,error,*999) MAX_DIGITS=FLOOR(LOG10((CONTROL_LOOP%TIME_LOOP%STOP_TIME-CONTROL_LOOP%TIME_LOOP%START_TIME)/ & & CONTROL_LOOP%TIME_LOOP%TIME_INCREMENT))+1 IF(OUTPUT_FREQUENCY>0) THEN @@ -1556,7 +1558,7 @@ SUBROUTINE REACTION_DIFFUSION_POST_SOLVE_OUTPUT_DATA(CONTROL_LOOP,SOLVER,err,err WRITE(TEMP_FMT,'(A2,A38,A20,A2)') "(", '"TIME_STEP_SPEC_1.part",I2.2,".",',FMT,")" FMT = TRIM(TEMP_FMT) WRITE(OUTPUT_FILE,FMT) & - & myComputationalNodeNumber,CURRENT_LOOP_ITERATION + & myWorldComputationNodeNumber,CURRENT_LOOP_ITERATION ELSE WRITE(TEMP_FMT,'("I",I0,".",I0)') MAX_DIGITS,MAX_DIGITS !200 FORMAT @@ -1564,7 +1566,7 @@ SUBROUTINE REACTION_DIFFUSION_POST_SOLVE_OUTPUT_DATA(CONTROL_LOOP,SOLVER,err,err WRITE(TEMP_FMT,'(A2,A38,A20,A2)') "(", '"TIME_STEP_SPEC_",I0,".part",I2.2,".",',FMT,")" FMT = TRIM(TEMP_FMT) WRITE(OUTPUT_FILE,FMT) & - & equations_set_idx, myComputationalNodeNumber,CURRENT_LOOP_ITERATION + & equations_set_idx, myWorldComputationNodeNumber,CURRENT_LOOP_ITERATION ENDIF WRITE(*,*) OUTPUT_FILE FILE=TRIM(OUTPUT_FILE) diff --git a/src/region_routines.f90 b/src/region_routines.f90 index 4be8544e..6885bf03 100755 --- a/src/region_routines.f90 +++ b/src/region_routines.f90 @@ -278,7 +278,7 @@ END SUBROUTINE REGION_CREATE_START !================================================================================================================================ ! - !>Destroys a region given by USER_NUMBER and all sub-regions under it. \todo create destroy by pointer method. \see OPENCMISS::Iron::cmfe_RegionDestroy + !>Destroys a region given by USER_NUMBER and all sub-regions under it. \todo create destroy by pointer method. 
\see OpenCMISS::Iron::cmfe_Region_Destroy RECURSIVE SUBROUTINE REGION_DESTROY_NUMBER(USER_NUMBER,ERR,ERROR,*) !Argument variables diff --git a/src/solver_mapping_routines.f90 b/src/solver_mapping_routines.f90 index 4e680eae..ff4de2d8 100755 --- a/src/solver_mapping_routines.f90 +++ b/src/solver_mapping_routines.f90 @@ -26,7 +26,7 @@ !> Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): +!> Contributor(s): Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -46,7 +46,8 @@ MODULE SOLVER_MAPPING_ROUTINES USE BaseRoutines USE BOUNDARY_CONDITIONS_ROUTINES - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE DistributedMatrixVector USE EquationsAccessRoutines USE DOMAIN_MAPPINGS @@ -139,7 +140,7 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) & numberOfLinearMatrices,NUMBER_OF_LOCAL_SOLVER_DOFS,NUMBER_OF_LOCAL_SOLVER_ROWS,NUMBER_OF_RANK_COLS, & & NUMBER_OF_RANK_ROWS,NUMBER_OF_VARIABLES,rank,rank_idx,row_idx,ROW_LIST_ITEM(4),ROW_RANK,solver_global_dof, & & solver_matrix_idx,solver_variable_idx,TOTAL_NUMBER_OF_LOCAL_SOLVER_DOFS,variable_idx,variableIdx, & - & VARIABLE_LIST_ITEM(3),variable_position_idx,variable_type, & + & VARIABLE_LIST_ITEM(3),variable_position_idx,variable_type,numberOfWorldComputationNodes, & & numberRowEquationsRows,numberColEquationsCols,rowEquationsRowIdx,colEquationsColIdx, & & globalDofCouplingNumber,equationsRow,eqnLocalDof,numberOfEquationsRHSVariables,rhsVariableType,equationsSetIdx INTEGER(INTG) :: temp_offset, solver_variable_idx_temp @@ -319,7 +320,8 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) ! for each rank. ! !Calculate the row mappings. - myrank=computationalEnvironment%myComputationalNodeNumber + CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment,numberOfWorldComputationNodes,err,error,*999) + CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myrank,err,error,*999) NUMBER_OF_GLOBAL_SOLVER_ROWS=0 NUMBER_OF_LOCAL_SOLVER_ROWS=0 !Add in the rows from any equations sets that have been added to the solver equations @@ -327,10 +329,10 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) ! !Allocate and initialise the rank lists. 
ALLOCATE(RANK_GLOBAL_ROWS_LISTS(SOLVER_MAPPING%NUMBER_OF_EQUATIONS_SETS+SOLVER_MAPPING% & - & NUMBER_OF_INTERFACE_CONDITIONS,0:computationalEnvironment%numberOfComputationalNodes-1),STAT=ERR) + & NUMBER_OF_INTERFACE_CONDITIONS,0:numberOfWorldComputationNodes-1),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate rank global rows lists.",ERR,ERROR,*999) CALL SolverDofCouplings_Initialise(rowCouplings,err,error,*999) - DO rank=0,computationalEnvironment%numberOfComputationalNodes-1 + DO rank=0,numberOfWorldComputationNodes-1 equations_idx=0 DO equations_set_idx=1,SOLVER_MAPPING%NUMBER_OF_EQUATIONS_SETS equations_idx=equations_idx+1 @@ -346,8 +348,7 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) CALL LIST_CREATE_START(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,ERR,ERROR,*999) CALL LIST_DATA_TYPE_SET(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,LIST_INTG_TYPE,ERR,ERROR,*999) CALL LIST_INITIAL_SIZE_SET(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,INT(vectorMapping% & - & numberOfGlobalRows/computationalEnvironment%numberOfComputationalNodes,INTG), & - & ERR,ERROR,*999) + & numberOfGlobalRows/numberOfWorldComputationNodes,INTG),ERR,ERROR,*999) CALL LIST_DATA_DIMENSION_SET(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,4,ERR,ERROR,*999) CALL LIST_KEY_DIMENSION_SET(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,1,ERR,ERROR,*999) CALL LIST_CREATE_FINISH(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,ERR,ERROR,*999) @@ -369,8 +370,7 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) CALL LIST_CREATE_START(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,ERR,ERROR,*999) CALL LIST_DATA_TYPE_SET(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,LIST_INTG_TYPE,ERR,ERROR,*999) CALL LIST_INITIAL_SIZE_SET(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR, & - & INT(INTERFACE_MAPPING%NUMBER_OF_GLOBAL_COLUMNS/computationalEnvironment%numberOfComputationalNodes, & - & INTG),ERR,ERROR,*999) + & INT(INTERFACE_MAPPING%NUMBER_OF_GLOBAL_COLUMNS/numberOfWorldComputationNodes,INTG),ERR,ERROR,*999) CALL LIST_DATA_DIMENSION_SET(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,4,ERR,ERROR,*999) CALL LIST_KEY_DIMENSION_SET(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,1,ERR,ERROR,*999) CALL LIST_CREATE_FINISH(RANK_GLOBAL_ROWS_LISTS(equations_idx,rank)%PTR,ERR,ERROR,*999) @@ -676,8 +676,7 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) ALLOCATE(SOLVER_MAPPING%ROW_DOFS_MAPPING,STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate solver mapping row dofs mapping.",ERR,ERROR,*999) !!TODO: what is the real number of domains for a solver??? 
- CALL DOMAIN_MAPPINGS_MAPPING_INITIALISE(SOLVER_MAPPING%ROW_DOFS_MAPPING,computationalEnvironment% & - & numberOfComputationalNodes,ERR,ERROR,*999) + CALL DOMAIN_MAPPINGS_MAPPING_INITIALISE(SOLVER_MAPPING%ROW_DOFS_MAPPING,numberOfWorldComputationNodes,ERR,ERROR,*999) ROW_DOMAIN_MAPPING=>SOLVER_MAPPING%ROW_DOFS_MAPPING ALLOCATE(ROW_DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(NUMBER_OF_GLOBAL_SOLVER_ROWS),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate row dofs mapping global to local map.",ERR,ERROR,*999) @@ -788,7 +787,7 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) IF(err/=0) CALL FlagError("Could not allocate dummy DOF coupling values.",err,error,*999) dummyDofCoupling%numberOfDofs=1 !Loop over the ranks to ensure that the lowest ranks have the lowest numbered solver variables - DO rank=0,computationalEnvironment%numberOfComputationalNodes-1 + DO rank=0,numberOfWorldComputationNodes-1 NUMBER_OF_LOCAL_SOLVER_ROWS=0 !Calculate the solver row <-> equations row & interface row mappings. @@ -1226,9 +1225,9 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) !dof_type is 1 for domain local DOFs and 2 for ghost DOFs ALLOCATE(RANK_GLOBAL_COLS_LISTS(2,SOLVER_MAPPING%NUMBER_OF_EQUATIONS_SETS+SOLVER_MAPPING% & & NUMBER_OF_INTERFACE_CONDITIONS,SOLVER_MAPPING%VARIABLES_LIST(solver_matrix_idx)%NUMBER_OF_VARIABLES, & - & 0:computationalEnvironment%numberOfComputationalNodes-1),STAT=ERR) + & 0:numberOfWorldComputationNodes-1),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate rank global columns lists.",ERR,ERROR,*999) - DO rank=0,computationalEnvironment%numberOfComputationalNodes-1 + DO rank=0,numberOfWorldComputationNodes-1 DO solver_variable_idx=1,SOLVER_MAPPING%VARIABLES_LIST(solver_matrix_idx)%NUMBER_OF_VARIABLES DO equations_idx=1,SOLVER_MAPPING%NUMBER_OF_EQUATIONS_SETS+SOLVER_MAPPING%NUMBER_OF_INTERFACE_CONDITIONS DO dof_type=1,2 @@ -1870,13 +1869,13 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) IF(ERR/=0) CALL FlagError("Could not allocate solver col to equations sets map column dofs mapping.",ERR,ERROR,*999) !!TODO: what is the real number of domains for a solver??? CALL DOMAIN_MAPPINGS_MAPPING_INITIALISE(SOLVER_MAPPING%SOLVER_COL_TO_EQUATIONS_COLS_MAP(solver_matrix_idx)% & - & COLUMN_DOFS_MAPPING,computationalEnvironment%numberOfComputationalNodes,ERR,ERROR,*999) + & COLUMN_DOFS_MAPPING,numberOfWorldComputationNodes,ERR,ERROR,*999) COL_DOMAIN_MAPPING=>SOLVER_MAPPING%SOLVER_COL_TO_EQUATIONS_COLS_MAP(solver_matrix_idx)%COLUMN_DOFS_MAPPING ALLOCATE(COL_DOMAIN_MAPPING%GLOBAL_TO_LOCAL_MAP(NUMBER_OF_GLOBAL_SOLVER_DOFS),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate column dofs mapping global to local.",ERR,ERROR,*999) COL_DOMAIN_MAPPING%NUMBER_OF_GLOBAL=NUMBER_OF_GLOBAL_SOLVER_DOFS ALLOCATE(VARIABLE_RANK_PROCESSED(SOLVER_MAPPING%VARIABLES_LIST(solver_matrix_idx)%NUMBER_OF_VARIABLES, & - & 0:computationalEnvironment%numberOfComputationalNodes-1),STAT=ERR) + & 0:numberOfWorldComputationNodes-1),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate variable rank processed.",ERR,ERROR,*999) VARIABLE_RANK_PROCESSED=.FALSE. 
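The solver-mapping hunks above all follow one pattern: instead of reading computationalEnvironment%numberOfComputationalNodes and %myComputationalNodeNumber directly, the routine now queries the computation environment through the access routines and loops over the world ranks. A condensed sketch of that pattern, as it would sit inside a library subroutine that uses ComputationAccessRoutines (error labels as in the diff):

   INTEGER(INTG) :: myrank,numberOfWorldComputationNodes,rank

   !Query the world computation environment through the accessors.
   CALL ComputationEnvironment_NumberOfWorldNodesGet(computationEnvironment, &
     & numberOfWorldComputationNodes,err,error,*999)
   CALL ComputationEnvironment_WorldNodeNumberGet(computationEnvironment,myrank,err,error,*999)
   DO rank=0,numberOfWorldComputationNodes-1
     !...build the per-rank row and column lists as in the hunks above...
   ENDDO !rank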
!Calculate the column mappings @@ -2260,7 +2259,7 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) DOF_MAP(solver_variable_idx)%PTR=0 ENDDO !solver_variable_idx - ALLOCATE(solver_local_dof(0:computationalEnvironment%numberOfComputationalNodes-1),STAT=ERR) + ALLOCATE(solver_local_dof(0:numberOfWorldComputationNodes-1),STAT=ERR) IF(ERR/=0) CALL FlagError("Could not allocate solver local dof array.",ERR,ERROR,*999) ! @@ -2271,7 +2270,7 @@ SUBROUTINE SOLVER_MAPPING_CALCULATE(SOLVER_MAPPING,ERR,ERROR,*) solver_global_dof=0 solver_local_dof=0 DO dof_type=1,2 - DO rank=0,computationalEnvironment%numberOfComputationalNodes-1 + DO rank=0,numberOfWorldComputationNodes-1 DO solver_variable_idx=1,SOLVER_MAPPING%VARIABLES_LIST(solver_matrix_idx)%NUMBER_OF_VARIABLES diff --git a/src/solver_routines.f90 b/src/solver_routines.f90 index 82924d39..25ef4961 100644 --- a/src/solver_routines.f90 +++ b/src/solver_routines.f90 @@ -26,7 +26,7 @@ !> Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): +!> Contributor(s): Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -52,7 +52,8 @@ MODULE SOLVER_ROUTINES USE CMISS_CELLML USE CmissPetsc USE CmissPetscTypes - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE Constants USE DistributedMatrixVector USE EquationsAccessRoutines @@ -9733,6 +9734,7 @@ SUBROUTINE SOLVER_LINEAR_DIRECT_CREATE_FINISH(LINEAR_DIRECT_SOLVER,ERR,ERROR,*) INTEGER(INTG), INTENT(OUT) :: ERR !LINEAR_SOLVER%SOLVER IF(ASSOCIATED(SOLVER)) THEN + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) SELECT CASE(LINEAR_DIRECT_SOLVER%DIRECT_SOLVER_TYPE) CASE(SOLVER_DIRECT_LU) IF(ASSOCIATED(SOLVER%LINKING_SOLVER)) THEN @@ -9809,7 +9812,7 @@ SUBROUTINE SOLVER_LINEAR_DIRECT_CREATE_FINISH(LINEAR_DIRECT_SOLVER,ERR,ERROR,*) !Nothing else to do CASE(SOLVER_MUMPS_LIBRARY,SOLVER_SUPERLU_LIBRARY,SOLVER_PASTIX_LIBRARY,SOLVER_LAPACK_LIBRARY) !Set up solver through PETSc - CALL Petsc_KSPCreate(computationalEnvironment%mpiCommunicator,LINEAR_DIRECT_SOLVER%KSP,ERR,ERROR,*999) + CALL Petsc_KSPCreate(worldCommunicator,LINEAR_DIRECT_SOLVER%KSP,ERR,ERROR,*999) !Set any further KSP options from the command line options CALL Petsc_KSPSetFromOptions(LINEAR_DIRECT_SOLVER%KSP,ERR,ERROR,*999) @@ -10925,6 +10928,7 @@ SUBROUTINE SOLVER_LINEAR_ITERATIVE_CREATE_FINISH(LINEAR_ITERATIVE_SOLVER,ERR,ERR INTEGER(INTG), INTENT(OUT) :: ERR !LINEAR_SOLVER%SOLVER IF(ASSOCIATED(SOLVER)) THEN + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) !Should really check iterative types here and then the solver library but as they are all PETSc for now hold off. 
SELECT CASE(LINEAR_ITERATIVE_SOLVER%SOLVER_LIBRARY) CASE(SOLVER_CMISS_LIBRARY) @@ -11062,7 +11067,7 @@ SUBROUTINE SOLVER_LINEAR_ITERATIVE_CREATE_FINISH(LINEAR_ITERATIVE_SOLVER,ERR,ERR CALL FlagError("Solver linking solve is not associated.",ERR,ERROR,*999) ENDIF ELSE - CALL Petsc_KSPCreate(computationalEnvironment%mpiCommunicator,LINEAR_ITERATIVE_SOLVER%KSP,ERR,ERROR,*999) + CALL Petsc_KSPCreate(worldCommunicator,LINEAR_ITERATIVE_SOLVER%KSP,ERR,ERROR,*999) ENDIF !Set the iterative solver type SELECT CASE(LINEAR_ITERATIVE_SOLVER%ITERATIVE_SOLVER_TYPE) @@ -15719,7 +15724,7 @@ SUBROUTINE Solver_QuasiNewtonLinesearchCreateFinish(LINESEARCH_SOLVER,ERR,ERROR, EXTERNAL :: Problem_SolverResidualEvaluatePetsc EXTERNAL :: Problem_SolverConvergenceTestPetsc EXTERNAL :: Problem_SolverNonlinearMonitorPETSC - INTEGER(INTG) :: equations_matrix_idx,equations_set_idx,interface_condition_idx,interface_matrix_idx + INTEGER(INTG) :: equations_matrix_idx,equations_set_idx,interface_condition_idx,interface_matrix_idx,worldCommunicator TYPE(DistributedMatrixType), POINTER :: JACOBIAN_MATRIX TYPE(DistributedVectorType), POINTER :: RESIDUAL_VECTOR TYPE(EquationsType), POINTER :: equations @@ -15756,6 +15761,7 @@ SUBROUTINE Solver_QuasiNewtonLinesearchCreateFinish(LINESEARCH_SOLVER,ERR,ERROR, IF(ASSOCIATED(NONLINEAR_SOLVER)) THEN SOLVER=>NONLINEAR_SOLVER%SOLVER IF(ASSOCIATED(SOLVER)) THEN + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) SOLVER_EQUATIONS=>SOLVER%SOLVER_EQUATIONS IF(ASSOCIATED(SOLVER_EQUATIONS)) THEN SELECT CASE(LINESEARCH_SOLVER%SOLVER_LIBRARY) @@ -15897,7 +15903,7 @@ SUBROUTINE Solver_QuasiNewtonLinesearchCreateFinish(LINESEARCH_SOLVER,ERR,ERROR, ENDIF ENDDO !interface_idx !Create the PETSc SNES solver - CALL Petsc_SnesCreate(computationalEnvironment%mpiCommunicator,LINESEARCH_SOLVER%snes,ERR,ERROR,*999) + CALL Petsc_SnesCreate(worldCommunicator,LINESEARCH_SOLVER%snes,ERR,ERROR,*999) !Set the nonlinear solver type to be a Quasi-Newton line search solver CALL Petsc_SnesSetType(LINESEARCH_SOLVER%snes,PETSC_SNESQN,ERR,ERROR,*999) !Following routines don't work for petsc version < 3.5. 
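The solver_routines changes in this file share one pattern: fetch the world MPI communicator once from the computation environment and pass it to the PETSc creation calls, rather than reaching into computationalEnvironment%mpiCommunicator. A condensed sketch using only the calls and declarations shown in these hunks:

   INTEGER(INTG) :: worldCommunicator

   CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999)
   !Linear solvers create their KSP on the world communicator...
   CALL Petsc_KSPCreate(worldCommunicator,LINEAR_ITERATIVE_SOLVER%KSP,err,error,*999)
   !...and nonlinear solvers their SNES.
   CALL Petsc_SnesCreate(worldCommunicator,LINESEARCH_SOLVER%snes,err,error,*999)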
@@ -16960,7 +16966,7 @@ SUBROUTINE Solver_QuasiNewtonTrustRegionCreateFinish(TRUSTREGION_SOLVER,ERR,ERRO TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !NONLINEAR_SOLVER%SOLVER IF(ASSOCIATED(SOLVER)) THEN + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) SOLVER_EQUATIONS=>SOLVER%SOLVER_EQUATIONS IF(ASSOCIATED(SOLVER_EQUATIONS)) THEN SELECT CASE(TRUSTREGION_SOLVER%SOLVER_LIBRARY) @@ -17079,7 +17086,7 @@ SUBROUTINE Solver_QuasiNewtonTrustRegionCreateFinish(TRUSTREGION_SOLVER,ERR,ERRO END SELECT CALL SOLVER_MATRICES_CREATE_FINISH(SOLVER_MATRICES,ERR,ERROR,*999) !Create the PETSc SNES solver - CALL Petsc_SnesCreate(computationalEnvironment%mpiCommunicator,TRUSTREGION_SOLVER%snes,ERR,ERROR,*999) + CALL Petsc_SnesCreate(worldCommunicator,TRUSTREGION_SOLVER%snes,ERR,ERROR,*999) !Set the nonlinear solver type to be a Quasi-Newton trust region solver CALL Petsc_SnesSetType(TRUSTREGION_SOLVER%snes,PETSC_SNESNEWTONTR,ERR,ERROR,*999) !Set the nonlinear function @@ -18493,7 +18500,7 @@ SUBROUTINE SOLVER_NEWTON_LINESEARCH_CREATE_FINISH(LINESEARCH_SOLVER,ERR,ERROR,*) EXTERNAL :: Problem_SolverResidualEvaluatePetsc EXTERNAL :: Problem_SolverConvergenceTestPetsc EXTERNAL :: Problem_SolverNonlinearMonitorPetsc - INTEGER(INTG) :: equations_matrix_idx,equations_set_idx,interface_condition_idx,interface_matrix_idx + INTEGER(INTG) :: equations_matrix_idx,equations_set_idx,interface_condition_idx,interface_matrix_idx,worldCommunicator TYPE(DistributedMatrixType), POINTER :: JACOBIAN_MATRIX TYPE(DistributedVectorType), POINTER :: RESIDUAL_VECTOR TYPE(EquationsType), POINTER :: equations @@ -18530,6 +18537,7 @@ SUBROUTINE SOLVER_NEWTON_LINESEARCH_CREATE_FINISH(LINESEARCH_SOLVER,ERR,ERROR,*) IF(ASSOCIATED(NONLINEAR_SOLVER)) THEN SOLVER=>NONLINEAR_SOLVER%SOLVER IF(ASSOCIATED(SOLVER)) THEN + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) SOLVER_EQUATIONS=>SOLVER%SOLVER_EQUATIONS IF(ASSOCIATED(SOLVER_EQUATIONS)) THEN SELECT CASE(LINESEARCH_SOLVER%SOLVER_LIBRARY) @@ -18671,7 +18679,7 @@ SUBROUTINE SOLVER_NEWTON_LINESEARCH_CREATE_FINISH(LINESEARCH_SOLVER,ERR,ERROR,*) ENDIF ENDDO !interface_idx !Create the PETSc SNES solver - CALL Petsc_SnesCreate(computationalEnvironment%mpiCommunicator,LINESEARCH_SOLVER%snes,ERR,ERROR,*999) + CALL Petsc_SnesCreate(worldCommunicator,LINESEARCH_SOLVER%snes,ERR,ERROR,*999) !Set the nonlinear solver type to be a Newton line search solver CALL Petsc_SnesSetType(LINESEARCH_SOLVER%snes,PETSC_SNESNEWTONLS,ERR,ERROR,*999) @@ -19717,7 +19725,7 @@ SUBROUTINE SOLVER_NEWTON_TRUSTREGION_CREATE_FINISH(TRUSTREGION_SOLVER,ERR,ERROR, TYPE(VARYING_STRING), INTENT(OUT) :: ERROR !NONLINEAR_SOLVER%SOLVER IF(ASSOCIATED(SOLVER)) THEN + CALL ComputationEnvironment_WorldCommunicatorGet(computationEnvironment,worldCommunicator,err,error,*999) SOLVER_EQUATIONS=>SOLVER%SOLVER_EQUATIONS IF(ASSOCIATED(SOLVER_EQUATIONS)) THEN SELECT CASE(TRUSTREGION_SOLVER%SOLVER_LIBRARY) @@ -19836,7 +19845,7 @@ SUBROUTINE SOLVER_NEWTON_TRUSTREGION_CREATE_FINISH(TRUSTREGION_SOLVER,ERR,ERROR, END SELECT CALL SOLVER_MATRICES_CREATE_FINISH(SOLVER_MATRICES,ERR,ERROR,*999) !Create the PETSc SNES solver - CALL Petsc_SnesCreate(computationalEnvironment%mpiCommunicator,TRUSTREGION_SOLVER%snes,ERR,ERROR,*999) + CALL Petsc_SnesCreate(worldCommunicator,TRUSTREGION_SOLVER%snes,ERR,ERROR,*999) !Set the nonlinear solver type to be a Newton trust region solver CALL 
Petsc_SnesSetType(TRUSTREGION_SOLVER%snes,PETSC_SNESNEWTONTR,ERR,ERROR,*999) !Set the solver as the SNES application context diff --git a/src/stree_equation_routines.f90 b/src/stree_equation_routines.f90 index dc2f62fa..6bc64d65 100644 --- a/src/stree_equation_routines.f90 +++ b/src/stree_equation_routines.f90 @@ -54,7 +54,8 @@ MODULE Stree_EQUATION_ROUTINES USE Constants USE CONTROL_LOOP_ROUTINES USE CmissMPI - USE ComputationEnvironment + USE ComputationRoutines + USE ComputationAccessRoutines USE COORDINATE_ROUTINES USE DistributedMatrixVector USE DOMAIN_MAPPINGS diff --git a/src/types.f90 b/src/types.f90 index f40a2c50..92c9052e 100644 --- a/src/types.f90 +++ b/src/types.f90 @@ -26,7 +26,7 @@ !> Auckland, the University of Oxford and King's College, London. !> All Rights Reserved. !> -!> Contributor(s): +!> Contributor(s): Chris Bradley !> !> Alternatively, the contents of this file may be used under the terms of !> either the GNU General Public License Version 2 or later (the "GPL"), or @@ -71,6 +71,7 @@ MODULE Types USE CmissPetscTypes, ONLY : PetscISColoringType,PetscKspType,PetscMatType,PetscMatColoringType,PetscMatFDColoringType, & & PetscPCType,PetscSnesType,PetscSnesLineSearchType,PetscTaoType,PetscVecType + USE ComputationAccessRoutines USE Constants USE Kinds USE ISO_C_BINDING @@ -1094,8 +1095,8 @@ MODULE Types INTEGER(INTG) :: numberOfDataPoints !>NOTE: ",A)') "It doesn't make any sense to use more than 2 computational nodes for this example?" +! IF (NumberOfComputationNodes .gt. 2) +! WRITE(*,'(">>NOTE: ",A)') "It doesn't make any sense to use more than 2 computation nodes for this example?" ! STOP ! ENDIF @@ -248,7 +250,7 @@ PROGRAM CELLMLINTEGRATIONFORTRANEXAMPLE CALL cmfe_Decomposition_CreateStart(DecompositionUserNumber,Mesh,Decomposition,Err) !Set the decomposition to be a general decomposition with the specified number of domains CALL cmfe_Decomposition_TypeSet(Decomposition,CMFE_DECOMPOSITION_CALCULATED_TYPE,Err) - CALL cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationalNodes,Err) + CALL cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationNodes,Err) !Finish the decomposition CALL cmfe_Decomposition_CreateFinish(Decomposition,Err) @@ -384,7 +386,7 @@ PROGRAM CELLMLINTEGRATIONFORTRANEXAMPLE !Set the Stimulus all nodes? DO node_idx=1,NUMBER_OF_ELEMENTS+1 CALL cmfe_Decomposition_NodeDomainGet(Decomposition,node_idx,1,NodeDomain,Err) - IF(NodeDomain==ComputationalNodeNumber) THEN + IF(NodeDomain==ComputationNodeNumber) THEN CALL cmfe_Field_ParameterSetUpdateNode(CellMLParametersField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1, & & node_idx,stimcomponent,STIM_VALUE,Err) ENDIF @@ -396,7 +398,7 @@ PROGRAM CELLMLINTEGRATIONFORTRANEXAMPLE ! !Loop over the nodes ! DO node_idx=1,LastNodeNumber ! CALL cmfe_Decomposition_NodeDomainGet(Decomposition,node_idx,1,NodeDomain,Err) -! IF(NodeDomain==ComputationalNodeNumber) THEN +! IF(NodeDomain==ComputationNodeNumber) THEN ! CALL cmfe_Field_ParameterSetGetNode(GeometricField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1,node_idx,1, & ! & X,Err) ! 
CALL cmfe_Field_ParameterSetGetNode(GeometricField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1,node_idx,2, & @@ -488,11 +490,11 @@ PROGRAM CELLMLINTEGRATIONFORTRANEXAMPLE CALL cmfe_BoundaryConditions_Initialise(BoundaryConditions,Err) CALL cmfe_SolverEquations_BoundaryConditionsCreateStart(SolverEquations,BoundaryConditions,Err) !Set the first node to 0.0 and the last node to 1.0 - IF(FirstNodeDomain==ComputationalNodeNumber) THEN + IF(FirstNodeDomain==ComputationNodeNumber) THEN !CALL cmfe_BoundaryConditions_SetNode(BoundaryConditions,DependentField,CMFE_FIELD_U_VARIABLE_TYPE,1,1,FirstNodeNumber,1, & ! & CMFE_BOUNDARY_CONDITION_FIXED,0.0_CMISSRP,Err) ENDIF - IF(LastNodeDomain==ComputationalNodeNumber) THEN + IF(LastNodeDomain==ComputationNodeNumber) THEN !CALL cmfe_BoundaryConditions_SetNode(BoundaryConditions,DependentField,CMFE_FIELD_U_VARIABLE_TYPE,1,1,LastNodeNumber,1, & ! & CMFE_BOUNDARY_CONDITION_FIXED,1.0_CMISSRP,Err) ENDIF @@ -506,7 +508,7 @@ PROGRAM CELLMLINTEGRATIONFORTRANEXAMPLE !Set the Stimulus at node 1 DO node_idx=1,NUMBER_OF_ELEMENTS+1 CALL cmfe_Decomposition_NodeDomainGet(Decomposition,node_idx,1,NodeDomain,Err) - IF(NodeDomain==ComputationalNodeNumber) THEN + IF(NodeDomain==ComputationNodeNumber) THEN CALL cmfe_Field_ParameterSetUpdateNode(CellMLParametersField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1, & & node_idx,stimcomponent,0.0_CMISSRP,Err) ENDIF diff --git a/tests/CellML/Monodomain.f90 b/tests/CellML/Monodomain.f90 index a89b230c..9e2fecc2 100644 --- a/tests/CellML/Monodomain.f90 +++ b/tests/CellML/Monodomain.f90 @@ -122,6 +122,7 @@ PROGRAM MONODOMAINEXAMPLE TYPE(cmfe_BoundaryConditionsType) :: BoundaryConditions TYPE(cmfe_CellMLType) :: CellML TYPE(cmfe_CellMLEquationsType) :: CellMLEquations + TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment TYPE(cmfe_ControlLoopType) :: ControlLoop TYPE(cmfe_CoordinateSystemType) :: CoordinateSystem,WorldCoordinateSystem TYPE(cmfe_DecompositionType) :: Decomposition @@ -139,7 +140,7 @@ PROGRAM MONODOMAINEXAMPLE !Generic CMISS variables - INTEGER(CMISSIntg) :: NumberOfComputationalNodes,ComputationalNodeNumber + INTEGER(CMISSIntg) :: NumberOfComputationNodes,ComputationNodeNumber INTEGER(CMISSIntg) :: EquationsSetIndex,CellMLIndex INTEGER(CMISSIntg) :: FirstNodeNumber,LastNodeNumber INTEGER(CMISSIntg) :: FirstNodeDomain,LastNodeDomain,NodeDomain @@ -180,9 +181,10 @@ PROGRAM MONODOMAINEXAMPLE !Trap errors CALL cmfe_ErrorHandlingModeSet(CMFE_ERRORS_TRAP_ERROR,Err) - !Get the computational nodes information - CALL cmfe_ComputationalNumberOfNodesGet(NumberOfComputationalNodes,Err) - CALL cmfe_ComputationalNodeNumberGet(ComputationalNodeNumber,Err) + !Get the computation nodes information + CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err) + CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err) + CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err) !CALL cmfe_OutputSetOn("Monodomain",Err) @@ -250,7 +252,7 @@ PROGRAM MONODOMAINEXAMPLE CALL cmfe_Decomposition_CreateStart(DecompositionUserNumber,Mesh,Decomposition,Err) !Set the decomposition to be a general decomposition with the specified number of domains CALL cmfe_Decomposition_TypeSet(Decomposition,CMFE_DECOMPOSITION_CALCULATED_TYPE,Err) - CALL cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationalNodes,Err) + CALL cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationNodes,Err) 
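Taken together, the test-program hunks migrate user code from the two cmfe_Computational* getters to an explicit computation environment object. A condensed sketch of the updated boilerplate follows; the surrounding program, module USE statements and the decomposition/field set-up are assumed to be as in the existing examples, and only the calls appearing in this diff are used.

   TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment
   TYPE(cmfe_DecompositionType) :: Decomposition
   INTEGER(CMISSIntg) :: NumberOfComputationNodes,ComputationNodeNumber,NodeDomain,node_idx,Err

   !Get the computation nodes information
   CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err)
   CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err)
   CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err)

   !Use one decomposition domain per world computation node...
   CALL cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationNodes,Err)

   !...and guard per-node operations with the world node number.
   node_idx=1   !illustrative node
   CALL cmfe_Decomposition_NodeDomainGet(Decomposition,node_idx,1,NodeDomain,Err)
   IF(NodeDomain==ComputationNodeNumber) THEN
     !e.g. update field parameters only for nodes owned by this rank
   ENDIF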
!Finish the decomposition CALL cmfe_Decomposition_CreateFinish(Decomposition,Err) @@ -397,7 +399,7 @@ PROGRAM MONODOMAINEXAMPLE !Set the Stimulus at half the bottom nodes DO node_idx=1,NUMBER_OF_ELEMENTS/2 CALL cmfe_Decomposition_NodeDomainGet(Decomposition,node_idx,1,NodeDomain,Err) - IF(NodeDomain==ComputationalNodeNumber) THEN + IF(NodeDomain==ComputationNodeNumber) THEN CALL cmfe_Field_ParameterSetUpdateNode(CellMLParametersField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1, & & node_idx,stimcomponent,STIM_VALUE,Err) ENDIF @@ -409,7 +411,7 @@ PROGRAM MONODOMAINEXAMPLE !Loop over the nodes DO node_idx=1,LastNodeNumber CALL cmfe_Decomposition_NodeDomainGet(Decomposition,node_idx,1,NodeDomain,Err) - IF(NodeDomain==ComputationalNodeNumber) THEN + IF(NodeDomain==ComputationNodeNumber) THEN CALL cmfe_Field_ParameterSetGetNode(GeometricField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1,node_idx,1, & & X,Err) CALL cmfe_Field_ParameterSetGetNode(GeometricField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1,node_idx,2, & @@ -500,11 +502,11 @@ PROGRAM MONODOMAINEXAMPLE CALL cmfe_BoundaryConditions_Initialise(BoundaryConditions,Err) CALL cmfe_SolverEquations_BoundaryConditionsCreateStart(SolverEquations,BoundaryConditions,Err) !Set the first node to 0.0 and the last node to 1.0 - IF(FirstNodeDomain==ComputationalNodeNumber) THEN + IF(FirstNodeDomain==ComputationNodeNumber) THEN !CALL cmfe_BoundaryConditions_SetNode(BoundaryConditions,DependentField,CMFE_FIELD_U_VARIABLE_TYPE,1,1,FirstNodeNumber,1, & ! & CMFE_BOUNDARY_CONDITION_FIXED,0.0_CMISSRP,Err) ENDIF - IF(LastNodeDomain==ComputationalNodeNumber) THEN + IF(LastNodeDomain==ComputationNodeNumber) THEN !CALL cmfe_BoundaryConditions_SetNode(BoundaryConditions,DependentField,CMFE_FIELD_U_VARIABLE_TYPE,1,1,LastNodeNumber,1, & ! 
& CMFE_BOUNDARY_CONDITION_FIXED,1.0_CMISSRP,Err) ENDIF @@ -518,7 +520,7 @@ PROGRAM MONODOMAINEXAMPLE !Set the Stimulus at node 1 DO node_idx=1,NUMBER_OF_ELEMENTS/2 CALL cmfe_Decomposition_NodeDomainGet(Decomposition,node_idx,1,NodeDomain,Err) - IF(NodeDomain==ComputationalNodeNumber) THEN + IF(NodeDomain==ComputationNodeNumber) THEN CALL cmfe_Field_ParameterSetUpdateNode(CellMLParametersField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1, & & node_idx,stimcomponent,0.0_CMISSRP,Err) ENDIF diff --git a/tests/ClassicalField/AnalyticHelmholtz.f90 b/tests/ClassicalField/AnalyticHelmholtz.f90 index 34eb7922..ae6c2363 100644 --- a/tests/ClassicalField/AnalyticHelmholtz.f90 +++ b/tests/ClassicalField/AnalyticHelmholtz.f90 @@ -281,7 +281,7 @@ SUBROUTINE ANALYTICHELMHOLTZ_GENERIC(NUMBER_GLOBAL_X_ELEMENTS,NUMBER_GLOBAL_Y_EL NUMBER_OF_DOMAINS=1 - !Broadcast the number of elements in the X & Y directions and the number of partitions to the other computational nodes + !Broadcast the number of elements in the X & Y directions and the number of partitions to the other computation nodes CALL MPI_BCAST(NUMBER_GLOBAL_X_ELEMENTS,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) CALL MPI_BCAST(NUMBER_GLOBAL_Y_ELEMENTS,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) CALL MPI_BCAST(NUMBER_GLOBAL_Z_ELEMENTS,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) diff --git a/tests/ClassicalField/AnalyticLaplace.f90 b/tests/ClassicalField/AnalyticLaplace.f90 index 0deec29a..91522b43 100644 --- a/tests/ClassicalField/AnalyticLaplace.f90 +++ b/tests/ClassicalField/AnalyticLaplace.f90 @@ -381,7 +381,7 @@ SUBROUTINE ANALYTICLAPLACE_GENERIC(NUMBER_GLOBAL_X_ELEMENTS,NUMBER_GLOBAL_Y_ELEM NUMBER_OF_DOMAINS=1 - !Broadcast the number of elements in the X & Y directions and the number of partitions to the other computational nodes + !Broadcast the number of elements in the X & Y directions and the number of partitions to the other computation nodes CALL MPI_BCAST(NUMBER_GLOBAL_X_ELEMENTS,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) CALL MPI_BCAST(NUMBER_GLOBAL_Y_ELEMENTS,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) CALL MPI_BCAST(NUMBER_GLOBAL_Z_ELEMENTS,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) diff --git a/tests/ClassicalField/AnalyticNonlinearPoisson.f90 b/tests/ClassicalField/AnalyticNonlinearPoisson.f90 index 3e96d89f..dcb7e8eb 100644 --- a/tests/ClassicalField/AnalyticNonlinearPoisson.f90 +++ b/tests/ClassicalField/AnalyticNonlinearPoisson.f90 @@ -97,6 +97,7 @@ PROGRAM NONLINEARPOISSONEXAMPLE !CMISS variables TYPE(cmfe_BasisType) :: Basis + TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment TYPE(cmfe_CoordinateSystemType) :: CoordinateSystem,WorldCoordinateSystem TYPE(cmfe_DecompositionType) :: Decomposition TYPE(cmfe_EquationsType) :: Equations @@ -116,7 +117,7 @@ PROGRAM NONLINEARPOISSONEXAMPLE INTEGER(CMISSIntg) :: EquationsSetIndex INTEGER(CMISSIntg) :: Err - INTEGER(CMISSIntg) :: NumberOfComputationalNodes,ComputationalNodeNumber + INTEGER(CMISSIntg) :: NumberOfComputationNodes,ComputationNodeNumber #ifdef WIN32 !Quickwin type @@ -179,9 +180,10 @@ PROGRAM NONLINEARPOISSONEXAMPLE !Output to a file CALL cmfe_OutputSetOn("NonlinearPoisson",Err) - !Get the computational nodes information - CALL cmfe_ComputationalNumberOfNodesGet(NumberOfComputationalNodes,Err) - CALL cmfe_ComputationalNodeNumberGet(ComputationalNodeNumber,Err) + !Get the computation nodes information + CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err) + CALL 
cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err) + CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err) !Start the creation of a new RC coordinate system CALL cmfe_CoordinateSystem_Initialise(CoordinateSystem,Err) @@ -268,7 +270,7 @@ PROGRAM NONLINEARPOISSONEXAMPLE CALL cmfe_Decomposition_CreateStart(DecompositionUserNumber,Mesh,Decomposition,Err) !Set the decomposition to be a general decomposition with the specified number of domains CALL cmfe_Decomposition_TypeSet(Decomposition,CMFE_DECOMPOSITION_CALCULATED_TYPE,Err) - CALL cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationalNodes,Err) + CALL cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationNodes,Err) !Finish the decomposition CALL cmfe_Decomposition_CreateFinish(Decomposition,Err) diff --git a/tests/ClassicalField/Laplace.f90 b/tests/ClassicalField/Laplace.f90 index c4ff1b80..592e8fcd 100644 --- a/tests/ClassicalField/Laplace.f90 +++ b/tests/ClassicalField/Laplace.f90 @@ -97,6 +97,7 @@ PROGRAM LAPLACEEXAMPLE TYPE(cmfe_BasisType) :: Basis TYPE(cmfe_BoundaryConditionsType) :: BoundaryConditions + TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment TYPE(cmfe_CoordinateSystemType) :: CoordinateSystem,WorldCoordinateSystem TYPE(cmfe_DecompositionType) :: Decomposition TYPE(cmfe_EquationsType) :: Equations @@ -119,7 +120,7 @@ PROGRAM LAPLACEEXAMPLE !Generic CMISS variables - INTEGER(CMISSIntg) :: NumberOfComputationalNodes,ComputationalNodeNumber + INTEGER(CMISSIntg) :: NumberOfComputationNodes,ComputationNodeNumber INTEGER(CMISSIntg) :: EquationsSetIndex INTEGER(CMISSIntg) :: FirstNodeNumber,LastNodeNumber INTEGER(CMISSIntg) :: FirstNodeDomain,LastNodeDomain @@ -183,9 +184,10 @@ PROGRAM LAPLACEEXAMPLE CALL cmfe_OutputSetOn(Filename,Err) - !Get the computational nodes information - CALL cmfe_ComputationalNumberOfNodesGet(NumberOfComputationalNodes,Err) - CALL cmfe_ComputationalNodeNumberGet(ComputationalNodeNumber,Err) + !Get the computation nodes information + CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err) + CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err) + CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err) !Start the creation of a new RC coordinate system CALL cmfe_CoordinateSystem_Initialise(CoordinateSystem,Err) @@ -273,7 +275,7 @@ PROGRAM LAPLACEEXAMPLE CALL cmfe_Decomposition_CreateStart(DecompositionUserNumber,Mesh,Decomposition,Err) !Set the decomposition to be a general decomposition with the specified number of domains CALL cmfe_Decomposition_TypeSet(Decomposition,CMFE_DECOMPOSITION_CALCULATED_TYPE,Err) - CALL cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationalNodes,Err) + CALL cmfe_Decomposition_NumberOfDomainsSet(Decomposition,NumberOfComputationNodes,Err) !Finish the decomposition CALL cmfe_Decomposition_CreateFinish(Decomposition,Err) @@ -394,11 +396,11 @@ PROGRAM LAPLACEEXAMPLE CALL cmfe_Nodes_NumberOfNodesGet(Nodes,LastNodeNumber,Err) CALL cmfe_Decomposition_NodeDomainGet(Decomposition,FirstNodeNumber,1,FirstNodeDomain,Err) CALL cmfe_Decomposition_NodeDomainGet(Decomposition,LastNodeNumber,1,LastNodeDomain,Err) - IF(FirstNodeDomain==ComputationalNodeNumber) THEN + IF(FirstNodeDomain==ComputationNodeNumber) THEN CALL 
cmfe_BoundaryConditions_SetNode(BoundaryConditions,DependentField,CMFE_FIELD_U_VARIABLE_TYPE,1,1,FirstNodeNumber,1, & & CMFE_BOUNDARY_CONDITION_FIXED,0.0_CMISSRP,Err) ENDIF - IF(LastNodeDomain==ComputationalNodeNumber) THEN + IF(LastNodeDomain==ComputationNodeNumber) THEN CALL cmfe_BoundaryConditions_SetNode(BoundaryConditions,DependentField,CMFE_FIELD_U_VARIABLE_TYPE,1,1,LastNodeNumber,1, & & CMFE_BOUNDARY_CONDITION_FIXED,1.0_CMISSRP,Err) ENDIF diff --git a/tests/FieldML_IO/cube.f90 b/tests/FieldML_IO/cube.f90 index 22dbdfbc..0f33fe39 100644 --- a/tests/FieldML_IO/cube.f90 +++ b/tests/FieldML_IO/cube.f90 @@ -117,19 +117,20 @@ SUBROUTINE ReadCube(worldRegion, inputFilename, region, mesh, geometricField, & TYPE(cmfe_FieldType), INTENT(OUT) :: geometricField ! local variables TYPE(cmfe_BasisType) :: basis + TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment TYPE(cmfe_CoordinateSystemType) :: coordinateSystem TYPE(cmfe_DecompositionType) :: decomposition TYPE(cmfe_NodesType) :: nodes TYPE(cmfe_FieldMLIOType) :: fieldmlInfo - INTEGER(CMISSIntg) :: numberOfComputationalNodes, computationalNodeNumber + INTEGER(CMISSIntg) :: numberOfComputationNodes, computationNodeNumber INTEGER(CMISSIntg) :: err err = 0 - ! Get computational nodes information - - CALL cmfe_ComputationalNumberOfNodesGet(numberOfComputationalNodes, err) - CALL cmfe_ComputationalNodeNumberGet(computationalNodeNumber, err) + ! Get computation nodes information + CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err) + CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err) + CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err) ! Initialise FieldML and parse input file @@ -178,7 +179,7 @@ SUBROUTINE ReadCube(worldRegion, inputFilename, region, mesh, geometricField, & CALL cmfe_Decomposition_Initialise(decomposition, err) CALL cmfe_Decomposition_CreateStart(AUTO_USER_NUMBER(), mesh, decomposition, err) CALL cmfe_Decomposition_TypeSet(decomposition, CMFE_DECOMPOSITION_ALL_TYPE, err) - CALL cmfe_Decomposition_NumberOfDomainsSet(decomposition, numberOfComputationalNodes, err) + CALL cmfe_Decomposition_NumberOfDomainsSet(decomposition, numberOfComputationNodes, err) CALL cmfe_Decomposition_CreateFinish(decomposition, err) ! Define Geometric Field diff --git a/tests/FieldML_IO/fieldml_io.f90 b/tests/FieldML_IO/fieldml_io.f90 index 7fbfcad2..74c1662d 100644 --- a/tests/FieldML_IO/fieldml_io.f90 +++ b/tests/FieldML_IO/fieldml_io.f90 @@ -48,12 +48,13 @@ PROGRAM IRON_TEST_FIELDML_IO IMPLICIT NONE ! CMISS variables + TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment TYPE(cmfe_CoordinateSystemType) :: worldCoordinateSystem TYPE(cmfe_RegionType) :: worldRegion ! Generic CMISS variables - INTEGER(CMISSIntg) :: numberOfComputationalNodes, computationalNodeNumber + INTEGER(CMISSIntg) :: numberOfComputationNodes, computationNodeNumber INTEGER(CMISSIntg) :: err CALL INITIALISE_TESTS() @@ -63,10 +64,10 @@ PROGRAM IRON_TEST_FIELDML_IO CALL cmfe_Initialise(worldCoordinateSystem, worldRegion, err) CALL cmfe_ErrorHandlingModeSet(CMFE_ERRORS_TRAP_ERROR, err) - ! Get computational nodes information - - CALL cmfe_ComputationalNumberOfNodesGet(numberOfComputationalNodes, err) - CALL cmfe_ComputationalNodeNumberGet(computationalNodeNumber, err) + ! 
Get computation nodes information + CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err) + CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err) + CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err) CALL TestFieldMLIOCube(worldRegion) CALL TestFieldMLArguments(worldRegion) diff --git a/tests/FiniteElasticity/Cantilever.f90 b/tests/FiniteElasticity/Cantilever.f90 index 22184bc8..36cec906 100644 --- a/tests/FiniteElasticity/Cantilever.f90 +++ b/tests/FiniteElasticity/Cantilever.f90 @@ -98,7 +98,7 @@ PROGRAM CANTILEVEREXAMPLE !Program variables INTEGER(CMISSIntg) :: NumberGlobalXElements,NumberGlobalYElements,NumberGlobalZElements INTEGER(CMISSIntg) :: EquationsSetIndex - INTEGER(CMISSIntg) :: NumberOfComputationalNodes,NumberOfDomains,ComputationalNodeNumber + INTEGER(CMISSIntg) :: NumberOfComputationNodes,NumberOfDomains,ComputationNodeNumber INTEGER(CMISSIntg) :: NodeNumber,NodeDomain,node_idx,component_idx,deriv_idx INTEGER(CMISSIntg),ALLOCATABLE :: LeftSurfaceNodes(:) INTEGER(CMISSIntg) :: LeftNormalXi @@ -108,6 +108,7 @@ PROGRAM CANTILEVEREXAMPLE !CMISS variables TYPE(cmfe_BasisType) :: DisplacementBasis,PressureBasis TYPE(cmfe_BoundaryConditionsType) :: BoundaryConditions + TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment TYPE(cmfe_CoordinateSystemType) :: CoordinateSystem, WorldCoordinateSystem TYPE(cmfe_MeshType) :: Mesh TYPE(cmfe_DecompositionType) :: Decomposition @@ -208,11 +209,12 @@ PROGRAM CANTILEVEREXAMPLE WRITE(*,'("Elements: ", 3 i3)') NumberGlobalXElements,NumberGlobalYElements,NumberGlobalZElements WRITE(*,'("Scaling type: ", i3)') ScalingType - !Get the number of computational nodes and this computational node number - CALL cmfe_ComputationalNumberOfNodesGet(NumberOfComputationalNodes,Err) - CALL cmfe_ComputationalNodeNumberGet(ComputationalNodeNumber,Err) + !Get the number of computation nodes and this computation node number + CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err) + CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err) + CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err) - NumberOfDomains=NumberOfComputationalNodes + NumberOfDomains=NumberOfComputationNodes !Create a 3D rectangular cartesian coordinate system CALL cmfe_CoordinateSystem_Initialise(CoordinateSystem,Err) @@ -430,7 +432,7 @@ PROGRAM CANTILEVEREXAMPLE DO node_idx=1,SIZE(LeftSurfaceNodes,1) NodeNumber=LeftSurfaceNodes(node_idx) CALL cmfe_Decomposition_NodeDomainGet(Decomposition,NodeNumber,1,NodeDomain,Err) - IF(NodeDomain==ComputationalNodeNumber) THEN + IF(NodeDomain==ComputationNodeNumber) THEN DO component_idx=1,3 CALL cmfe_BoundaryConditions_AddNode(BoundaryConditions,DependentField,CMFE_FIELD_U_VARIABLE_TYPE,1,1,NodeNumber, & & component_idx,CMFE_BOUNDARY_CONDITION_FIXED,0.0_CMISSRP,Err) diff --git a/tests/FiniteElasticity/SimpleShear.f90 b/tests/FiniteElasticity/SimpleShear.f90 index fecaa8d4..dbb7159b 100644 --- a/tests/FiniteElasticity/SimpleShear.f90 +++ b/tests/FiniteElasticity/SimpleShear.f90 @@ -100,7 +100,7 @@ PROGRAM SIMPLESHEAREXAMPLE INTEGER(CMISSIntg) :: NumberGlobalXElements,NumberGlobalYElements,NumberGlobalZElements INTEGER(CMISSIntg) :: EquationsSetIndex - INTEGER(CMISSIntg) :: NumberOfComputationalNodes,NumberOfDomains,ComputationalNodeNumber + INTEGER(CMISSIntg) :: 
NumberOfComputationNodes,NumberOfDomains,ComputationNodeNumber INTEGER(CMISSIntg) :: NodeNumber,NodeDomain,node_idx INTEGER(CMISSIntg),ALLOCATABLE :: LeftSurfaceNodes(:) INTEGER(CMISSIntg),ALLOCATABLE :: RightSurfaceNodes(:) @@ -115,6 +115,7 @@ PROGRAM SIMPLESHEAREXAMPLE !CMISS variables TYPE(cmfe_BasisType) :: Basis, PressureBasis TYPE(cmfe_BoundaryConditionsType) :: BoundaryConditions + TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment TYPE(cmfe_CoordinateSystemType) :: CoordinateSystem, WorldCoordinateSystem TYPE(cmfe_MeshType) :: Mesh TYPE(cmfe_DecompositionType) :: Decomposition @@ -158,14 +159,15 @@ PROGRAM SIMPLESHEAREXAMPLE CALL cmfe_OutputSetOn("SimpleShear",Err) - !Get the number of computational nodes and this computational node number - CALL cmfe_ComputationalNumberOfNodesGet(NumberOfComputationalNodes,Err) - CALL cmfe_ComputationalNodeNumberGet(ComputationalNodeNumber,Err) + !Get the number of computation nodes and this computation node number + CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err) + CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err) + CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err) NumberGlobalXElements=2 NumberGlobalYElements=2 NumberGlobalZElements=2 - NumberOfDomains=NumberOfComputationalNodes + NumberOfDomains=NumberOfComputationNodes !Create a 3D rectangular cartesian coordinate system CALL cmfe_CoordinateSystem_Initialise(CoordinateSystem,Err) @@ -396,7 +398,7 @@ PROGRAM SIMPLESHEAREXAMPLE DO node_idx=1,SIZE(TopSurfaceNodes,1) NodeNumber=TopSurfaceNodes(node_idx) CALL cmfe_Decomposition_NodeDomainGet(Decomposition,NodeNumber,1,NodeDomain,Err) - IF(NodeDomain==ComputationalNodeNumber) THEN + IF(NodeDomain==ComputationNodeNumber) THEN ! x-direction CALL cmfe_Field_ParameterSetGetNode(GeometricField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1,NodeNumber,1,& & VALUE,Err) @@ -419,7 +421,7 @@ PROGRAM SIMPLESHEAREXAMPLE DO node_idx=1,SIZE(BottomSurfaceNodes,1) NodeNumber=BottomSurfaceNodes(node_idx) CALL cmfe_Decomposition_NodeDomainGet(Decomposition,NodeNumber,1,NodeDomain,Err) - IF(NodeDomain==ComputationalNodeNumber) THEN + IF(NodeDomain==ComputationNodeNumber) THEN ! 
x-direction CALL cmfe_Field_ParameterSetGetNode(GeometricField,CMFE_FIELD_U_VARIABLE_TYPE,CMFE_FIELD_VALUES_SET_TYPE,1,1,NodeNumber,1,& & VALUE,Err) diff --git a/tests/LinearElasticity/CantileverBeam.f90 b/tests/LinearElasticity/CantileverBeam.f90 index ad59c101..c1a2810b 100644 --- a/tests/LinearElasticity/CantileverBeam.f90 +++ b/tests/LinearElasticity/CantileverBeam.f90 @@ -185,11 +185,12 @@ SUBROUTINE ANALYTIC_LINEAR_ELASTICITY_GENERIC(NumberGlobalXElements,NumberGlobal INTEGER(CMISSIntg) :: FieldGeometryNumberOfComponents,FieldDependentNumberOfComponents,NumberOfElements(3) INTEGER(CMISSIntg) :: MPI_IERROR INTEGER(CMISSIntg) :: EquationsSetIndex,FieldComponentIndex,FieldMaterialNumberOfComponents,NumberOfXi - INTEGER(CMISSIntg) :: NumberOfComputationalNodes,ComputationalNodeNumber + INTEGER(CMISSIntg) :: NumberOfComputationNodes,ComputationNodeNumber !CMISS variables TYPE(cmfe_BasisType) :: Basis + TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment TYPE(cmfe_CoordinateSystemType) :: CoordinateSystem TYPE(cmfe_GeneratedMeshType) :: GeneratedMesh TYPE(cmfe_DecompositionType) :: Decomposition @@ -232,11 +233,12 @@ SUBROUTINE ANALYTIC_LINEAR_ELASTICITY_GENERIC(NumberGlobalXElements,NumberGlobal FieldGeometryNumberOfComponents=NumberOfXi FieldDependentNumberOfComponents=NumberOfXi - !Get the number of computational nodes and this computational node number - CALL cmfe_ComputationalNumberOfNodesGet(NumberOfComputationalNodes,Err) - CALL cmfe_ComputationalNodeNumberGet(ComputationalNodeNumber,Err) + !Get the number of computation nodes and this computation node number + CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err) + CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err) + CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err) - !Broadcast the number of elements in the X,Y and Z directions and the number of partitions to the other computational nodes + !Broadcast the number of elements in the X,Y and Z directions and the number of partitions to the other computation nodes CALL MPI_BCAST(NumberGlobalXElements,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) CALL MPI_BCAST(NumberGlobalYElements,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) CALL MPI_BCAST(NumberGlobalZElements,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) diff --git a/tests/LinearElasticity/Extension.f90 b/tests/LinearElasticity/Extension.f90 index 983ff9e2..fae01d9d 100644 --- a/tests/LinearElasticity/Extension.f90 +++ b/tests/LinearElasticity/Extension.f90 @@ -237,11 +237,12 @@ SUBROUTINE ANALYTIC_LINEAR_ELASTICITY_GENERIC(NumberGlobalXElements,NumberGlobal INTEGER(CMISSIntg) :: FieldGeometryNumberOfComponents,FieldDependentNumberOfComponents,NumberOfElements(3) INTEGER(CMISSIntg) :: MPI_IERROR INTEGER(CMISSIntg) :: EquationsSetIndex,FieldComponentIndex,FieldMaterialNumberOfComponents,NumberOfXi - INTEGER(CMISSIntg) :: NumberOfComputationalNodes,ComputationalNodeNumber + INTEGER(CMISSIntg) :: NumberOfComputationNodes,ComputationNodeNumber !CMISS variables TYPE(cmfe_BasisType) :: Basis + TYPE(cmfe_ComputationEnvironmentType) :: ComputationEnvironment TYPE(cmfe_CoordinateSystemType) :: CoordinateSystem TYPE(cmfe_GeneratedMeshType) :: GeneratedMesh TYPE(cmfe_DecompositionType) :: Decomposition @@ -284,11 +285,12 @@ SUBROUTINE ANALYTIC_LINEAR_ELASTICITY_GENERIC(NumberGlobalXElements,NumberGlobal FieldGeometryNumberOfComponents=NumberOfXi FieldDependentNumberOfComponents=NumberOfXi - !Get the number of 
computational nodes and this computational node number - CALL cmfe_ComputationalNumberOfNodesGet(NumberOfComputationalNodes,Err) - CALL cmfe_ComputationalNodeNumberGet(ComputationalNodeNumber,Err) + !Get the number of computation nodes and this computation node number + CALL cmfe_ComputationEnvironment_Initialise(ComputationEnvironment,Err) + CALL cmfe_ComputationEnvironment_NumberOfWorldNodesGet(ComputationEnvironment,NumberOfComputationNodes,Err) + CALL cmfe_ComputationEnvironment_WorldNodeNumberGet(ComputationEnvironment,ComputationNodeNumber,Err) - !Broadcast the number of elements in the X,Y and Z directions and the number of partitions to the other computational nodes + !Broadcast the number of elements in the X,Y and Z directions and the number of partitions to the other computation nodes CALL MPI_BCAST(NumberGlobalXElements,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) CALL MPI_BCAST(NumberGlobalYElements,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR) CALL MPI_BCAST(NumberGlobalZElements,1,MPI_INTEGER,0,MPI_COMM_WORLD,MPI_IERROR)