...
```bash
cd FluTAS/src
# Edit the FC name and the FFTW path in targets/target.generic-intel.
# MPI=intelmpi-2021.7.0
MPI=openmpi-4.1.2-intel2021.4.0
if [[ "$MPI" =~ ^intel ]]; then
  module load intel/2022.1.0
  module load fftw/3.3.10-impi
  export I_MPI_F90=ifort      # make the Intel MPI wrapper use ifort
elif [[ "$MPI" =~ ^openmpi ]]; then
  module load fftw/3.3.10-ompi
  export OMPI_MPIF90=ifort    # make the Open MPI wrapper use ifort
fi
# Replace the first dash with a slash, e.g. "openmpi-4.1.2-intel2021.4.0"
# becomes the module name "openmpi/4.1.2-intel2021.4.0".
module load $(echo $MPI | sed -e "s/\-/\//")
make ARCH=generic-intel APP=two_phase_ht DO_DBG=0 DO_POSTPROC=0
```
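The build reads the compiler and FFTW settings from the target file named by ARCH. A minimal sketch of how to locate and edit them is shown below; the variable names (FC, FFTW_DIR) and the install prefix are assumptions here, so verify them against the actual targets/target.generic-intel in your checkout:

```bash
# Sketch only: the variable names (FC, FFTW_DIR) inside the target file
# are assumptions; open targets/target.generic-intel and check.
grep -nE '^(FC|FFTW)' targets/target.generic-intel
# Then set, for example (hypothetical values):
#   FC       = mpif90             # MPI Fortran wrapper around ifort
#   FFTW_DIR = /opt/fftw/3.3.10   # FFTW install prefix for this toolchain
```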
Running Example
```bash
cd FluTAS/examples/two_phase_ht/coarse_two_layer_rb
# Edit the following line in dns.in to change the process grid;
# the setting below works with 256 cores:
#   16 16                ! dims(1:2)
/usr/bin/time mpirun -np 256 $MPIFLAGS flutas
```

```
 the used processor grid is 16 by 16
 Padded ALLTOALL optimisation on
 ************************************************
 *** Beginning of simulation (TWO-PHASE mode) ***
 ************************************************
 *** Initial condition succesfully set ***
 dtmax = 3.322388020223433E-003  dt = 1.661194010111717E-003
 *** Calculation loop starts now ***
 ...
 *** Fim ***
 OUT:initial :  6.335s (     1 calls)
 STEP        : 14.630s (  1000 calls)
 VOF         :  9.309s (  1000 calls)
 RK          :  0.545s (  1000 calls)
 SOLVER      :  1.264s (  1000 calls)
 CORREC      :  0.588s (  1000 calls)
 POSTPROC    :  0.117s (  1000 calls)
 OUT:iout0d  :  0.005s (     2 calls)
 OUT:iout1d  :  0.000s (     1 calls)
 OUT:iout3d  :  4.267s (     1 calls)
 OUT:isave   :  4.277s (     1 calls)
```
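The rank count passed to mpirun must equal the product of the two entries of dims(1:2) in dns.in (16 × 16 = 256 above). A small sanity check along these lines, a sketch that assumes dims(1:2) appears in dns.in exactly in the form shown above, can catch a mismatch before launching:

```bash
# Sketch: read the "16 16   ! dims(1:2)" line from dns.in and multiply
# the two entries to get the required MPI rank count.
NRANKS=$(awk '/dims\(1:2\)/ { print $1 * $2; exit }' dns.in)
echo "dns.in expects ${NRANKS} MPI ranks"
/usr/bin/time mpirun -np "${NRANKS}" $MPIFLAGS flutas
```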
...