Actual source code: ex4f.F
petsc-3.13.1 2020-05-02
!
!  This introductory example illustrates running PETSc on a subset
!  of processes.
!
!/*T
!   Concepts: introduction to PETSc;
!   Concepts: process^subset set PETSC_COMM_WORLD
!   Processors: 2
!T*/
! -----------------------------------------------------------------------

      program main
#include <petsc/finclude/petscsys.h>
      use petscsys
      implicit none

      PetscErrorCode ierr
      PetscMPIInt    rank,size,grank,two
      PetscReal      globalrank

!  We must call MPI_Init() ourselves, before PetscInitialize(); this
!  makes us, not PETSc, responsible for starting and shutting down MPI.

      call MPI_Init(ierr)

!  We can now change the communicator on which PETSc will run: split
!  MPI_COMM_WORLD into two subcommunicators and use the local one as
!  PETSC_COMM_WORLD.

      two = 2
      call MPI_Comm_rank(MPI_COMM_WORLD,rank,ierr)
      call MPI_Comm_split(MPI_COMM_WORLD,mod(rank,two),0,              &
     &                    PETSC_COMM_WORLD,ierr)
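
!  MPI_Comm_split() groups processes by "color": mod(rank,two) places
!  even global ranks in one subcommunicator and odd global ranks in the
!  other. The key argument is 0 for every process, so within each
!  subcommunicator the processes keep their relative order from
!  MPI_COMM_WORLD.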

!  Every PETSc program should begin by initializing PETSc; the
!  PetscInitializeNoArguments() variant is used here since we have no
!  options file or arguments to pass.
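
!  Because PETSC_COMM_WORLD was reset before this call, each
!  subcommunicator initializes its own independent PETSc universe: the
!  even and odd ranks run PETSc side by side, unaware of each other.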

      call PetscInitializeNoArguments(ierr)
      if (ierr .ne. 0) then
        print*,'Unable to initialize PETSc'
        stop
      endif

!  The following MPI calls return the number of processes being used
!  and the rank of this process in the group.

      call MPI_Comm_size(PETSC_COMM_WORLD,size,ierr)
      call MPI_Comm_rank(PETSC_COMM_WORLD,rank,ierr)
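
!  Note that size and rank now refer to the subcommunicator, not to
!  MPI_COMM_WORLD: with 5 global processes, size is 3 on the even
!  ranks and 2 on the odd ranks, not 5.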

!  Here we would like to print only one message that represents all
!  the processes in the group. Each process sleeps for a number of
!  seconds equal to its global rank so that output from different
!  ranks doesn't get mixed up; note this is not an ideal solution.

      call MPI_Comm_rank(MPI_COMM_WORLD,grank,ierr)
      globalrank = grank
      call PetscSleep(globalrank,ierr)
      if (rank .eq. 0) write(6,100) size,rank
  100 format('No of Procs = ',i4,' rank = ',i4)
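
!  Only rank 0 of each subcommunicator prints, so a run on more than
!  one process produces exactly two lines of output, one per
!  subcommunicator.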

!  Always call PetscFinalize() before exiting a program. This routine
!  finalizes the PETSc libraries and provides summary and diagnostic
!  information if certain runtime options are chosen (e.g., -log_view).
!  Because we initialized MPI ourselves, PetscFinalize() does not
!  finalize MPI for us. See the PetscFinalize() manpage for more
!  information.

      call PetscFinalize(ierr)
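
!  Free the subcommunicator created by MPI_Comm_split(). This must wait
!  until after PetscFinalize(), since PETSc uses PETSC_COMM_WORLD up to
!  that point.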

      call MPI_Comm_free(PETSC_COMM_WORLD,ierr)

!  Since we initialized MPI, we must call MPI_Finalize().

      call MPI_Finalize(ierr)
      end
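
!  Example run (the executable name ex4f is assumed here from the
!  source file name; the PETSc makefiles normally build it that way):
!      mpiexec -n 5 ./ex4f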

!/*TEST
!
!   test:
!      nsize: 5
!      filter: sort -b
!      filter_output: sort -b
!      requires: !cuda
!
!TEST*/
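
!  The sort -b filters above presumably make the comparison with the
!  expected test output independent of the order in which the two
!  subcommunicators' messages appear.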