!
! This Fortran 90 program is used in MATH 365 - Computational
! Fluid Dynamics at James Madison University. This program was
! written by James Sochacki of the Department of Mathematics
! at James Madison University as part of the NSF Grant - A Collaborative
! Computational Science Program between JMU and North Carolina Central
! University. His info is
!
! Jim Sochacki
! Department of Mathematics
! James Madison University
! Harrisonburg, VA 22807
! jim@math.jmu.edu
!
!%%%%%%%%%%%%%%%%%%%%%%%%%%% MPI PROGRAM %%%%%%%%%%%%%%%%%%%%%%%%%%%%
!
program oneway_wave
!
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
!
! This code looks at the upwind finite difference solution
! to the one way wave equation
!    u_t + c u_x = 0
!    u(x,0) = exp(-(x-x0)^2),  a < x < b,  x0 = (a+b)/2
! The code runs on more than one processor using the MPI (Message Passing
! Interface) protocol.
!
!%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
!
IMPLICIT NONE

! Include the MPI routines
include 'mpif.h'
! Include the PGI Fortran run-time library routines (provides hostnm)
include 'lib3f.h'

! hlnth       is the length of the model
! dx          is the grid size on the length
! dt          is the time step
! tyme        is the time to run the model
! p           is the numerical approximation to u(x,t)
! np          is the numerical approximation to u(x,t+dt)
! workers     is the number of processors that will be used
! max_workers is set to 17, the number of nodes on the Raptor Beowulf system
!             housed in the Center for Computational Mathematics and Modeling
!             at James Madison University
! The wave data is written out in frames; each frame goes to a file whose
! name (filename) is built from the 6-character prefix name and the frame
! number.

real :: a, b, c, dx, dt, const, hlnth, tyme
! real :: start_time, stop_time, elapsed_time, flops, million = 1.e06
real, dimension(:), allocatable :: p, np
integer :: j, k, l, n, n_times, interv, icount, type
integer :: count, count_max, count_rate
integer :: rank, size, ierr, i, total
integer :: workers
integer :: len
integer, parameter :: MAX_WORKERS = 17
character(6)  :: name
character(11) :: filename

! The parameters and calls needed to initialize MPI
integer :: request(MAX_WORKERS)
integer :: status(MPI_STATUS_SIZE, MAX_WORKERS)
character(32) :: host

call MPI_INIT(ierr)
call MPI_COMM_RANK(MPI_COMM_WORLD, rank, ierr)
call MPI_COMM_SIZE(MPI_COMM_WORLD, size, ierr)

ierr = hostnm(host)
write (6,'(a,a32)') 'hostname=', host

! The subroutines are
! (1) input
!     inputs the data from a file called inad; all input data is
!     explained in this routine
! (2) manage
!     sends data to and from the processors for the finite
!     difference calculations
! (3) calculate
!     does the upwind calculation to update p for the next time step
!
! input parameters
!
if (rank == 0) then
   call input(c,a,b,hlnth,tyme,dt,dx,interv, &
              name,size,n,n_times,const,len,workers)
   allocate(p(n+10*workers))
   p = 0
   count = 0
end if

! broadcast the variables needed for the finite difference equation to all
! the processors
call MPI_BCAST(const,   1, MPI_REAL,    0, MPI_COMM_WORLD, ierr)
call MPI_BCAST(a,       1, MPI_REAL,    0, MPI_COMM_WORLD, ierr)
call MPI_BCAST(b,       1, MPI_REAL,    0, MPI_COMM_WORLD, ierr)
call MPI_BCAST(dx,      1, MPI_REAL,    0, MPI_COMM_WORLD, ierr)
call MPI_BCAST(len,     1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)
call MPI_BCAST(n_times, 1, MPI_INTEGER, 0, MPI_COMM_WORLD, ierr)

! This offset is used to reduce message passing at the processor grids
! (it selects where the updated values are placed back into p; see manage)
if (const > 0) then
   type = 2
else
   type = 1
end if
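! ----------------------------------------------------------------------
! Note (a sketch of the scheme, for reference): with the Courant number
! const = c*dt/dx, subroutine calculate below applies the upwind update,
! written here at a generic grid point j:
!
!    c > 0 :  p_new(j) = p(j) - const*( p(j) - p(j-1) )
!    c < 0 :  p_new(j) = p(j) - const*( p(j+1) - p(j) )
!
! The upwind scheme is stable only when |const| <= 1, so the dt and dx
! given in the input file 'inad' are assumed to satisfy this CFL condition.
! ----------------------------------------------------------------------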
! the initial condition
!
! initialize p on each processor
if (rank == 0) then

   ! hand each worker its segment of p and its worker number
   do i = 1, workers
      call MPI_ISEND(p((i-1)*len+1), len, &
                     MPI_REAL, i, 101, MPI_COMM_WORLD, request(i), ierr)
      ! the worker number is sent with a blocking send so the loop
      ! variable i is not reused before the message completes
      call MPI_SEND(i, 1, MPI_INTEGER, i, 102, MPI_COMM_WORLD, ierr)
   end do
   call MPI_WAITALL(workers, request, status, ierr)

   ! collect the initial condition computed by the workers
   do i = 1, workers
      call MPI_IRECV(p((i-1)*len+1), len, MPI_REAL, i, &
                     201, MPI_COMM_WORLD, request(i), ierr)
   end do
   call MPI_WAITALL(workers, request, status, ierr)

   ! output the initial condition to the file name at frame 00000
   write (filename,'(a6,i5.5)') name, 0
   open (4,file=filename,status='unknown')
   do j = 1, n
      write (4,'(f15.7)') p(j)
   end do
   close (4)

else

   allocate(np(len))
   np = 0
   call MPI_RECV(np, len, MPI_REAL, 0, 101, &
                 MPI_COMM_WORLD, status, ierr)
   call MPI_RECV(i, 1, MPI_INTEGER, 0, 102, &
                 MPI_COMM_WORLD, status, ierr)

   ! calculate the initial condition here
   do j = 1, len
      np(j) = exp(-((j+(i-1)*len)*dx-(a+b)/2)**2)
   end do

   ! send the initial condition back to the host
   call MPI_SEND(np, len, MPI_REAL, 0, 201, &
                 MPI_COMM_WORLD, ierr)
   deallocate(np)

end if

! do the finite difference calculations
if (rank == 0) then
   ! the host sends data to and from the processors through the subroutine
   ! manage
   do k = 1, n_times
      call manage(p,n,len,name,count,interv)
   end do
else
   ! the processors do the upwind calculations
   do k = 1, n_times
      call calculate(len,const)
   end do
end if

call MPI_FINALIZE(ierr)

STOP

contains

! The subroutine that inputs the data from the file inad
subroutine input(c,a,b,hlnth,tyme,dt,dx,interv, &
                 name,size,n,n_times,const,len,workers)

   integer, intent(out) :: interv, n, n_times, len, workers
   integer, intent(in)  :: size
   real,    intent(out) :: c, a, b, hlnth, dt, dx, const, tyme
   character(6), intent(out) :: name

   print*,' This code looks at the finite difference solution '
   print*,' to the one way wave equation '
   print*,' u_t + cu_x = 0 ; '
   print*,' u(x,0) = exp(-(x-x0)^2), a < x < b, x0 = (a+b)/2 '
   print*,' using forward Euler '

   open (5,file='inad')
   print*,' Give c '
   read(5,*) c
   print*,' Give a and b '
   read(5,*) a,b
   print*,' Give the length of the model '
   read(5,*) hlnth
   print*,' Give the time to run the model '
   read(5,*) tyme
   print*,' Give the time step and grid size '
   read(5,*) dt,dx
   if (tyme/dt > 99999) then
      print*,' time/dt = ',tyme/dt,' but time/dt must be < 99999'
      stop
   end if
   print*,' Give the snapshot interval (a snapshot will be made '
   print*,' every nth time step; you input n) '
   read(5,*) interv
   print*,' Give the filename where these snapshots should be '
   print*,' saved (6 characters exactly) '
   read (5,'(a6)') name
!
! initialize the parameters for the finite difference calculations
!
   workers = size - 1
   n       = hlnth/dx
   n_times = tyme/dt + 1
   const   = c*dt/dx
   len     = n/workers
   n       = len*workers

   ! write out the times for the finite difference solution
   open (3,file='times',status='unknown')
   do l = 0, n_times
      write (3,*) l*dt
   end do
   close (3)

   return
end subroutine input
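! ----------------------------------------------------------------------
! Note: an illustrative 'inad' input file (example values only, not from
! the original course materials), one entry per line in the order read by
! subroutine input above:
!
!    1.0            c
!    0.0  10.0      a, b
!    10.0           length of the model (hlnth)
!    5.0            time to run the model (tyme)
!    0.005  0.01    dt, dx
!    10             snapshot interval (interv)
!    adwave         6-character snapshot file prefix (name)
!
! With these values const = c*dt/dx = 0.5, which satisfies the CFL
! condition |const| <= 1.
! ----------------------------------------------------------------------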
! This subroutine sends data to and from the processors for the finite
! difference calculations
subroutine manage(p,n,len,name,count,interval)

   integer, intent(in)    :: n, len, interval
   integer, intent(inout) :: count
   real,    intent(inout), dimension(:) :: p
   character(6), intent(in) :: name
   integer :: i, j
   character(11) :: filename

   ! send each worker its segment of p plus one extra grid point
   do i = 1, workers
      call MPI_ISEND(p((i-1)*len+1), len+1, &
                     MPI_REAL, i, 101, MPI_COMM_WORLD, request(i), ierr)
   end do
   call MPI_WAITALL(workers, request, status, ierr)

   ! receive the updated values, offset by type (see above)
   do i = 1, workers
      call MPI_IRECV(p((i-1)*len+type), len, MPI_REAL, i, &
                     201, MPI_COMM_WORLD, request(i), ierr)
   end do
   call MPI_WAITALL(workers, request, status, ierr)

   ! output the wave form to file name with the frame number
   count = count + 1
   if (count == interval) then
      count = 0
      write (filename,'(a6,i5.5)') name, k
      open (4,file=filename,status='unknown')
      do j = 1, n
         write (4,'(f15.7)') p(j)
      end do
      close (4)
   end if

   return
end subroutine manage

! This subroutine does the finite difference calculations
subroutine calculate(len,const)

   integer, intent(in) :: len
   real,    intent(in) :: const
   integer :: j
   real, allocatable, dimension(:) :: np0, np1

   allocate(np0(len+1))
   allocate(np1(len+1))
   np0 = 0
   np1 = 0

   call MPI_RECV(np0, len+1, MPI_REAL, 0, 101, &
                 MPI_COMM_WORLD, status, ierr)

   ! Do the upwind scheme on the pde
   if (const > 0) then
      do j = 2, len+1
         np1(j-1) = np0(j) - const*(np0(j)-np0(j-1))
      end do
   else
      do j = 1, len
         np1(j) = np0(j) - const*(np0(j+1)-np0(j))
      end do
   end if

   ! send the wave form back to the host
   call MPI_SEND(np1, len, MPI_REAL, 0, 201, &
                 MPI_COMM_WORLD, ierr)

   deallocate(np0)
   deallocate(np1)

   return
end subroutine calculate

END program oneway_wave
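! ----------------------------------------------------------------------
! Note: a minimal build-and-run sketch (the exact commands depend on the
! local MPI installation, and the 'lib3f.h' include assumes a PGI-style
! Fortran compiler):
!
!    mpif90 oneway_wave.f90 -o oneway_wave
!    mpirun -np 5 ./oneway_wave
!
! Running with -np 5 gives one host process (rank 0) plus workers = 4
! compute processes; the input file 'inad' must be in the working
! directory.
! ----------------------------------------------------------------------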