#ifdef use_CRI_pointers
! NOTE(review): this #undef leaves use_local_CRI_pointers undefined, so the
! Cray-pointer fast path in MPP_READ_2DDECOMP_2D_ below (guarded by
! #ifdef use_local_CRI_pointers) is never compiled and the RESHAPE copy
! fallback is always used.  If the fast path was meant to be enabled when
! use_CRI_pointers is set, this should read #define instead -- TODO confirm.
#undef use_local_CRI_pointers
#endif
subroutine MPP_READ_2DDECOMP_1D_(unit,field,domain,data,tindex,blockid )
!mpp_read for a rank-1 buffer holding one compute-domain level: the data
!are repacked into a temporary (nx,ny,1) array, read through the rank-3
!mpp_read interface, and copied back into the caller's 1D buffer.
  integer, intent(in) :: unit
  type(fieldtype), intent(in) :: field
  type(domain2D), intent(in) :: domain
  MPP_TYPE_, intent(inout) :: data(:)
  integer, intent(in), optional :: tindex
  integer, intent(in), optional :: blockid
  integer :: isc, iec, jsc, jec
  MPP_TYPE_, allocatable :: buf3d(:,:,:)

  !compute-domain extents size the rank-3 staging buffer
  call mpp_get_compute_domain( domain, xbegin=isc, xend=iec, &
                               ybegin=jsc, yend=jec )

  allocate( buf3d(iec-isc+1, jec-jsc+1, 1) )
  buf3d = RESHAPE( data, SHAPE(buf3d) )
  if( PRESENT(blockid) )then
      call mpp_read( unit, field, domain, buf3d, tindex, blockid )
  else
      !tindex may itself be absent; passing an absent optional on to an
      !optional dummy is standard-conforming
      call mpp_read( unit, field, domain, buf3d, tindex )
  end if
  data = RESHAPE( buf3d, SHAPE(data) )
  deallocate( buf3d )

  return
end subroutine MPP_READ_2DDECOMP_1D_
subroutine MPP_READ_2DDECOMP_2D_( unit, field, domain,data,tindex,blockid )
!mpp_read for a rank-2 array: the data are presented to the rank-3
!mpp_read interface as an (nx,ny,1) array, either by aliasing the
!caller's storage with a Cray pointer (no copy) or by RESHAPE copies.
integer, intent(in) :: unit
type(fieldtype), intent(in) :: field
type(domain2D), intent(in) :: domain
MPP_TYPE_, intent(inout) :: data(:,:)
integer, intent(in), optional :: tindex
integer, intent(in), optional :: blockid
MPP_TYPE_ :: data3D(size(data,1),size(data,2),1)
#ifdef use_local_CRI_pointers
! Cray-pointer path: data3D aliases the caller's array directly, so
! mpp_read fills data in place with no intermediate copy.
pointer( ptr, data3D )
ptr = LOC(data)
if(PRESENT(blockid)) then
call mpp_read( unit, field, domain, data3D, tindex, blockid )
else
! tindex may itself be absent; passing an absent optional on to an
! optional dummy is standard-conforming.
call mpp_read( unit, field, domain, data3D, tindex )
endif
#else
! Portable path: copy in, read, copy back out.
data3D = RESHAPE( data, SHAPE(data3D) )
if(PRESENT(blockid)) then
call mpp_read( unit, field, domain, data3D, tindex, blockid )
else
call mpp_read( unit, field, domain, data3D, tindex )
endif
data = RESHAPE( data3D, SHAPE(data) )
#endif
return
end subroutine MPP_READ_2DDECOMP_2D_
00057
subroutine MPP_READ_2DDECOMP_3D_(unit,field,domain,data,tindex,blockid )
!mpp_read reads <data> which has the domain decomposition <domain>.
!<data> may be dimensioned on either the compute domain or the data
!domain.  With single-threaded I/O the root PE reads the field (globally,
!then broadcast, when halos are not global); otherwise each PE reads its
!own compute-domain record directly.
integer, intent(in) :: unit
type(fieldtype), intent(in) :: field
type(domain2D), intent(in) :: domain
MPP_TYPE_, intent(inout) :: data(:,:,:)
integer, intent(in), optional :: tindex   !time level; required if the field has a time axis
integer, intent(in), optional :: blockid  !sub-block index, forwarded to read_record_b
MPP_TYPE_, allocatable :: cdata(:,:,:)    !compute-domain staging buffer
MPP_TYPE_, allocatable :: gdata(:)        !flattened global-domain buffer
integer :: len, lenx,leny,lenz,i,j,k,n
!NEW: data may be on compute OR data domain
logical :: data_has_halos, halos_are_global, x_is_global, y_is_global
integer :: is, ie, js, je, isd, ied, jsd, jed, isg, ieg, jsg, jeg, ioff, joff

if (.NOT. present(tindex) .AND. mpp_file(unit)%time_level .ne. -1) &
call mpp_error(FATAL, 'MPP_READ: need to specify a time level for data with time axis')

if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_READ: must first call mpp_io_init.' )
if( .NOT.mpp_file(unit)%opened )call mpp_error( FATAL, 'MPP_READ: invalid unit number.' )

call mpp_get_compute_domain( domain, is, ie, js, je )
call mpp_get_data_domain ( domain, isd, ied, jsd, jed, x_is_global=x_is_global, y_is_global=y_is_global )
call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg )
if( debug ) &
write(stdout(),*) 'MPP_READ_2DDECOMP_3D_:is, ie, js, je, isd, ied, jsd, jed',is, ie, js, je, isd, ied, jsd, jed
if( debug ) &
write(stdout(),*) 'MPP_READ_2DDECOMP_3D_:isg, ieg, jsg, jeg',isg, ieg, jsg, jeg
!decide whether <data> is sized for the compute domain or the data domain
if( size(data,1).EQ.ie-is+1 .AND. size(data,2).EQ.je-js+1 )then
data_has_halos = .FALSE.
else if( size(data,1).EQ.ied-isd+1 .AND. size(data,2).EQ.jed-jsd+1 )then
data_has_halos = .TRUE.
else
call mpp_error( FATAL, 'MPP_READ: data must be either on compute domain or data domain.' )
end if
halos_are_global = x_is_global .AND. y_is_global
if( debug ) &
write(stdout(),*) 'MPP_READ_2DDECOMP_3D_: halos_are_global', halos_are_global
if( npes.GT.1 .AND. mpp_file(unit)%threading.EQ.MPP_SINGLE )then
if( debug ) &
write(stdout(),*) 'MPP_READ_2DDECOMP_3D_: threading.EQ.MPP_SINGLE'
if( halos_are_global )then !you can read directly into data array
if(PRESENT(blockid)) then
if( pe.EQ.mpp_root_pe() )call read_record_b(unit,field,size(data) &
,data,tindex,block_id=blockid )
else
if(pe.EQ.mpp_root_pe())call read_record(unit,field,size(data),data,tindex )
endif
else
lenx=ieg-isg+1
leny=jeg-jsg+1
lenz=size(data,3)
len=lenx*leny*lenz
! BUGFIX: this diagnostic was unconditional, printing from every PE on
! every read; guard it with debug like all other diagnostics here.
if( debug ) &
write(stdout(),*)'MPP_READ_2DDECOMP_3D: gdata ', lenx,leny,lenz
allocate(gdata(len))
! read field on pe 0 and pass to all pes
if(PRESENT(blockid)) then
if(pe.EQ.mpp_root_pe()) call read_record_b(unit,field,len,gdata,tindex,block_id=blockid)
else
if( pe.EQ.mpp_root_pe() ) call read_record( unit, field, len, gdata, tindex )
endif
! broadcasting global array, this can be expensive!
call mpp_transmit( gdata, len, ALL_PES, gdata, len, 0 )
!offset maps global (i,j) onto local indices of <data>
ioff = is; joff = js
if( data_has_halos )then
ioff = isd; joff = jsd
end if
do k=1,size(data,3)
do j=js,je
do i=is,ie
n=(i-isg+1) + (j-jsg)*lenx + (k-1)*lenx*leny
data(i-ioff+1,j-joff+1,k)=gdata(n)
enddo
enddo
enddo
deallocate(gdata)
end if
else if( data_has_halos )then
! for uniprocessor or multithreaded read
! read compute domain as contiguous data, then copy into the interior

allocate( cdata(is:ie,js:je,size(data,3)) )
if(PRESENT(blockid)) then
call read_record_b(unit,field,size(cdata),cdata,tindex,domain,block_id=blockid)
else
call read_record(unit,field,size(cdata),cdata,tindex,domain)
endif

data(is-isd+1:ie-isd+1,js-jsd+1:je-jsd+1,:) = cdata(:,:,:)
deallocate(cdata)
else
!no halos: read directly into <data>
if(PRESENT(blockid)) then
call read_record_b(unit,field,size(data),data,tindex,domain,block_id=blockid)
else
call read_record(unit,field,size(data),data,tindex,domain)
endif
end if

return
end subroutine MPP_READ_2DDECOMP_3D_
00158
subroutine MPP_READ_2DDECOMP_4D_(unit,field,domain,data,tindex,blockid )
!mpp_read reads <data> which has the domain decomposition <domain>
!<data> may be dimensioned on either the compute domain or the data domain
integer, intent(in) :: unit
type(fieldtype), intent(in) :: field
type(domain2D), intent(in) :: domain
MPP_TYPE_, intent(inout) :: data(:,:,:,:)
integer, intent(in), optional :: tindex
integer, intent(in), optional :: blockid
MPP_TYPE_, allocatable :: cbuf(:,:,:,:)   !compute-domain staging buffer
MPP_TYPE_, allocatable :: gbuf(:)         !flattened global-domain buffer
integer :: nwords, nxg, nyg, nzl, i, j, k, l, m
logical :: data_has_halos, halos_are_global, x_is_global, y_is_global
integer :: isc, iec, jsc, jec, isd, ied, jsd, jed, isg, ieg, jsg, jeg
integer :: ioff, joff

if( .NOT.PRESENT(tindex) .AND. mpp_file(unit)%time_level.NE.-1 ) &
     call mpp_error( FATAL, 'MPP_READ: need to specify a time level for data with time axis' )
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_READ: must first call mpp_io_init.' )
if( .NOT.mpp_file(unit)%opened )call mpp_error( FATAL, 'MPP_READ: invalid unit number.' )

call mpp_get_compute_domain( domain, isc, iec, jsc, jec )
call mpp_get_data_domain   ( domain, isd, ied, jsd, jed, x_is_global=x_is_global, y_is_global=y_is_global )
call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg )

!decide whether <data> is sized for the compute domain or the data domain
if( size(data,1).EQ.iec-isc+1 .AND. size(data,2).EQ.jec-jsc+1 )then
    data_has_halos = .FALSE.
else if( size(data,1).EQ.ied-isd+1 .AND. size(data,2).EQ.jed-jsd+1 )then
    data_has_halos = .TRUE.
else
    call mpp_error( FATAL, 'MPP_READ: data must be either on compute domain or data domain.' )
end if
halos_are_global = x_is_global .AND. y_is_global

if( npes.GT.1 .AND. mpp_file(unit)%threading.EQ.MPP_SINGLE )then
    if( halos_are_global )then
        !halo region spans the global domain: root PE reads directly
        !into the data array
        if( PRESENT(blockid) )then
            if( pe.EQ.mpp_root_pe() ) &
                 call read_record_b( unit, field, size(data), data, tindex, block_id=blockid )
        else
            if( pe.EQ.mpp_root_pe() ) &
                 call read_record( unit, field, size(data), data, tindex )
        end if
    else
        !root PE reads the full global field into a flat buffer, which is
        !then sent to all PEs; each PE copies out its own portion
        nxg = ieg - isg + 1
        nyg = jeg - jsg + 1
        nzl = size(data,3)
        nwords = nxg*nyg*nzl*size(data,4)
        allocate( gbuf(nwords) )
        ! read field on pe 0 and pass to all pes
        if( PRESENT(blockid) )then
            if( pe.EQ.mpp_root_pe() ) &
                 call read_record_b( unit, field, nwords, gbuf, tindex, block_id=blockid )
        else
            if( pe.EQ.mpp_root_pe() ) &
                 call read_record( unit, field, nwords, gbuf, tindex )
        end if
        ! broadcasting global array, this can be expensive!
        call mpp_transmit( gbuf, nwords, ALL_PES, gbuf, nwords, 0 )
        !offset maps global (i,j) onto local indices of <data>
        if( data_has_halos )then
            ioff = isd; joff = jsd
        else
            ioff = isc; joff = jsc
        end if
        do l = 1, size(data,4)
           do k = 1, nzl
              do j = jsc, jec
                 do i = isc, iec
                    m = (i-isg+1) + (j-jsg)*nxg + (k-1)*nxg*nyg &
                        + (l-1)*nxg*nyg*nzl
                    data(i-ioff+1, j-joff+1, k, l) = gbuf(m)
                 end do
              end do
           end do
        end do
        deallocate( gbuf )
    end if
else if( data_has_halos )then
    ! for uniprocessor or multithreaded read:
    ! read compute domain as contiguous data, then copy into the interior
    allocate( cbuf(isc:iec, jsc:jec, size(data,3), size(data,4)) )
    if( PRESENT(blockid) )then
        call read_record_b( unit, field, size(cbuf), cbuf, tindex, domain, block_id=blockid )
    else
        call read_record( unit, field, size(cbuf), cbuf, tindex, domain )
    end if
    data(isc-isd+1:iec-isd+1, jsc-jsd+1:jec-jsd+1, :, :) = cbuf(:,:,:,:)
    deallocate( cbuf )
else
    !no halos: read directly into <data>
    if( PRESENT(blockid) )then
        call read_record_b( unit, field, size(data), data, tindex, domain, block_id=blockid )
    else
        call read_record( unit, field, size(data), data, tindex, domain )
    end if
end if

return
end subroutine MPP_READ_2DDECOMP_4D_
! NOTE(review): defining this at end-of-file has no effect on the code
! above; it only matters if this template header is #included again later
! in the same translation unit (and then it would bypass the #ifdef
! use_CRI_pointers guard at the top) -- TODO confirm this is intentional.
#define use_local_CRI_pointers