subroutine MPP_WRITE_2DDECOMP_1D_( unit, field, domain, data, tstamp,blockid )
!mpp_write for 1D fields: reinterprets the 1D buffer as a
!(compute-domain x 1) 3D array and forwards it to the 3D writer.
integer, intent(in) :: unit                        !I/O unit of an open mpp_io file
type(fieldtype), intent(in) :: field               !metadata of the field being written
type(domain2D), intent(inout) :: domain            !domain decomposition of data
MPP_TYPE_, intent(inout) :: data(:)                !1D field data, one compute-domain slab
real(DOUBLE_KIND), intent(in), optional :: tstamp  !optional time stamp for this record
integer,intent(in),optional::blockid               !optional block index, forwarded to mpp_write
integer :: isc, iec, jsc, jec
MPP_TYPE_, allocatable :: tmp3d(:,:,:)
!the compute-domain extents supply the 2D shape of the field
call mpp_get_compute_domain( domain, xbegin=isc, xend=iec &
                                   , ybegin=jsc, yend=jec )
allocate( tmp3d(iec-isc+1, jec-jsc+1, 1) )
tmp3d = RESHAPE( data, SHAPE(tmp3d) )
if( PRESENT(blockid) )then
   call mpp_write( unit, field, domain, tmp3d, tstamp, blockid )
else
   call mpp_write( unit, field, domain, tmp3d, tstamp )
end if
!copy back in case the writer modified its (intent(inout)) data argument
data = RESHAPE( tmp3d, SHAPE(data) )
deallocate(tmp3d)

return
end subroutine MPP_WRITE_2DDECOMP_1D_
00026
subroutine MPP_WRITE_2DDECOMP_2D_( unit, field, domain, data, tstamp, blockid )
!mpp_write for 2D fields: presents the 2D array to the 3D writer as a
!3D array with a singleton third dimension.
integer, intent(in) :: unit                        !I/O unit of an open mpp_io file
type(fieldtype), intent(in) :: field               !metadata of the field being written
type(domain2D), intent(inout) :: domain            !domain decomposition of data
MPP_TYPE_, intent(inout) :: data(:,:)              !2D field data
real(DOUBLE_KIND), intent(in), optional :: tstamp  !optional time stamp for this record
integer,intent(in),optional::blockid               !optional block index, forwarded to mpp_write
MPP_TYPE_ :: data3D(size(data,1),size(data,2),1)
#ifdef use_CRI_pointers
!Cray-pointer path: data3D aliases data in place via LOC, so no copy
!in either direction is required
pointer( ptr, data3D )
ptr = LOC(data)
if(PRESENT(blockid)) then
call mpp_write( unit, field, domain, data3D, tstamp ,blockid)
else
call mpp_write( unit, field, domain, data3D, tstamp )
endif
#else
!portable path: copy into the 3D temporary, write, then copy back
!(mpp_write's data argument is intent(inout) and may be modified)
data3D = RESHAPE( data, SHAPE(data3D) )
if(PRESENT(blockid)) then
call mpp_write( unit, field, domain, data3D, tstamp ,blockid)
else
call mpp_write( unit, field, domain, data3D, tstamp )
endif
data = RESHAPE( data3D, SHAPE(data) )
#endif
return
end subroutine MPP_WRITE_2DDECOMP_2D_
00054
subroutine MPP_WRITE_2DDECOMP_3D_( unit, field, domain, data, tstamp ,blockid)
!mpp_write writes <data> which has the domain decomposition <domain>
!data may be sized to either the compute domain or the data domain
!(classified below from its extents); blockid, when present, routes
!output through write_record_b instead of write_record.
integer, intent(in) :: unit
type(fieldtype), intent(in) :: field
type(domain2D), intent(inout) :: domain !must have intent(out) as well because active domain might be reset
MPP_TYPE_, intent(inout) :: data(:,:,:)
real(DOUBLE_KIND), intent(in), optional :: tstamp
integer,intent(in),optional::blockid
!cdata is used to store compute domain as contiguous data
!gdata is used to globalize data for multi-PE single-threaded I/O
MPP_TYPE_, allocatable, dimension(:,:,:) :: cdata, gdata
!NEW: data may be on compute OR data domain
logical :: data_has_halos, halos_are_global, x_is_global, y_is_global
integer :: is, ie, js, je, isd, ied, jsd, jed, isg, ieg, jsg, jeg
!rr integer :: mypelist(4)

! write(stdout(),*)'MPP_WRITE_2DDECOMP_3D',size(data,1),size(data,2),size(data,3)
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_WRITE: must first call mpp_io_init.' )
if( .NOT.mpp_file(unit)%opened )call mpp_error( FATAL, 'MPP_WRITE: invalid unit number.' )
!compute, data and global extents are all needed to classify the array
!layout and, if necessary, to size the gathered global buffer
! write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','calling mpp_get_compute_domain'
call mpp_get_compute_domain( domain, is, ie, js, je )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','calling mpp_get_data_domain'
call mpp_get_data_domain ( domain, isd, ied, jsd, jed, x_is_global=x_is_global, y_is_global=y_is_global )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','calling mpp_get_global_domain'
call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','checking for halos '
if(debug) &
print*,'MPP_WRITE_2DDECOMP_3D:',isd, ied, jsd, jed,is, ie, js, je,'|' &
, size(data,1),size(data,2)
!decide from the horizontal extents whether data includes halo points:
!compute-domain sized => no halos; data-domain sized => halos present
if( size(data,1).EQ.ie-is+1 .AND. size(data,2).EQ.je-js+1 )then
data_has_halos = .FALSE.
else if( size(data,1).EQ.ied-isd+1 .AND. size(data,2).EQ.jed-jsd+1 )then
data_has_halos = .TRUE.
else
call mpp_error( FATAL, 'MPP_WRITE: data must be either on compute domain or data domain.' )
end if
halos_are_global = x_is_global .AND. y_is_global
if(debug) &
write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','checked for halos ',halos_are_global,npes
!single-threaded I/O with more than one PE: only the root PE writes;
!the data must first be made globally complete on the root PE
if( npes.GT.1 .AND. mpp_file(unit)%threading.EQ.MPP_SINGLE )then
if(debug) &
write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','npes.GT.1','threading.EQ.MPP_SINGLE'
if( halos_are_global )then
!data domain already spans the global domain on every PE, so a halo
!update is sufficient to make each PE's array globally complete
call mpp_update_domains( data, domain )
!all non-0 PEs have passed their data to PE 0 and may now exit
!rv,sgi if( pe.NE.0 )return
if( pe.NE.mpp_root_pe() )return
if(PRESENT(blockid)) then
call write_record_b( unit, field, size(data), data, tstamp,block_id=blockid)
else
call write_record( unit, field, size(data), data, tstamp )
endif
else
!put field onto global domain
if(debug) &
write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','allocating gdata'
allocate( gdata(isg:ieg,jsg:jeg,size(data,3)) )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','calling mpp_global_field'
call mpp_global_field( domain, data, gdata )
if(debug) &
write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','called mpp_global_field',pe
!rr call mpp_get_current_pelist(mypelist)
!rr if(debug) &
!rr write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','mypelist',mypelist,'|' &
!rr ,mpp_root_pe()

!all non-0 PEs have passed their data to PE 0 and may now exit
!rv,sgi if( pe.NE.0 )return
if( pe.NE. mpp_root_pe() )return
if(PRESENT(blockid)) then
call write_record_b( unit, field, size(gdata), gdata, tstamp,block_id=blockid )
else
call write_record( unit, field, size(gdata), gdata, tstamp )
endif
end if
!multi-threaded (every PE writes) or single-PE I/O from here on
else if( data_has_halos )then
if(debug) &
write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','data_has_halos '
!store compute domain as contiguous data and pass to write_record
allocate( cdata(is:ie,js:je,size(data,3)) )
!strip halos: data is indexed from 1, so shift by the data-domain origin
cdata(:,:,:) = data(is-isd+1:ie-isd+1,js-jsd+1:je-jsd+1,:)
if(PRESENT(blockid)) then
call write_record_b( unit, field, size(cdata), cdata, tstamp, domain ,block_id=blockid)
else
call write_record( unit, field, size(cdata), cdata, tstamp, domain )
endif
if(debug) &
write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','done with data_has_halos '
else
if(debug) &
write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','contigous ',size(data)
!data is already contiguous
if(PRESENT(blockid)) then
call write_record_b( unit, field, size(data), data, tstamp, domain,block_id=blockid )
else
call write_record( unit, field, size(data), data, tstamp, domain )
endif
if(debug) &
write(stdout(),*)'MPP_WRITE_2DDECOMP_3D','done with contigous '
end if
return
end subroutine MPP_WRITE_2DDECOMP_3D_
00157
subroutine MPP_WRITE_2DDECOMP_4D_( unit, field, domain, data, tstamp ,blockid)
!mpp_write writes <data> which has the domain decomposition <domain>
!4D variant of MPP_WRITE_2DDECOMP_3D_: identical control flow, with the
!extra fourth dimension carried through unchanged on every buffer.
integer, intent(in) :: unit
type(fieldtype), intent(in) :: field
type(domain2D), intent(inout) :: domain !must have intent(out) as well because active domain might be reset
MPP_TYPE_, intent(inout) :: data(:,:,:,:)
real(DOUBLE_KIND), intent(in), optional :: tstamp
integer,intent(in),optional::blockid
!cdata is used to store compute domain as contiguous data
!gdata is used to globalize data for multi-PE single-threaded I/O
MPP_TYPE_, allocatable, dimension(:,:,:,:) :: cdata, gdata
!NEW: data may be on compute OR data domain
logical :: data_has_halos, halos_are_global, x_is_global, y_is_global
integer :: is, ie, js, je, isd, ied, jsd, jed, isg, ieg, jsg, jeg

! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D',size(data,1),size(data,2),size(data,3),size(data,4)
if( .NOT.module_is_initialized )call mpp_error( FATAL, 'MPP_WRITE: must first call mpp_io_init.' )
if( .NOT.mpp_file(unit)%opened )call mpp_error( FATAL, 'MPP_WRITE: invalid unit number.' )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','calling mpp_get_compute_domain'
call mpp_get_compute_domain( domain, is, ie, js, je )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','calling mpp_get_data_domain'
call mpp_get_data_domain ( domain, isd, ied, jsd, jed, x_is_global=x_is_global, y_is_global=y_is_global )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','calling mpp_get_global_domain'
call mpp_get_global_domain ( domain, isg, ieg, jsg, jeg )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','checking for halos '
!decide from the horizontal extents whether data includes halo points
if( size(data,1).EQ.ie-is+1 .AND. size(data,2).EQ.je-js+1 )then
data_has_halos = .FALSE.
else if( size(data,1).EQ.ied-isd+1 .AND. size(data,2).EQ.jed-jsd+1 )then
data_has_halos = .TRUE.
else
call mpp_error( FATAL, 'MPP_WRITE: data must be either on compute domain or data domain.' )
end if
halos_are_global = x_is_global .AND. y_is_global
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','checked for halos ',halos_are_global,npes
!single-threaded I/O with more than one PE: only the root PE writes
if( npes.GT.1 .AND. mpp_file(unit)%threading.EQ.MPP_SINGLE )then
if( halos_are_global )then
!data domain spans the global domain: a halo update makes each PE's
!array globally complete
call mpp_update_domains( data, domain )
!all non-0 PEs have passed their data to PE 0 and may now exit
!rv,sgi if( pe.NE.0 )return
if( pe.NE.mpp_root_pe() )return
if(PRESENT(blockid)) then
call write_record_b( unit, field, size(data), data, tstamp,block_id=blockid )
else
call write_record( unit, field, size(data), data, tstamp )
endif
else
!put field onto global domain
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','allocating gdata'
allocate( gdata(isg:ieg,jsg:jeg,size(data,3),size(data,4)) )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','calling mpp_global_field'
call mpp_global_field( domain, data, gdata )
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','called mpp_global_field'
!all non-0 PEs have passed their data to PE 0 and may now exit
!rv,sgi if( pe.NE.0 )return
if( pe.NE.mpp_root_pe())return
if(PRESENT(blockid)) then
call write_record_b( unit, field, size(gdata), gdata, tstamp ,block_id=blockid)
else
call write_record( unit, field, size(gdata), gdata, tstamp )
endif
end if
!multi-threaded (every PE writes) or single-PE I/O from here on
else if( data_has_halos )then
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','data_has_halos '
!store compute domain as contiguous data and pass to write_record
allocate( cdata(is:ie,js:je,size(data,3),size(data,4)) )
!strip halos: data is indexed from 1, so shift by the data-domain origin
cdata(:,:,:,:) = data(is-isd+1:ie-isd+1,js-jsd+1:je-jsd+1,:,:)
if(PRESENT(blockid)) then
call write_record_b( unit, field, size(cdata), cdata, tstamp, domain,block_id=blockid )
else
call write_record( unit, field, size(cdata), cdata, tstamp, domain )
endif
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','done with data_has_halos '
else
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','contigous ',size(data)
!data is already contiguous
if(PRESENT(blockid)) then
call write_record_b( unit, field, size(data), data, tstamp, domain,block_id=blockid )
else
call write_record( unit, field, size(data), data, tstamp, domain )
endif
! write(stdout(),*)'MPP_WRITE_2DDECOMP_4D','done with contigous '
end if
return
end subroutine MPP_WRITE_2DDECOMP_4D_