FreeFOAM: The Cross-Platform CFD Toolkit

mpiPstreamImpl.C

/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | Copyright (C) 1991-2010 OpenCFD Ltd.
     \\/     M anipulation  |
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "mpi.h"

#include "mpiPstreamImpl.H"
#include <OpenFOAM/Pstream.H>
#include <OpenFOAM/OSspecific.H>
#include <OpenFOAM/addToRunTimeSelectionTable.H>

#include <cstring>
#include <cstdlib>
#include <csignal>

#if defined(SP)
#   define MPI_SCALAR MPI_FLOAT
#elif defined(DP)
#   define MPI_SCALAR MPI_DOUBLE
#endif

// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //

namespace Foam
{

defineTypeNameAndDebug(mpiPstreamImpl, 0);
addToRunTimeSelectionTable(PstreamImpl, mpiPstreamImpl, dictionary);

}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// NOTE:
// Valid parallel options vary between MPI implementations, so only the common
// ones are flagged here.  Any that are not removed by MPI_Init() will be
// rejected by the subsequent argument processing.  Typical launcher usage is
// sketched after this function.
void Foam::mpiPstreamImpl::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("GAMMANP", "number of instances");
    validParOptions.insert("machinefile", "machine file");
}
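
// Illustrative note (not part of the original source): these flags mirror the
// arguments accepted by common MPI launchers, so a typical parallel start-up
// might look like
//
//     mpirun -np 4 -machinefile hosts <application> -parallel
//
// where <application> stands for any FreeFOAM solver or utility.  The launcher
// consumes -np and -machinefile, while -parallel is left on argv for the
// application itself to handle.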


bool Foam::mpiPstreamImpl::init(int& argc, char**& argv, int& myProcNo, List<int>& procIDs, bool& isParallel)
{
    MPI_Init(&argc, &argv);

    int numprocs;
    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myProcNo);

    if (numprocs <= 1)
    {
        FatalErrorIn("mpiPstreamImpl::init(int& argc, char**& argv)")
            << "bool mpiPstreamImpl::init(int& argc, char**& argv) : "
               "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    procIDs.setSize(numprocs);

    forAll(procIDs, procNo)
    {
        procIDs[procNo] = procNo;
    }

    setParRun(isParallel);

#   ifndef SGIMPI
    //FIX <themiwi@users.sf.net>
    // Use the default bufferSize and let the user override it
    // via $MPI_BUFFER_SIZE if desired.
    int bufferSize = 20000000;

    string bufferSizeName = getEnv("MPI_BUFFER_SIZE");

    if (bufferSizeName.size())
    {
        int tmpBufferSize = atoi(bufferSizeName.c_str());

        if (tmpBufferSize)
        {
            bufferSize = tmpBufferSize;
        }
    }
    MPI_Buffer_attach(new char[bufferSize], bufferSize);
#   endif

    int processorNameLen;
    char processorName[MPI_MAX_PROCESSOR_NAME];

    MPI_Get_processor_name(processorName, &processorNameLen);

    //signal(SIGABRT, stop);

    // Now that the number of processes is known, construct the communication
    // schedule.
    PstreamImpl::initCommunicationSchedule();

    return true;
}
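
// Illustrative note (not part of the original source): MPI_Buffer_attach
// supplies the storage used by MPI buffered-mode sends (MPI_Bsend).  Assuming
// a POSIX shell, the 20000000 byte default chosen above can be overridden per
// run without recompiling, for example:
//
//     MPI_BUFFER_SIZE=100000000 mpirun -np 8 <application> -parallel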


void Foam::mpiPstreamImpl::exit(int errnum)
{
#   ifndef SGIMPI
    int size;
    char* buff;
    MPI_Buffer_detach(&buff, &size);
    delete[] buff;
#   endif

    if (errnum == 0)
    {
        MPI_Finalize();
        ::exit(errnum);
    }
    else
    {
        MPI_Abort(MPI_COMM_WORLD, errnum);
    }
}


void Foam::mpiPstreamImpl::abort()
{
    MPI_Abort(MPI_COMM_WORLD, 1);
}

void Foam::mpiPstreamImpl::reduce(scalar& Value, const sumOp<scalar>& bop)
{
    if (!Pstream::parRun())
    {
        return;
    }

    // Small process counts: the master gathers and combines all values, then
    // redistributes the result to the slaves.
    if (Pstream::nProcs() <= Pstream::nProcsSimpleSum)
    {
        if (Pstream::master())
        {
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                scalar value;

                if
                (
                    MPI_Recv
                    (
                        &value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(slave),
                        Pstream::msgType(),
                        MPI_COMM_WORLD,
                        MPI_STATUS_IGNORE
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }
        }
        else
        {
            if
            (
                MPI_Send
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType(),
                    MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }
        }


        if (Pstream::master())
        {
            for
            (
                int slave=Pstream::firstSlave();
                slave<=Pstream::lastSlave();
                slave++
            )
            {
                if
                (
                    MPI_Send
                    (
                        &Value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(slave),
                        Pstream::msgType(),
                        MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }
        }
        else
        {
            if
            (
                MPI_Recv
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(Pstream::masterNo()),
                    Pstream::msgType(),
                    MPI_COMM_WORLD,
                    MPI_STATUS_IGNORE
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }
    }
    else
    {
        // Larger process counts: perform the reduction with a single MPI
        // collective.
        scalar sum;
        MPI_Allreduce(&Value, &sum, 1, MPI_SCALAR, MPI_SUM, MPI_COMM_WORLD);
        Value = sum;

        /*
        int myProcNo = Pstream::myProcNo();
        int nProcs = Pstream::nProcs();

        //
        // receive from children
        //
        int level = 1;
        int thisLevelOffset = 2;
        int childLevelOffset = thisLevelOffset/2;
        int childProcId = 0;

        while
        (
            (childLevelOffset < nProcs)
         && (myProcNo % thisLevelOffset) == 0
        )
        {
            childProcId = myProcNo + childLevelOffset;

            scalar value;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Recv
                    (
                        &value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(childProcId),
                        Pstream::msgType(),
                        MPI_COMM_WORLD,
                        MPI_STATUS_IGNORE
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Recv failed"
                        << Foam::abort(FatalError);
                }

                Value = bop(Value, value);
            }

            level++;
            thisLevelOffset <<= 1;
            childLevelOffset = thisLevelOffset/2;
        }

        //
        // send and receive from parent
        //
        if (!Pstream::master())
        {
            int parentId = myProcNo - (myProcNo % thisLevelOffset);

            if
            (
                MPI_Send
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(parentId),
                    Pstream::msgType(),
                    MPI_COMM_WORLD
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Send failed"
                    << Foam::abort(FatalError);
            }

            if
            (
                MPI_Recv
                (
                    &Value,
                    1,
                    MPI_SCALAR,
                    Pstream::procID(parentId),
                    Pstream::msgType(),
                    MPI_COMM_WORLD,
                    MPI_STATUS_IGNORE
                )
            )
            {
                FatalErrorIn
                (
                    "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                )   << "MPI_Recv failed"
                    << Foam::abort(FatalError);
            }
        }


        //
        // distribute to my children
        //
        level--;
        thisLevelOffset >>= 1;
        childLevelOffset = thisLevelOffset/2;

        while (level > 0)
        {
            childProcId = myProcNo + childLevelOffset;

            if (childProcId < nProcs)
            {
                if
                (
                    MPI_Send
                    (
                        &Value,
                        1,
                        MPI_SCALAR,
                        Pstream::procID(childProcId),
                        Pstream::msgType(),
                        MPI_COMM_WORLD
                    )
                )
                {
                    FatalErrorIn
                    (
                        "reduce(scalar& Value, const sumOp<scalar>& sumOp)"
                    )   << "MPI_Send failed"
                        << Foam::abort(FatalError);
                }
            }

            level--;
            thisLevelOffset >>= 1;
            childLevelOffset = thisLevelOffset/2;
        }
        */
    }
}
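
// Illustrative sketch (not part of the original source): for a sum, the
// gather-and-redistribute scheme used above for small process counts produces
// the same result as the single collective used for large ones.  A minimal
// standalone equivalent, kept out of compilation:
#if 0
#include <mpi.h>
#include <cstdio>

int main(int argc, char** argv)
{
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    // Every rank contributes one value; every rank receives the global sum.
    double value = rank + 1.0;
    double sum = 0.0;
    MPI_Allreduce(&value, &sum, 1, MPI_DOUBLE, MPI_SUM, MPI_COMM_WORLD);

    std::printf("rank %d: sum = %g\n", rank, sum);

    MPI_Finalize();
    return 0;
}
#endif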


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// ************************ vim: set sw=4 sts=4 et: ************************ //