// Discussion:
//
//   This script is intended as a simple test for the MPI version of
//   FreeFem++.  It implements the Schwarz method in parallel, using
//   just two processes.
//
// Location:
//
//   http://people.sc.fsu.edu/~jburkardt/freefem++/schwarz_mpi/schwarz_mpi.edp
//
// Licensing:
//
//   This code is distributed under the GNU LGPL license.
//
// Modified:
//
//   03 February 2016
//
// Author:
//
//   Frederic Hecht
//
// Reference:
//
//   Frederic Hecht,
//   Freefem++,
//   Third Edition, version 3.22
//
cout << "\n";
cout << "SCHWARZ_MPI\n";
cout << " Schwarz method in parallel, using two processes.\n";
//
//  The overlapping-domain exchange below is hard-wired for exactly
//  two ranks (each rank talks only to rank 1-mpirank), so refuse to
//  run with any other process count.
//
if ( mpisize != 2 )
{
  cout << "\n";
  cout << "SCHWARZ_MPI - Fatal error!\n";
  cout << " Number of processors is not 2!\n";
  exit ( 1 );
}
verbosity=3;
//
//  Region labels: boundary pieces labeled "interior" are the artificial
//  interfaces shared with the other subdomain (where Schwarz transmission
//  conditions are imposed); "exterior" pieces carry the true u = 0
//  Dirichlet condition.
//
int interior = 2;
int exterior = 1;
//
//  Define the border.  Each border is parametrized by t; the label
//  assigned here is what the on(...) conditions in the problem refer to.
//  Borders d and e (label = interior) are the overlap interfaces.
//
border a ( t = 1, 2 ) { x = t; y = 0; label = exterior; };
border b ( t = 0, 1 ) { x = 2; y = t; label = exterior; };
border c ( t = 2, 0 ) { x = t; y = 1; label = exterior; };
border d ( t = 1, 0 ) { x = 1 - t; y = t; label = interior; };
border e ( t = 0, pi/2 ) { x = cos ( t ); y = sin(t); label = interior; };
border e1 ( t = pi/2, 2*pi ) { x = cos ( t ); y = sin(t); label = exterior; };
//
//  Create a mesh Th.
//  Process 0 meshes the polygonal subdomain bounded by a+b+c+d.
//  Process 1 meshes the circular subdomain bounded by e+e1.
//  n scales the number of boundary segments handed to buildmesh.
//
int n = 4;
mesh[int] Th(mpisize);
if ( mpirank == 0 )
{
  Th[0] = buildmesh ( a(5*n) + b(5*n) + c(10*n) + d(5*n) );
}
else
{
  Th[1] = buildmesh ( e(5*n) + e1(25*n) );
}
//
//  Broadcast each mesh from the rank that built it to the other rank,
//  so that afterwards BOTH ranks hold BOTH meshes (needed because each
//  rank interpolates the neighbor's solution on the neighbor's mesh).
//
broadcast ( processor(0), Th[0] );
broadcast ( processor(1), Th[1] );
//
//  Process 0 plots the two meshes (it has both copies after the broadcasts).
//
if ( mpirank == 0 )
{
  plot ( Th[0], Th[1], ps = "schwarz_mpi_mesh.eps" );
}
//
//  Define the finite element spaces on this process and "the other" process.
//
//  Vh lives on this rank's own mesh; Vhother lives on the neighbor's mesh.
//  U (in Vhother's... no: U is declared on Vhother) holds the neighbor's
//  most recent solution, used as interface Dirichlet data below.
//
fespace Vh ( Th[ mpirank], P1 );
fespace Vhother ( Th[1-mpirank], P1 );
Vh u=0.0;
Vh v;
Vhother U=0.0;
int i = 0;
//
//  Local subproblem: -Laplacian(u) = 1 on this rank's subdomain,
//  with u = U (neighbor's current iterate) on the artificial interface
//  and u = 0 on the true boundary.  init = i means the matrix is
//  factorized (Cholesky) only on the first solve (i == 0) and the
//  factorization is reused on later iterations.
//
problem pb ( u, v, init = i, solver = Cholesky ) =
    int2d ( Th[mpirank] ) ( dx(u) * dx(v) + dy(u) * dy(v) )
  + int2d ( Th[mpirank] ) ( -v )
  + on ( interior, u = U )
  + on ( exterior, u = 0.0 );
//
//  Alternating Schwarz iteration: at most 20 sweeps, stopping early
//  when the interface mismatch between the two subdomain solutions
//  drops below 1.0e-03.
//
for ( i = 0; i < 20; i++ )
{
  cout << mpirank << " loop " << i << endl;
  pb;
//
//  Send the contents of u to the other process.
//  Receive in U the solution computed in the other process.
//  NOTE(review): both ranks send before they receive; this relies on
//  FreeFem++'s message passing buffering the outgoing array so the
//  symmetric exchange does not deadlock — confirm against the FreeFem
//  MPI documentation for large meshes.
//
  processor(1-mpirank) << u[];
  processor(1-mpirank) >> U[];
  real err0;
  real err1;
//
//  err0 is this rank's squared L2 mismatch between its own solution u
//  and the neighbor's solution U, integrated along the interface.
//
  err0 = int1d ( Th[mpirank], interior ) ( square ( U - u ) );
//
//  send err0;
//  receive err1 (the neighbor's corresponding mismatch).
//
  processor(1-mpirank) << err0;
  processor(1-mpirank) >> err1;
  real err = sqrt ( err0 + err1 );
  cout << " err = " << err << " err0 = " << err0
       << " err1 = " << err1 << endl;
  if ( err < 1.0e-03 )
  {
    break;
  }
};
//
//  Process 0 plots the solution (its own u plus the neighbor's U).
//
if ( mpirank == 0 )
{
  plot ( u, U, ps = "schwarz_mpi_u.eps" );
}
//
//  Terminate.
//
cout << "\n";
cout << "SCHWARZ_MPI:\n";
cout << " Normal end of execution.\n";