Commit bee5df0a authored by Vasily

2D heat equation - MPI

parent b875d139
Showing with 282 additions and 3 deletions
#include "array_types.hpp"
#include "../vectors_and_matrices/array_types.hpp"
#include "heat_equation.hpp"
#include <iostream>
#include <fstream>
@@ -32,7 +32,6 @@ void print_log_mpi(std::ostream& log_file, intptr_t step, double step_time,
if (myrank == 0)
{
log_file << "Timestep " << step << ". t = " << step_time << "\n";
log_file << "0.0 " << 0.5 * (prob.us(0) + prob.us(1)) << "\n";
int ix = 1;
for (int i = 1; i <= nx; i++)
{
@@ -50,6 +49,7 @@ void print_log_mpi(std::ostream& log_file, intptr_t step, double step_time,
++ix;
}
}
log_file << "\n\n\n";
} else
{
MPI_Send(&nx, 1, MPI_INT, 0, 1, comm);
@@ -213,7 +213,6 @@ int main(int argc, char* argv[]) {
// Ensure that the array on process 0 is at least as large as on the other processes
int nx_each = nx / comm_size + (myrank < (nx % comm_size));
cout << myrank << " " << nx_each << endl;
vec<double> us(nx_each+2), us_next(nx_each+2);
heat_eq_problem prob = {us, us_next, dx, heat_cond, dirichlet_bc(1.0), dirichlet_bc(0.5)};
......
#include "../vectors_and_matrices/array_types.hpp"
#include "heat_equation.hpp"
#include <iostream>
#include <fstream>
#include <cstdint>
#include <cmath>
#include <string>
#include <omp.h>
#include "mpi.h"
using intptr_t = std::intptr_t;
using std::cin;
using std::cout;
using std::endl;
using std::floor;
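// Fill the interior cells (all but the ghost layer) with the constant value u0.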
void set_constant_u(matrix<double> us, double u0)
{
intptr_t nx = us.nrows() - 2, ny = us.ncols() - 2;
for (intptr_t ix = 1; ix <= nx; ix++)
{
for (intptr_t iy = 1; iy <= ny; iy++)
{
us(ix, iy) = u0;
}
}
}
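// Write the full field for one time step: rank 0 prints its own block first,
// then receives every other rank's block (row count: tag 1, row data: tag 2)
// into us_next and prints it, using ix_all as a global row counter so the
// x coordinates of the blocks follow each other consecutively.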
void print_log_mpi(std::ostream& log_file, intptr_t step, double step_time,
heat_eq_problem prob, int comm_size, int myrank, MPI_Comm comm)
{
double hx = prob.step_x, hy = prob.step_y;
intptr_t nx = prob.us.nrows() - 2, ny = prob.us.ncols() - 2;
MPI_Status status;
if (myrank == 0)
{
int ix_all = 1;
log_file << "Timestep " << step << ". t = " << step_time << "\n";
for (intptr_t ix = 1; ix <= nx; ix++)
{
for (intptr_t iy = 1; iy <= ny; iy++)
{
log_file << (ix - 0.5) * hx << " " << (iy - 0.5) * hy << " "
<< prob.us(ix, iy) << "\n";
}
++ix_all;
}
for (int other = 1; other < comm_size; other++)
{
// receive the data from the other process:
// its interior row count (as int, to match MPI_INT) and then the rows themselves
int nx_other;
MPI_Recv(&nx_other, 1, MPI_INT, other, 1, comm, &status);
MPI_Recv(prob.us_next.raw_ptr()+prob.us.ncols(),
nx_other * prob.us.ncols(), MPI_DOUBLE, other, 2, comm, &status);
for (intptr_t ix = 1; ix <= nx_other; ix++)
{
for (intptr_t iy = 1; iy <= ny; iy++)
{
log_file << (ix_all - 0.5) * hx << " " << (iy - 0.5) * hy << " "
<< prob.us_next(ix, iy) << "\n";
}
++ix_all;
}
}
log_file << "\n\n" << endl;
} else
{
// send the local interior row count (as int, to match MPI_INT) and the rows to rank 0
int nx_int = (int) nx;
MPI_Send(&nx_int, 1, MPI_INT, 0, 1, comm);
MPI_Send(prob.us.raw_ptr()+prob.us.ncols(),
nx * prob.us.ncols(), MPI_DOUBLE, 0, 2, comm);
}
}
void apply_bc_mpi(heat_eq_problem& prob, int comm_size, int myrank, MPI_Comm comm)
{
// The boundary condition has the form
// a u + b du/dx = z
// and is approximated as
// a (u_0 + u_1) / 2 + b (u_1 - u_0) / h = z
matrix<double> us = prob.us;
vec<boundary_cond> bc = prob.bc;
intptr_t nx = us.nrows() - 2, ny = us.ncols() - 2;
double hx = prob.step_x, hy = prob.step_y;
MPI_Status status;
MPI_Request req_send_l, req_recv_l, req_send_r, req_recv_r;
double* us_ptr = us.raw_ptr();
int left_neigh = (myrank + comm_size - 1) % comm_size;
int right_neigh = (myrank+1) % comm_size;
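// Rows 1..nx are this rank's interior rows; rows 0 and nx+1 are ghost rows
// that hold copies of the neighbouring ranks' boundary rows.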
// send the second row (first interior row) to the left neighbour
MPI_Isend(us_ptr+us.ncols(), us.ncols(), MPI_DOUBLE, left_neigh, 0, comm, &req_send_l);
// send the next-to-last row (last interior row) to the right neighbour
MPI_Isend(us_ptr+nx*us.ncols(), us.ncols(), MPI_DOUBLE, right_neigh, 1, comm, &req_send_r);
// receive the ghost row from the left neighbour
MPI_Irecv(us_ptr, us.ncols(), MPI_DOUBLE, left_neigh, 1, comm, &req_recv_l);
// receive the ghost row from the right neighbour
MPI_Irecv(us_ptr+(nx+1)*us.ncols(), us.ncols(), MPI_DOUBLE, right_neigh, 0, comm, &req_recv_r);
// While the data is in transit, we can apply the boundary conditions at y = 0 and y = Ly
// y = 0
for (intptr_t ix = 1; ix <= nx; ix++)
{
boundary_cond bci = bc(ix - 1);
double a = bci.coeff_u, b = bci.coeff_du, z = bci.rhp_val;
us(ix, 0) = (z - (a / 2 + b / hy) * us(ix, 1)) / (a / 2 - b / hy);
}
// y = Ly
for (intptr_t ix = 1; ix <= nx; ix++)
{
boundary_cond bci = bc(nx + ix - 1);
double a = bci.coeff_u, b = bci.coeff_du, z = bci.rhp_val;
us(ix, ny+1) = (z - (a / 2 - b / hy) * us(ix, ny)) / (a / 2 + b / hy);
}
MPI_Wait(&req_recv_l, &status);
MPI_Wait(&req_recv_r, &status);
// complete the nonblocking sends as well so the requests are not leaked
MPI_Wait(&req_send_l, &status);
MPI_Wait(&req_send_r, &status);
if (myrank == 0)
{
// x = 0
for (intptr_t iy = 1; iy <= ny; iy++)
{
boundary_cond bci = bc(2 * nx + iy - 1);
double a = bci.coeff_u, b = bci.coeff_du, z = bci.rhp_val;
us(0, iy) = (z - (a / 2 + b / hx) * us(1, iy)) / (a / 2 - b / hx);
}
} else if (myrank == comm_size - 1)
{
// x = Lx
for (intptr_t iy = 1; iy <= ny; iy++)
{
boundary_cond bci = bc(2 * nx + ny + iy - 1);
double a = bci.coeff_u, b = bci.coeff_du, z = bci.rhp_val;
us(nx+1, iy) = (z - (a / 2 - b / hx) * us(nx, iy)) / (a / 2 + b / hx);
}
}
}
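// One step of the explicit (FTCS) scheme:
// u_next(ix,iy) = u(ix,iy)
//   + courant_x * (u(ix-1,iy) - 2 u(ix,iy) + u(ix+1,iy))
//   + courant_y * (u(ix,iy-1) - 2 u(ix,iy) + u(ix,iy+1)),
// with courant_x = conductivity * tau / hx^2 and courant_y = conductivity * tau / hy^2.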
void explicit_step(heat_eq_problem& prob, double tau,
int comm_size, int myrank, MPI_Comm comm)
{
matrix<double> us = prob.us;
matrix<double> us_next = prob.us_next;
intptr_t nx = us.nrows() - 2, ny = us.ncols() - 2;
double hx = prob.step_x, hy = prob.step_y;
double courant_x = tau * prob.conductivity / (hx * hx),
courant_y = tau * prob.conductivity / (hy * hy);
apply_bc_mpi(prob, comm_size, myrank, comm);
for (intptr_t ix=1; ix<=nx; ix++)
{
for (intptr_t iy=1; iy<=ny; iy++) {
us_next(ix, iy) = us(ix, iy) +
courant_x * (-2 * us(ix, iy) + us(ix-1, iy) + us(ix+1, iy)) +
courant_y * (-2 * us(ix, iy) + us(ix, iy-1) + us(ix, iy+1));
}
}
// prob.us, prob.us_next = prob.us_next, prob.us
matrix<double> tmp = us;
prob.us = us_next;
prob.us_next = tmp;
}
void explicit_solver(heat_eq_problem& prob,
double Lx, double Ly, double tau, intptr_t ntimesteps,
intptr_t log_freq, std::ostream& log_file,
int comm_size, int myrank, MPI_Comm comm)
{
// input parameters:
// prob - initial configuration
// Lx, Ly - dimensions of the rectangular domain on which the equation is solved
// tau - time step
// ntimesteps - number of time steps to perform
// log_freq - output information every log_freq steps
// log_file - output stream for the log
intptr_t nx = prob.us.nrows() - 2, ny = prob.us.ncols() - 2;
double hx = Lx / nx, hy = Ly / ny;
prob.step_x = hx; prob.step_y = hy;
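// Stability of the explicit scheme requires 2 * conductivity * tau * (1/hx^2 + 1/hy^2) <= 1.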
double courant = 2.0 * prob.conductivity * tau * (1 / (hx * hx) + 1 / (hy * hy));
if ((courant > 1.0) && (myrank == 0)) {
std::cerr << "Warning: Courant number > 1\n";
}
apply_bc_mpi(prob, comm_size, myrank, comm);
print_log_mpi(log_file, 0, 0.0, prob, comm_size, myrank, comm);
// time evolution
for (intptr_t step = 1; step <= ntimesteps; step++) {
explicit_step(prob, tau, comm_size, myrank, comm);
if (step % log_freq == 0) {
print_log_mpi(log_file, step, step * tau, prob, comm_size, myrank, comm);
}
}
}
int main(int argc, char* argv[]) {
std::string npoints_str, dx_str, dt_str, tmax_str;
double heat_cond = 1.0;
int nx;
double dx, dt, tmax;
MPI_Init(&argc, &argv);
int comm_size, myrank;
MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
MPI_Comm_size(MPI_COMM_WORLD, &comm_size);
if (myrank == 0)
{
cout << "Enter:\nNPOINTS: n\nSTEP_X: h\nSTEP_T: dt\nT_MAX: T" << endl;
cin >> npoints_str;
if (npoints_str != "NPOINTS:") {
cout << "ERROR: Cannot find NPOINTS record\n";
return 1;
}
cin >> nx;
cin >> dx_str;
if (dx_str != "STEP_X:") {
cout << "ERROR: Cannot find STEP_X record\n";
return 1;
}
cin >> dx;
cin >> dt_str;
if (dt_str != "STEP_T:") {
cout << "ERROR: Cannot find STEP_T record\n";
return 1;
}
cin >> dt;
cin >> tmax_str;
if (tmax_str != "T_MAX:") {
cout << "ERROR: Cannot find T_MAX record\n";
return 1;
}
cin >> tmax;
}
MPI_Bcast(&nx, 1, MPI_INT, 0, MPI_COMM_WORLD);
MPI_Bcast(&dx, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&dt, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
MPI_Bcast(&tmax, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
int ny = nx;
intptr_t nsteps_t = floor(tmax / dt);
std::string log_name = "heat_eq_mpi_log.grid" + std::to_string(nx) + "x" + std::to_string(nx) +
"pts.dx" + std::to_string(dx) + ".dt" + std::to_string(dt) + ".txt";
std::ofstream log_file(log_name);
intptr_t log_freq = nsteps_t / 5;
if (log_freq == 0) {
log_freq = 1;
}
// Ensure that the array on process 0 is at least as large as on the other processes
int nx_each = nx / comm_size + (myrank < (nx % comm_size));
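// e.g. nx = 100 on 8 processes: ranks 0-3 hold 13 interior rows each, ranks 4-7 hold 12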
matrix<double> us(nx_each+2, ny+2), us_next(nx_each+2, ny+2);
vec<boundary_cond> bc(2 * (nx_each + ny));
for (intptr_t i=0; i < bc.length(); i++) {
bc(i) = dirichlet_bc(0.5);
}
heat_eq_problem prob = {us, us_next, dx, dx, heat_cond, bc};
set_constant_u(prob.us, 1.0);
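// Initial condition: u = 1.0 in the interior, with the Dirichlet value 0.5 held on all four boundaries.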
double t0 = omp_get_wtime();
explicit_solver(prob, nx * dx, nx * dx, dt, nsteps_t, log_freq, log_file, comm_size, myrank, MPI_COMM_WORLD);
double t1 = omp_get_wtime();
if (myrank == 0)
{
cout << "Execution time: " << t1 - t0 << "\n";
cout << "Steps per second: " << nsteps_t / (t1 - t0) << "\n";
cout << "Output written to " + log_name << std::endl;
}
MPI_Finalize();
return 0;
}