Actual source code: test8.c
slepc-3.22.1 2024-10-28
/*
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
   SLEPc - Scalable Library for Eigenvalue Problem Computations
   Copyright (c) 2002-, Universitat Politecnica de Valencia, Spain

   This file is part of SLEPc.
   SLEPc is distributed under a 2-clause BSD license (see LICENSE).
   - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
*/
static char help[] = "Solves the same eigenproblem as in example ex2, but using a shell matrix. "
  "The problem is a standard symmetric eigenproblem corresponding to the 2-D Laplacian operator.\n\n"
  "The command line options are:\n"
  "  -n <n>, where <n> = number of grid subdivisions in both x and y dimensions.\n\n";
#include <slepceps.h>
#include <petscblaslapack.h>

/*
   User-defined routines
*/
PetscErrorCode MatMult_Laplacian2D(Mat A,Vec x,Vec y);
PetscErrorCode MatGetDiagonal_Laplacian2D(Mat A,Vec diag);

int main(int argc,char **argv)
{
  Mat            A;               /* operator matrix */
  EPS            eps;             /* eigenproblem solver context */
  PetscReal      tol=1000*PETSC_MACHINE_EPSILON;
  PetscMPIInt    size;
  PetscInt       N,n=10,nev;

  PetscFunctionBeginUser;
  PetscCall(SlepcInitialize(&argc,&argv,NULL,help));
  PetscCallMPI(MPI_Comm_size(PETSC_COMM_WORLD,&size));
  PetscCheck(size==1,PETSC_COMM_WORLD,PETSC_ERR_WRONG_MPI_SIZE,"This is a uniprocessor example only");

  PetscCall(PetscOptionsGetInt(NULL,NULL,"-n",&n,NULL));
  N = n*n;
  PetscCall(PetscPrintf(PETSC_COMM_WORLD,"\n2-D Laplacian Eigenproblem (matrix-free version), N=%" PetscInt_FMT " (%" PetscInt_FMT "x%" PetscInt_FMT " grid)\n\n",N,n,n));

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Compute the operator matrix that defines the eigensystem, Ax=kx
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscCall(MatCreateShell(PETSC_COMM_WORLD,N,N,N,N,&n,&A));
  PetscCall(MatShellSetOperation(A,MATOP_MULT,(void(*)(void))MatMult_Laplacian2D));
  PetscCall(MatShellSetOperation(A,MATOP_MULT_TRANSPOSE,(void(*)(void))MatMult_Laplacian2D));
  PetscCall(MatShellSetOperation(A,MATOP_GET_DIAGONAL,(void(*)(void))MatGetDiagonal_Laplacian2D));
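  /* Note: the shell matrix stores a pointer to n as its user context, which is
     retrieved later with MatShellGetContext() inside MatMult_Laplacian2D().
     Since the operator is symmetric, the same routine is registered for
     MATOP_MULT_TRANSPOSE. */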

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Create the eigensolver and set various options
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  /*
     Create eigensolver context
  */
  PetscCall(EPSCreate(PETSC_COMM_WORLD,&eps));

  /*
     Set operators. In this case, it is a standard eigenvalue problem
  */
  PetscCall(EPSSetOperators(eps,A,NULL));
  PetscCall(EPSSetProblemType(eps,EPS_HEP));
  PetscCall(EPSSetTolerances(eps,tol,PETSC_CURRENT));
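  /* EPS_HEP declares a Hermitian (here, real symmetric) problem, so the solver
     can assume real eigenvalues. The tolerance is a small multiple of machine
     epsilon, and PETSC_CURRENT keeps the current maximum number of iterations. */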

  /*
     Set solver parameters at runtime
  */
  PetscCall(EPSSetFromOptions(eps));

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Solve the eigensystem
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscCall(EPSSolve(eps));
  PetscCall(EPSGetDimensions(eps,&nev,NULL,NULL));
  PetscCall(PetscPrintf(PETSC_COMM_WORLD," Number of requested eigenvalues: %" PetscInt_FMT "\n",nev));

  /* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
     Display solution and clean up
     - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */

  PetscCall(EPSErrorView(eps,EPS_ERROR_RELATIVE,NULL));
  PetscCall(EPSDestroy(&eps));
  PetscCall(MatDestroy(&A));
  PetscCall(SlepcFinalize());
  return 0;
}

/*
    Compute the matrix vector multiplication y<---T*x where T is a nx by nx
    tridiagonal matrix with DD on the diagonal, DL on the subdiagonal, and
    DU on the superdiagonal.
*/
static void tv(int nx,const PetscScalar *x,PetscScalar *y)
{
  PetscScalar dd,dl,du;
  int         j;

  dd = 4.0;
  dl = -1.0;
  du = -1.0;

  y[0] = dd*x[0] + du*x[1];
  for (j=1;j<nx-1;j++)
    y[j] = dl*x[j-1] + dd*x[j] + du*x[j+1];
  y[nx-1] = dl*x[nx-2] + dd*x[nx-1];
}

/*
    Matrix-vector product subroutine for the 2D Laplacian.

    The matrix used is the 2 dimensional discrete Laplacian on unit square with
    zero Dirichlet boundary condition.

    Computes y <-- A*x, where A is the block tridiagonal matrix

                 | T -I          |
                 |-I  T -I       |
             A = |   -I  T       |
                 |        ...  -I|
                 |           -I T|

    The subroutine TV is called to compute y<--T*x.
*/
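/*
    Equivalently, for an interior grid point (i,j) this is the standard 5-point
    stencil  y_{i,j} = 4 x_{i,j} - x_{i-1,j} - x_{i+1,j} - x_{i,j-1} - x_{i,j+1}:
    each call to tv() applies the tridiagonal block T to one grid row, and the
    loops below subtract the contributions of the neighboring rows (the -I blocks).
*/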
PetscErrorCode MatMult_Laplacian2D(Mat A,Vec x,Vec y)
{
  void              *ctx;
  int               nx,lo,i,j;
  const PetscScalar *px;
  PetscScalar       *py;

  PetscFunctionBeginUser;
  PetscCall(MatShellGetContext(A,&ctx));
  nx = *(int*)ctx;
  PetscCall(VecGetArrayRead(x,&px));
  PetscCall(VecGetArray(y,&py));

  /* first block row: y_1 = T*x_1 - x_2 */
  tv(nx,&px[0],&py[0]);
  for (i=0;i<nx;i++) py[i] -= px[nx+i];

  /* interior block rows: y_j = -x_{j-1} + T*x_j - x_{j+1} */
  for (j=2;j<nx;j++) {
    lo = (j-1)*nx;
    tv(nx,&px[lo],&py[lo]);
    for (i=0;i<nx;i++) py[lo+i] -= px[lo-nx+i] + px[lo+nx+i];
  }

  /* last block row: y_nx = -x_{nx-1} + T*x_nx */
  lo = (nx-1)*nx;
  tv(nx,&px[lo],&py[lo]);
  for (i=0;i<nx;i++) py[lo+i] -= px[lo-nx+i];

  PetscCall(VecRestoreArrayRead(x,&px));
  PetscCall(VecRestoreArray(y,&py));
  PetscFunctionReturn(PETSC_SUCCESS);
}
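
/*
    The diagonal of A is constant and equal to 4, so it can be returned with a
    single VecSet(). Registering MATOP_GET_DIAGONAL lets solver components that
    need the matrix diagonal (e.g. the Jacobi preconditioner used by some of the
    tests below) work with the shell matrix.
*/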
PetscErrorCode MatGetDiagonal_Laplacian2D(Mat A,Vec diag)
{
  PetscFunctionBeginUser;
  PetscCall(VecSet(diag,4.0));
  PetscFunctionReturn(PETSC_SUCCESS);
}

/*TEST

   testset:
      args: -n 20 -eps_nev 4 -eps_ncv 11 -eps_max_it 40000
      requires: !single
      output_file: output/test8_1.out
      test:
         suffix: 1
         args: -eps_type {{power subspace arnoldi}}
      test:
         suffix: 1_lanczos
         args: -eps_type lanczos -eps_lanczos_reorthog local
      test:
         suffix: 1_lapack
         args: -eps_type lapack
         timeoutfactor: 2
      test:
         suffix: 1_elemental
         args: -eps_type elemental
         requires: elemental
      test:
         suffix: 1_krylovschur_vecs
         args: -bv_type vecs -bv_orthog_refine always -eps_ncv 10 -vec_mdot_use_gemv 0
      test:
         suffix: 1_jd
         args: -eps_type jd -eps_jd_blocksize 3
      test:
         suffix: 1_gd
         args: -eps_type gd -eps_gd_blocksize 3 -eps_tol 1e-8
      test:
         suffix: 1_gd2
         args: -eps_type gd -eps_gd_double_expansion
      test:
         suffix: 1_primme
         args: -eps_type primme -eps_conv_abs -eps_largest_magnitude
         requires: primme

   testset:
      args: -eps_nev 4 -eps_smallest_real -eps_max_it 600
      output_file: output/test8_2.out
      test:
         suffix: 2
         args: -eps_type {{rqcg lobpcg}}
      test:
         suffix: 2_lanczos
         args: -eps_type lanczos -eps_lanczos_reorthog local
      test:
         suffix: 2_arpack
         args: -eps_type arpack -eps_ncv 6
         requires: arpack !single
      test:
         suffix: 2_primme
         args: -eps_type primme -eps_conv_abs -eps_primme_method lobpcg_orthobasisw -eps_ncv 24
         requires: primme

   testset:
      args: -eps_nev 12 -eps_mpd 9 -eps_smallest_real -eps_max_it 1000
      output_file: output/test8_3.out
      test:
         suffix: 3_rqcg
         args: -eps_type rqcg
      test:
         suffix: 3_lanczos
         args: -eps_type lanczos -eps_lanczos_reorthog local
      test:
         suffix: 3_lobpcg
         args: -eps_type lobpcg -eps_lobpcg_blocksize 3 -eps_lobpcg_locking 0 -st_ksp_type preonly -st_pc_type jacobi
         requires: !__float128
      test:
         suffix: 3_lobpcg_quad
         args: -eps_type lobpcg -eps_lobpcg_blocksize 3 -eps_lobpcg_locking 0 -st_ksp_type preonly -st_pc_type jacobi -eps_tol 1e-25
         requires: __float128

TEST*/