Actual source code: lmesolve.c
1: /*
2: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3: SLEPc - Scalable Library for Eigenvalue Problem Computations
4: Copyright (c) 2002-, Universitat Politecnica de Valencia, Spain
6: This file is part of SLEPc.
7: SLEPc is distributed under a 2-clause BSD license (see LICENSE).
8: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
9: */
10: /*
11: LME routines related to the solution process
12: */
14: #include <slepc/private/lmeimpl.h>
15: #include <slepcblaslapack.h>
17: /*@
18: LMESolve - Solves the linear matrix equation.
20: Collective
22: Input Parameter:
23: . lme - the linear matrix equation solver context
25: Options Database Keys:
26: + -lme_view - print information about the solver once the solve is complete
27: . -lme_view_pre - print information about the solver before the solve starts
28: . -lme_view_mat_a - view the A matrix
29: . -lme_view_mat_b - view the B matrix
30: . -lme_view_mat_d - view the D matrix
31: . -lme_view_mat_e - view the E matrix
32: . -lme_view_rhs - view the right hand side of the equation
33: . -lme_view_solution - view the computed solution
34: - -lme_converged_reason - print reason for convergence/divergence, and number of iterations
36: Notes:
37: The matrix coefficients are specified with `LMESetCoefficients()`. The right-hand
38: side is specified with `LMESetRHS()`. The placeholder for the solution is specified
39: with `LMESetSolution()`.
41: All the command-line options listed above admit an optional argument specifying
42: the viewer type and options. For instance, use `-lme_view_mat_a binary:amatrix.bin`
43: to save the $A$ matrix to a binary file.
45: Level: beginner
47: .seealso: [](ch:lme), `LMECreate()`, `LMESetUp()`, `LMEDestroy()`, `LMESetTolerances()`, `LMESetCoefficients()`, `LMESetRHS()`, `LMESetSolution()`
48: @*/
49: PetscErrorCode LMESolve(LME lme)
50: {
51: PetscFunctionBegin;
54: /* call setup */
55: PetscCall(LMESetUp(lme));
56: lme->its = 0;
57: lme->errest = 0.0;
59: PetscCall(LMEViewFromOptions(lme,NULL,"-lme_view_pre"));
61: /* call solver */
62: PetscCheck(lme->ops->solve[lme->problem_type],PetscObjectComm((PetscObject)lme),PETSC_ERR_SUP,"The specified solver does not support equation type %s",LMEProblemTypes[lme->problem_type]);
63: PetscCall(PetscLogEventBegin(LME_Solve,lme,0,0,0));
64: PetscUseTypeMethod(lme,solve[lme->problem_type]);
65: PetscCall(PetscLogEventEnd(LME_Solve,lme,0,0,0));
67: PetscCheck(lme->reason,PetscObjectComm((PetscObject)lme),PETSC_ERR_PLIB,"Internal error, solver returned without setting converged reason");
69: PetscCheck(!lme->errorifnotconverged || lme->reason>=0,PetscObjectComm((PetscObject)lme),PETSC_ERR_NOT_CONVERGED,"LMESolve has not converged");
71: /* various viewers */
72: PetscCall(LMEViewFromOptions(lme,NULL,"-lme_view"));
73: PetscCall(LMEConvergedReasonViewFromOptions(lme));
74: PetscCall(MatViewFromOptions(lme->A,(PetscObject)lme,"-lme_view_mat_a"));
75: if (lme->B) PetscCall(MatViewFromOptions(lme->B,(PetscObject)lme,"-lme_view_mat_b"));
76: if (lme->D) PetscCall(MatViewFromOptions(lme->D,(PetscObject)lme,"-lme_view_mat_d"));
77: if (lme->E) PetscCall(MatViewFromOptions(lme->E,(PetscObject)lme,"-lme_view_mat_e"));
78: PetscCall(MatViewFromOptions(lme->C,(PetscObject)lme,"-lme_view_rhs"));
79: PetscCall(MatViewFromOptions(lme->X,(PetscObject)lme,"-lme_view_solution"));
80: PetscFunctionReturn(PETSC_SUCCESS);
81: }
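/*
   Usage sketch for LMESolve(): a typical call sequence for a Lyapunov equation
   A*X + X*A' + C1*C1' = 0, assuming the matrices A and C1 have already been
   created. The low-rank right-hand side C = C1*C1' is built with MatCreateLRC().
   Viewing options such as -lme_view or -lme_view_mat_a binary:amatrix.bin are
   processed inside LMESolve() itself.

     LME lme;
     Mat C;

     PetscCall(LMECreate(PETSC_COMM_WORLD,&lme));
     PetscCall(LMESetProblemType(lme,LME_LYAPUNOV));
     PetscCall(LMESetCoefficients(lme,A,NULL,NULL,NULL));
     PetscCall(MatCreateLRC(NULL,C1,NULL,NULL,&C));
     PetscCall(LMESetRHS(lme,C));
     PetscCall(LMESetFromOptions(lme));
     PetscCall(LMESolve(lme));
     PetscCall(MatDestroy(&C));
     PetscCall(LMEDestroy(&lme));
*/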
83: /*@
84: LMEGetIterationNumber - Gets the current iteration number. If the
85: call to `LMESolve()` is complete, then it returns the number of iterations
86: carried out by the solution method.
88: Not Collective
90: Input Parameter:
91: . lme - the linear matrix equation solver context
93: Output Parameter:
94: . its - number of iterations
96: Note:
97: During the $i$-th iteration this call returns $i-1$. If `LMESolve()` is
98: complete, then parameter `its` contains either the iteration number at
99: which convergence was successfully reached, or at which a failure was detected.
100: Call `LMEGetConvergedReason()` to determine if the solver converged or
101: failed and why.
103: Level: intermediate
105: .seealso: [](ch:lme), `LMEGetConvergedReason()`, `LMESetTolerances()`
106: @*/
107: PetscErrorCode LMEGetIterationNumber(LME lme,PetscInt *its)
108: {
109: PetscFunctionBegin;
111: PetscAssertPointer(its,2);
112: *its = lme->its;
113: PetscFunctionReturn(PETSC_SUCCESS);
114: }
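/*
   Usage sketch: query the iteration count once LMESolve() has returned, assuming
   lme is the solver context used in that call.

     PetscInt its;

     PetscCall(LMEGetIterationNumber(lme,&its));
     PetscCall(PetscPrintf(PETSC_COMM_WORLD," Number of iterations of the method: %" PetscInt_FMT "\n",its));
*/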
116: /*@
117: LMEGetConvergedReason - Gets the reason why the `LMESolve()` iteration was
118: stopped.
120: Not Collective
122: Input Parameter:
123: . lme - the linear matrix equation solver context
125: Output Parameter:
126: . reason - negative value indicates diverged, positive value converged, see
127: `LMEConvergedReason` for the possible values
129: Options Database Key:
130: . -lme_converged_reason - print reason for convergence/divergence, and number of iterations
132: Note:
133: If this routine is called before or during the `LMESolve()` call, then the
134: value `LME_CONVERGED_ITERATING` is returned.
136: Level: intermediate
138: .seealso: [](ch:lme), `LMESetTolerances()`, `LMESolve()`, `LMEConvergedReason`, `LMESetErrorIfNotConverged()`
139: @*/
140: PetscErrorCode LMEGetConvergedReason(LME lme,LMEConvergedReason *reason)
141: {
142: PetscFunctionBegin;
144: PetscAssertPointer(reason,2);
145: *reason = lme->reason;
146: PetscFunctionReturn(PETSC_SUCCESS);
147: }
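/*
   Usage sketch: inspect why the iteration stopped, assuming lme has completed a
   solve. As stated above, a negative reason means divergence and a positive one
   means convergence.

     LMEConvergedReason reason;
     PetscInt its;

     PetscCall(LMEGetConvergedReason(lme,&reason));
     PetscCall(LMEGetIterationNumber(lme,&its));
     if (reason < 0) PetscCall(PetscPrintf(PETSC_COMM_WORLD," Solver diverged (reason=%d) after %" PetscInt_FMT " iterations\n",(int)reason,its));
     else PetscCall(PetscPrintf(PETSC_COMM_WORLD," Solver converged (reason=%d) in %" PetscInt_FMT " iterations\n",(int)reason,its));
*/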
149: /*@
150: LMEGetErrorEstimate - Returns the error estimate obtained during the solve.
152: Not Collective
154: Input Parameter:
155: . lme - the linear matrix equation solver context
157: Output Parameter:
158: . errest - the error estimate
160: Note:
161: This is the error estimated internally by the solver. The actual
162: error bound can be computed with `LMEComputeError()`. Note that some
163: solvers may not be able to provide an error estimate.
165: Level: advanced
167: .seealso: [](ch:lme), `LMEComputeError()`
168: @*/
169: PetscErrorCode LMEGetErrorEstimate(LME lme,PetscReal *errest)
170: {
171: PetscFunctionBegin;
173: PetscAssertPointer(errest,2);
174: *errest = lme->errest;
175: PetscFunctionReturn(PETSC_SUCCESS);
176: }
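/*
   Usage sketch: retrieve the solver's internal error estimate after the solve;
   for an explicitly computed residual norm use LMEComputeError() instead.

     PetscReal errest;

     PetscCall(LMEGetErrorEstimate(lme,&errest));
     PetscCall(PetscPrintf(PETSC_COMM_WORLD," Error estimate reported by the solver: %g\n",(double)errest));
*/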
178: /*
179: LMEComputeResidualNorm_Lyapunov - Computes the Frobenius norm of the residual matrix
180: associated with the Lyapunov equation.
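   With a right-hand side C = C1*C1' and a computed solution X = X1*X1', the
   residual assembled below is R = C1*C1' + (A*X1)*X1' + X1*(A*X1)', and its
   Frobenius norm is returned.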
181: */
182: static PetscErrorCode LMEComputeResidualNorm_Lyapunov(LME lme,PetscReal *norm)
183: {
184: PetscInt j,n,N,k,l,lda,ldb;
185: PetscBLASInt n_,N_,k_,l_,lda_,ldb_;
186: PetscScalar *Rarray,alpha=1.0,beta=0.0;
187: const PetscScalar *A,*B;
188: BV W,AX,X1,C1;
189: Mat R,X1m,C1m;
190: Vec v,w;
191: VecScatter vscat;
193: PetscFunctionBegin;
194: PetscCall(MatLRCGetMats(lme->C,NULL,&C1m,NULL,NULL));
195: PetscCall(MatLRCGetMats(lme->X,NULL,&X1m,NULL,NULL));
196: PetscCall(BVCreateFromMat(C1m,&C1));
197: PetscCall(BVSetFromOptions(C1));
198: PetscCall(BVCreateFromMat(X1m,&X1));
199: PetscCall(BVSetFromOptions(X1));
200: PetscCall(BVGetSizes(X1,&n,&N,&k));
201: PetscCall(BVGetSizes(C1,NULL,NULL,&l));
202: PetscCall(PetscBLASIntCast(n,&n_));
203: PetscCall(PetscBLASIntCast(N,&N_));
204: PetscCall(PetscBLASIntCast(k,&k_));
205: PetscCall(PetscBLASIntCast(l,&l_));
207: /* create W to store a redundant copy of a BV in each process */
208: PetscCall(BVCreate(PETSC_COMM_SELF,&W));
209: PetscCall(BVSetSizes(W,N,N,k));
210: PetscCall(BVSetFromOptions(W));
211: PetscCall(BVGetColumn(X1,0,&v));
212: PetscCall(VecScatterCreateToAll(v,&vscat,NULL));
213: PetscCall(BVRestoreColumn(X1,0,&v));
215: /* create AX to hold the product A*X1 */
216: PetscCall(BVDuplicate(X1,&AX));
217: PetscCall(BVMatMult(X1,lme->A,AX));
219: /* create dense matrix to hold the residual R=C1*C1'+AX*X1'+X1*AX' */
220: PetscCall(MatCreateDense(PetscObjectComm((PetscObject)lme),n,n,N,N,NULL,&R));
222: /* R=C1*C1' */
223: PetscCall(MatDenseGetArrayWrite(R,&Rarray));
224: for (j=0;j<l;j++) {
225: PetscCall(BVGetColumn(C1,j,&v));
226: PetscCall(BVGetColumn(W,j,&w));
227: PetscCall(VecScatterBegin(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
228: PetscCall(VecScatterEnd(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
229: PetscCall(BVRestoreColumn(C1,j,&v));
230: PetscCall(BVRestoreColumn(W,j,&w));
231: }
232: if (n) {
233: PetscCall(BVGetArrayRead(C1,&A));
234: PetscCall(BVGetLeadingDimension(C1,&lda));
235: PetscCall(PetscBLASIntCast(lda,&lda_));
236: PetscCall(BVGetArrayRead(W,&B));
237: PetscCall(BVGetLeadingDimension(W,&ldb));
238: PetscCall(PetscBLASIntCast(ldb,&ldb_));
239: PetscCallBLAS("BLASgemm",BLASgemm_("N","C",&n_,&N_,&l_,&alpha,(PetscScalar*)A,&lda_,(PetscScalar*)B,&ldb_,&beta,Rarray,&n_));
240: PetscCall(BVRestoreArrayRead(C1,&A));
241: PetscCall(BVRestoreArrayRead(W,&B));
242: }
243: beta = 1.0;
245: /* R+=AX*X1' */
246: for (j=0;j<k;j++) {
247: PetscCall(BVGetColumn(X1,j,&v));
248: PetscCall(BVGetColumn(W,j,&w));
249: PetscCall(VecScatterBegin(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
250: PetscCall(VecScatterEnd(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
251: PetscCall(BVRestoreColumn(X1,j,&v));
252: PetscCall(BVRestoreColumn(W,j,&w));
253: }
254: if (n) {
255: PetscCall(BVGetArrayRead(AX,&A));
256: PetscCall(BVGetLeadingDimension(AX,&lda));
257: PetscCall(PetscBLASIntCast(lda,&lda_));
258: PetscCall(BVGetArrayRead(W,&B));
259: PetscCallBLAS("BLASgemm",BLASgemm_("N","C",&n_,&N_,&k_,&alpha,(PetscScalar*)A,&lda_,(PetscScalar*)B,&ldb_,&beta,Rarray,&n_));
260: PetscCall(BVRestoreArrayRead(AX,&A));
261: PetscCall(BVRestoreArrayRead(W,&B));
262: }
264: /* R+=X1*AX' */
265: for (j=0;j<k;j++) {
266: PetscCall(BVGetColumn(AX,j,&v));
267: PetscCall(BVGetColumn(W,j,&w));
268: PetscCall(VecScatterBegin(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
269: PetscCall(VecScatterEnd(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
270: PetscCall(BVRestoreColumn(AX,j,&v));
271: PetscCall(BVRestoreColumn(W,j,&w));
272: }
273: if (n) {
274: PetscCall(BVGetArrayRead(X1,&A));
275: PetscCall(BVGetLeadingDimension(X1,&lda));
276: PetscCall(PetscBLASIntCast(lda,&lda_));
277: PetscCall(BVGetArrayRead(W,&B));
278: PetscCallBLAS("BLASgemm",BLASgemm_("N","C",&n_,&N_,&k_,&alpha,(PetscScalar*)A,&lda_,(PetscScalar*)B,&ldb_,&beta,Rarray,&n_));
279: PetscCall(BVRestoreArrayRead(X1,&A));
280: PetscCall(BVRestoreArrayRead(W,&B));
281: }
282: PetscCall(MatDenseRestoreArrayWrite(R,&Rarray));
284: /* compute ||R||_F */
285: PetscCall(MatNorm(R,NORM_FROBENIUS,norm));
287: PetscCall(BVDestroy(&W));
288: PetscCall(VecScatterDestroy(&vscat));
289: PetscCall(BVDestroy(&AX));
290: PetscCall(MatDestroy(&R));
291: PetscCall(BVDestroy(&C1));
292: PetscCall(BVDestroy(&X1));
293: PetscFunctionReturn(PETSC_SUCCESS);
294: }
296: /*@
297: LMEComputeError - Computes the error (based on the residual norm) associated
298: with the last equation solved.
300: Collective
302: Input Parameter:
303: . lme - the linear matrix equation solver context
305: Output Parameter:
306: . error - the error
308: Notes:
309: This function is not scalable (in terms of memory or parallel communication),
310: so it should be called only when the problem size is small. For
311: large equations, use `LMEGetErrorEstimate()`.
313: Level: advanced
315: .seealso: [](ch:lme), `LMESolve()`, `LMEGetErrorEstimate()`
316: @*/
317: PetscErrorCode LMEComputeError(LME lme,PetscReal *error)
318: {
319: PetscFunctionBegin;
321: PetscAssertPointer(error,2);
323: PetscCall(PetscLogEventBegin(LME_ComputeError,lme,0,0,0));
324: /* compute residual norm */
325: switch (lme->problem_type) {
326: case LME_LYAPUNOV:
327: PetscCall(LMEComputeResidualNorm_Lyapunov(lme,error));
328: break;
329: default:
330: SETERRQ(PetscObjectComm((PetscObject)lme),PETSC_ERR_SUP,"Not implemented for equation type %s",LMEProblemTypes[lme->problem_type]);
331: }
333: /* compute error */
334: /* currently we only support absolute error, so just return the norm */
335: PetscCall(PetscLogEventEnd(LME_ComputeError,lme,0,0,0));
336: PetscFunctionReturn(PETSC_SUCCESS);
337: }
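/*
   Usage sketch: compute the residual-based error after the solve. As noted in the
   documentation above, this forms a dense N-by-N matrix internally, so it is only
   intended for small problem sizes.

     PetscReal error;

     PetscCall(LMEComputeError(lme,&error));
     PetscCall(PetscPrintf(PETSC_COMM_WORLD," Computed residual norm: %g\n",(double)error));
*/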