/* lmesolve.c */
1: /*
2: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3: SLEPc - Scalable Library for Eigenvalue Problem Computations
4: Copyright (c) 2002-, Universitat Politecnica de Valencia, Spain
6: This file is part of SLEPc.
7: SLEPc is distributed under a 2-clause BSD license (see LICENSE).
8: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
9: */
10: /*
11: LME routines related to the solution process
12: */
14: #include <slepc/private/lmeimpl.h>
15: #include <slepcblaslapack.h>
17: /*@
18: LMESolve - Solves the linear matrix equation.
20: Collective
22: Input Parameter:
23: . lme - the linear matrix equation solver context
25: Options Database Keys:
26: + -lme_view - print information about the solver once the solve is complete
27: . -lme_view_pre - print information about the solver before the solve starts
28: . -lme_view_mat_a - view the A matrix
29: . -lme_view_mat_b - view the B matrix
30: . -lme_view_mat_d - view the D matrix
31: . -lme_view_mat_e - view the E matrix
32: . -lme_view_rhs - view the right hand side of the equation
33: . -lme_view_solution - view the computed solution
34: - -lme_converged_reason - print reason for convergence/divergence, and number of iterations
36: Notes:
37: The matrix coefficients are specified with `LMESetCoefficients()`. The right-hand
38: side is specified with `LMESetRHS()`. The placeholder for the solution is specified
39: with `LMESetSolution()`.
41: All the command-line options listed above admit an optional argument specifying
42: the viewer type and options. For instance, use `-lme_view_mat_a binary:amatrix.bin`
43: to save the $A$ matrix to a binary file.
44: See `PetscObjectViewFromOptions()` for more details.
46: Level: beginner
48: .seealso: [](ch:lme), `LMECreate()`, `LMESetUp()`, `LMEDestroy()`, `LMESetTolerances()`, `LMESetCoefficients()`, `LMESetRHS()`, `LMESetSolution()`
49: @*/
50: PetscErrorCode LMESolve(LME lme)
51: {
52: PetscFunctionBegin;
55: /* call setup */
56: PetscCall(LMESetUp(lme));
57: lme->its = 0;
58: lme->errest = 0.0;
60: PetscCall(LMEViewFromOptions(lme,NULL,"-lme_view_pre"));
62: /* call solver */
63: PetscCheck(lme->ops->solve[lme->problem_type],PetscObjectComm((PetscObject)lme),PETSC_ERR_SUP,"The specified solver does not support equation type %s",LMEProblemTypes[lme->problem_type]);
64: PetscCall(PetscLogEventBegin(LME_Solve,lme,0,0,0));
65: PetscUseTypeMethod(lme,solve[lme->problem_type]);
66: PetscCall(PetscLogEventEnd(LME_Solve,lme,0,0,0));
68: PetscCheck(lme->reason,PetscObjectComm((PetscObject)lme),PETSC_ERR_PLIB,"Internal error, solver returned without setting converged reason");
70: PetscCheck(!lme->errorifnotconverged || lme->reason>=0,PetscObjectComm((PetscObject)lme),PETSC_ERR_NOT_CONVERGED,"LMESolve has not converged");
72: /* various viewers */
73: PetscCall(LMEViewFromOptions(lme,NULL,"-lme_view"));
74: PetscCall(LMEConvergedReasonViewFromOptions(lme));
75: PetscCall(MatViewFromOptions(lme->A,(PetscObject)lme,"-lme_view_mat_a"));
76: if (lme->B) PetscCall(MatViewFromOptions(lme->B,(PetscObject)lme,"-lme_view_mat_b"));
77: if (lme->D) PetscCall(MatViewFromOptions(lme->D,(PetscObject)lme,"-lme_view_mat_d"));
78: if (lme->E) PetscCall(MatViewFromOptions(lme->E,(PetscObject)lme,"-lme_view_mat_e"));
79: PetscCall(MatViewFromOptions(lme->C,(PetscObject)lme,"-lme_view_rhs"));
80: PetscCall(MatViewFromOptions(lme->X,(PetscObject)lme,"-lme_view_solution"));
81: PetscFunctionReturn(PETSC_SUCCESS);
82: }
84: /*@
85: LMEGetIterationNumber - Gets the current iteration number. If the
86: call to `LMESolve()` is complete, then it returns the number of iterations
87: carried out by the solution method.
89: Not Collective
91: Input Parameter:
92: . lme - the linear matrix equation solver context
94: Output Parameter:
95: . its - number of iterations
97: Note:
98: During the $i$-th iteration this call returns $i-1$. If `LMESolve()` is
99: complete, then parameter `its` contains either the iteration number at
100: which convergence was successfully reached, or failure was detected.
101: Call `LMEGetConvergedReason()` to determine if the solver converged or
102: failed and why.
104: Level: intermediate
106: .seealso: [](ch:lme), `LMEGetConvergedReason()`, `LMESetTolerances()`
107: @*/
108: PetscErrorCode LMEGetIterationNumber(LME lme,PetscInt *its)
109: {
110: PetscFunctionBegin;
112: PetscAssertPointer(its,2);
113: *its = lme->its;
114: PetscFunctionReturn(PETSC_SUCCESS);
115: }
117: /*@
118: LMEGetConvergedReason - Gets the reason why the `LMESolve()` iteration was
119: stopped.
121: Not Collective
123: Input Parameter:
124: . lme - the linear matrix equation solver context
126: Output Parameter:
127: . reason - negative value indicates diverged, positive value converged, see
128: `LMEConvergedReason` for the possible values
130: Options Database Key:
131: . -lme_converged_reason - print reason for convergence/divergence, and number of iterations
133: Note:
134: If this routine is called before or doing the `LMESolve()` the value of
135: `LME_CONVERGED_ITERATING` is returned.
137: Level: intermediate
139: .seealso: [](ch:lme), `LMESetTolerances()`, `LMESolve()`, `LMEConvergedReason`, `LMESetErrorIfNotConverged()`
140: @*/
141: PetscErrorCode LMEGetConvergedReason(LME lme,LMEConvergedReason *reason)
142: {
143: PetscFunctionBegin;
145: PetscAssertPointer(reason,2);
146: *reason = lme->reason;
147: PetscFunctionReturn(PETSC_SUCCESS);
148: }
150: /*@
151: LMEGetErrorEstimate - Returns the error estimate obtained during the solve.
153: Not Collective
155: Input Parameter:
156: . lme - the linear matrix equation solver context
158: Output Parameter:
159: . errest - the error estimate
161: Note:
162: This is the error estimated internally by the solver. The actual
163: error bound can be computed with `LMEComputeError()`. Note that some
164: solvers may not be able to provide an error estimate.
166: Level: advanced
168: .seealso: [](ch:lme), `LMEComputeError()`
169: @*/
170: PetscErrorCode LMEGetErrorEstimate(LME lme,PetscReal *errest)
171: {
172: PetscFunctionBegin;
174: PetscAssertPointer(errest,2);
175: *errest = lme->errest;
176: PetscFunctionReturn(PETSC_SUCCESS);
177: }
179: /*
180: LMEComputeResidualNorm_Lyapunov - Computes the Frobenius norm of the residual matrix
181: associated with the Lyapunov equation.
182: */
183: static PetscErrorCode LMEComputeResidualNorm_Lyapunov(LME lme,PetscReal *norm)
184: {
185: PetscInt j,n,N,k,l,lda,ldb;
186: PetscBLASInt n_,N_,k_,l_,lda_,ldb_;
187: PetscScalar *Rarray,alpha=1.0,beta=0.0;
188: const PetscScalar *A,*B;
189: BV W,AX,X1,C1;
190: Mat R,X1m,C1m;
191: Vec v,w;
192: VecScatter vscat;
194: PetscFunctionBegin;
195: PetscCall(MatLRCGetMats(lme->C,NULL,&C1m,NULL,NULL));
196: PetscCall(MatLRCGetMats(lme->X,NULL,&X1m,NULL,NULL));
197: PetscCall(BVCreateFromMat(C1m,&C1));
198: PetscCall(BVSetFromOptions(C1));
199: PetscCall(BVCreateFromMat(X1m,&X1));
200: PetscCall(BVSetFromOptions(X1));
201: PetscCall(BVGetSizes(X1,&n,&N,&k));
202: PetscCall(BVGetSizes(C1,NULL,NULL,&l));
203: PetscCall(PetscBLASIntCast(n,&n_));
204: PetscCall(PetscBLASIntCast(N,&N_));
205: PetscCall(PetscBLASIntCast(k,&k_));
206: PetscCall(PetscBLASIntCast(l,&l_));
208: /* create W to store a redundant copy of a BV in each process */
209: PetscCall(BVCreate(PETSC_COMM_SELF,&W));
210: PetscCall(BVSetSizes(W,N,N,k));
211: PetscCall(BVSetFromOptions(W));
212: PetscCall(BVGetColumn(X1,0,&v));
213: PetscCall(VecScatterCreateToAll(v,&vscat,NULL));
214: PetscCall(BVRestoreColumn(X1,0,&v));
216: /* create AX to hold the product A*X1 */
217: PetscCall(BVDuplicate(X1,&AX));
218: PetscCall(BVMatMult(X1,lme->A,AX));
220: /* create dense matrix to hold the residual R=C1*C1'+AX*X1'+X1*AX' */
221: PetscCall(MatCreateDense(PetscObjectComm((PetscObject)lme),n,n,N,N,NULL,&R));
223: /* R=C1*C1' */
224: PetscCall(MatDenseGetArrayWrite(R,&Rarray));
225: for (j=0;j<l;j++) {
226: PetscCall(BVGetColumn(C1,j,&v));
227: PetscCall(BVGetColumn(W,j,&w));
228: PetscCall(VecScatterBegin(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
229: PetscCall(VecScatterEnd(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
230: PetscCall(BVRestoreColumn(C1,j,&v));
231: PetscCall(BVRestoreColumn(W,j,&w));
232: }
233: if (n) {
234: PetscCall(BVGetArrayRead(C1,&A));
235: PetscCall(BVGetLeadingDimension(C1,&lda));
236: PetscCall(PetscBLASIntCast(lda,&lda_));
237: PetscCall(BVGetArrayRead(W,&B));
238: PetscCall(BVGetLeadingDimension(W,&ldb));
239: PetscCall(PetscBLASIntCast(ldb,&ldb_));
240: PetscCallBLAS("BLASgemm",BLASgemm_("N","C",&n_,&N_,&l_,&alpha,(PetscScalar*)A,&lda_,(PetscScalar*)B,&ldb_,&beta,Rarray,&n_));
241: PetscCall(BVRestoreArrayRead(C1,&A));
242: PetscCall(BVRestoreArrayRead(W,&B));
243: }
244: beta = 1.0;
246: /* R+=AX*X1' */
247: for (j=0;j<k;j++) {
248: PetscCall(BVGetColumn(X1,j,&v));
249: PetscCall(BVGetColumn(W,j,&w));
250: PetscCall(VecScatterBegin(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
251: PetscCall(VecScatterEnd(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
252: PetscCall(BVRestoreColumn(X1,j,&v));
253: PetscCall(BVRestoreColumn(W,j,&w));
254: }
255: if (n) {
256: PetscCall(BVGetArrayRead(AX,&A));
257: PetscCall(BVGetLeadingDimension(AX,&lda));
258: PetscCall(PetscBLASIntCast(lda,&lda_));
259: PetscCall(BVGetArrayRead(W,&B));
260: PetscCallBLAS("BLASgemm",BLASgemm_("N","C",&n_,&N_,&k_,&alpha,(PetscScalar*)A,&lda_,(PetscScalar*)B,&ldb_,&beta,Rarray,&n_));
261: PetscCall(BVRestoreArrayRead(AX,&A));
262: PetscCall(BVRestoreArrayRead(W,&B));
263: }
265: /* R+=X1*AX' */
266: for (j=0;j<k;j++) {
267: PetscCall(BVGetColumn(AX,j,&v));
268: PetscCall(BVGetColumn(W,j,&w));
269: PetscCall(VecScatterBegin(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
270: PetscCall(VecScatterEnd(vscat,v,w,INSERT_VALUES,SCATTER_FORWARD));
271: PetscCall(BVRestoreColumn(AX,j,&v));
272: PetscCall(BVRestoreColumn(W,j,&w));
273: }
274: if (n) {
275: PetscCall(BVGetArrayRead(X1,&A));
276: PetscCall(BVGetLeadingDimension(AX,&lda));
277: PetscCall(PetscBLASIntCast(lda,&lda_));
278: PetscCall(BVGetArrayRead(W,&B));
279: PetscCallBLAS("BLASgemm",BLASgemm_("N","C",&n_,&N_,&k_,&alpha,(PetscScalar*)A,&lda_,(PetscScalar*)B,&ldb_,&beta,Rarray,&n_));
280: PetscCall(BVRestoreArrayRead(X1,&A));
281: PetscCall(BVRestoreArrayRead(W,&B));
282: }
283: PetscCall(MatDenseRestoreArrayWrite(R,&Rarray));
285: /* compute ||R||_F */
286: PetscCall(MatNorm(R,NORM_FROBENIUS,norm));
288: PetscCall(BVDestroy(&W));
289: PetscCall(VecScatterDestroy(&vscat));
290: PetscCall(BVDestroy(&AX));
291: PetscCall(MatDestroy(&R));
292: PetscCall(BVDestroy(&C1));
293: PetscCall(BVDestroy(&X1));
294: PetscFunctionReturn(PETSC_SUCCESS);
295: }
297: /*@
298: LMEComputeError - Computes the error (based on the residual norm) associated
299: with the last equation solved.
301: Collective
303: Input Parameter:
304: . lme - the linear matrix equation solver context
306: Output Parameter:
307: . error - the error
309: Notes:
310: This function is not scalable (in terms of memory or parallel communication),
311: so it should not be called except in the case of small problem size. For
312: large equations, use `LMEGetErrorEstimate()`.
314: Level: advanced
316: .seealso: [](ch:lme), `LMESolve()`, `LMEGetErrorEstimate()`
317: @*/
318: PetscErrorCode LMEComputeError(LME lme,PetscReal *error)
319: {
320: PetscFunctionBegin;
322: PetscAssertPointer(error,2);
324: PetscCall(PetscLogEventBegin(LME_ComputeError,lme,0,0,0));
325: /* compute residual norm */
326: switch (lme->problem_type) {
327: case LME_LYAPUNOV:
328: PetscCall(LMEComputeResidualNorm_Lyapunov(lme,error));
329: break;
330: default:
331: SETERRQ(PetscObjectComm((PetscObject)lme),PETSC_ERR_SUP,"Not implemented for equation type %s",LMEProblemTypes[lme->problem_type]);
332: }
334: /* compute error */
335: /* currently we only support absolute error, so just return the norm */
336: PetscCall(PetscLogEventEnd(LME_ComputeError,lme,0,0,0));
337: PetscFunctionReturn(PETSC_SUCCESS);
338: }