Actual source code: neprefine.c
slepc-3.11.2 2019-07-30
1: /*
2: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3: SLEPc - Scalable Library for Eigenvalue Problem Computations
4: Copyright (c) 2002-2019, Universitat Politecnica de Valencia, Spain
6: This file is part of SLEPc.
7: SLEPc is distributed under a 2-clause BSD license (see LICENSE).
8: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
9: */
10: /*
11: Newton refinement for NEP, simple version
12: */
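/*
   Minimal usage sketch, assuming an already configured NEP object nep and
   user-chosen values npart, tol and maxits: simple Newton refinement is
   typically requested before NEPSolve(), e.g.

      NEPSetRefine(nep,NEP_REFINE_SIMPLE,npart,tol,maxits,NEP_REFINE_SCHEME_SCHUR);

   or via the options database, e.g. -nep_refine simple -nep_refine_scheme schur;
   NEPSolve() then calls NEPNewtonRefinementSimple() below on the computed
   eigenpairs.
*/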
14: #include <slepc/private/nepimpl.h>
15: #include <slepcblaslapack.h>
17: #define NREF_MAXIT 10
19: typedef struct {
20: VecScatter *scatter_id,nst;
21: Mat *A;
22: Vec nv,vg,v,w;
23: FN *fn;
24: } NEPSimpNRefctx;
26: typedef struct {
27: Mat M1;
28: Vec M2,M3;
29: PetscScalar M4,m3;
30: } FSubctx;
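/*
   Shell matrix product used by the Schur refinement scheme:
   y = M1*x - (m3/M4)*(M3'*x)*M2, where M1 = T(lambda) and the remaining
   fields are filled in NEPSimpleNRefSetUpSystem() below.
*/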
32: static PetscErrorCode MatFSMult(Mat M,Vec x,Vec y)
33: {
35: FSubctx *ctx;
36: PetscScalar t;
39: MatShellGetContext(M,&ctx);
40: VecDot(x,ctx->M3,&t);
41: t *= ctx->m3/ctx->M4;
42: MatMult(ctx->M1,x,y);
43: VecAXPY(y,-t,ctx->M2);
44: return(0);
45: }
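/*
   Set up the refinement context: with npart=1 the problem matrices and
   functions are used directly; otherwise redundant copies of the A_i and f_i
   are created on each subcommunicator and scatters are built to move
   eigenvectors between the parent communicator and the subcommunicators.
   For the explicit scheme, the scatter nst gathers a whole vector onto the
   last process, which owns the border row of the bordered matrix.
*/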
47: static PetscErrorCode NEPSimpleNRefSetUp(NEP nep,NEPSimpNRefctx **ctx_)
48: {
50: PetscInt i,si,j,n0,m0,nloc,*idx1,*idx2,ne;
51: IS is1,is2;
52: NEPSimpNRefctx *ctx;
53: Vec v;
54: PetscMPIInt rank,size;
57: PetscCalloc1(1,ctx_);
58: ctx = *ctx_;
59: if (nep->npart==1) {
60: ctx->A = nep->A;
61: ctx->fn = nep->f;
62: nep->refinesubc = NULL;
63: ctx->scatter_id = NULL;
64: } else {
65: PetscMalloc2(nep->nt,&ctx->A,nep->npart,&ctx->scatter_id);
67: /* Duplicate matrices */
68: for (i=0;i<nep->nt;i++) {
69: MatCreateRedundantMatrix(nep->A[i],0,PetscSubcommChild(nep->refinesubc),MAT_INITIAL_MATRIX,&ctx->A[i]);
70: }
71: MatCreateVecs(ctx->A[0],&ctx->v,NULL);
73: /* Duplicate FNs */
74: PetscMalloc1(nep->nt,&ctx->fn);
75: for (i=0;i<nep->nt;i++) {
76: FNDuplicate(nep->f[i],PetscSubcommChild(nep->refinesubc),&ctx->fn[i]);
77: }
79: /* Create scatters for sending vectors to each subcommunicator */
80: BVGetColumn(nep->V,0,&v);
81: VecGetOwnershipRange(v,&n0,&m0);
82: BVRestoreColumn(nep->V,0,&v);
83: VecGetLocalSize(ctx->v,&nloc);
84: PetscMalloc2(m0-n0,&idx1,m0-n0,&idx2);
85: VecCreateMPI(PetscObjectComm((PetscObject)nep),nloc,PETSC_DECIDE,&ctx->vg);
86: for (si=0;si<nep->npart;si++) {
87: j = 0;
88: for (i=n0;i<m0;i++) {
89: idx1[j] = i;
90: idx2[j++] = i+nep->n*si;
91: }
92: ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx1,PETSC_COPY_VALUES,&is1);
93: ISCreateGeneral(PetscObjectComm((PetscObject)nep),(m0-n0),idx2,PETSC_COPY_VALUES,&is2);
94: BVGetColumn(nep->V,0,&v);
95: VecScatterCreate(v,is1,ctx->vg,is2,&ctx->scatter_id[si]);
96: BVRestoreColumn(nep->V,0,&v);
97: ISDestroy(&is1);
98: ISDestroy(&is2);
99: }
100: PetscFree2(idx1,idx2);
101: }
102: if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
103: MPI_Comm_rank(PetscObjectComm((PetscObject)ctx->A[0]),&rank);
104: MPI_Comm_size(PetscObjectComm((PetscObject)ctx->A[0]),&size);
105: if (size>1) {
106: if (nep->npart==1) {
107: BVGetColumn(nep->V,0,&v);
108: } else v = ctx->v;
109: VecGetOwnershipRange(v,&n0,&m0);
110: ne = (rank == size-1)?nep->n:0;
111: VecCreateMPI(PetscObjectComm((PetscObject)ctx->A[0]),ne,PETSC_DECIDE,&ctx->nv);
112: PetscMalloc1(m0-n0,&idx1);
113: for (i=n0;i<m0;i++) {
114: idx1[i-n0] = i;
115: }
116: ISCreateGeneral(PetscObjectComm((PetscObject)ctx->A[0]),(m0-n0),idx1,PETSC_COPY_VALUES,&is1);
117: VecScatterCreate(v,is1,ctx->nv,is1,&ctx->nst);
118: if (nep->npart==1) {
119: BVRestoreColumn(nep->V,0,&v);
120: }
121: PetscFree(idx1);
122: ISDestroy(&is1);
123: }
124: }
  return(0);
125: }
127: /*
128: Gather eigenpair idx from the subcommunicator with color sc
129: */
130: static PetscErrorCode NEPSimpleNRefGatherEigenpair(NEP nep,NEPSimpNRefctx *ctx,PetscInt sc,PetscInt idx,PetscInt *fail)
131: {
133: PetscMPIInt nproc,p;
134: MPI_Comm comm=((PetscObject)nep)->comm;
135: Vec v;
136: PetscScalar *array;
139: MPI_Comm_size(comm,&nproc);
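  /* Global rank of the last process of subcommunicator sc (contiguous
     subcommunicators assumed); it is used as the broadcast root below */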
140: p = (nproc/nep->npart)*(sc+1)+PetscMin(sc+1,nproc%nep->npart)-1;
141: if (nep->npart>1) {
142: /* Communicate whether convergence was successful */
143: MPI_Bcast(fail,1,MPIU_INT,p,comm);
144: if (!(*fail)) {
145: /* Process 0 of subcommunicator sc broadcasts the eigenvalue */
146: MPI_Bcast(&nep->eigr[idx],1,MPIU_SCALAR,p,comm);
147: /* Gather nep->V[idx] from the subcommunicator sc */
148: BVGetColumn(nep->V,idx,&v);
149: if (nep->refinesubc->color==sc) {
150: VecGetArray(ctx->v,&array);
151: VecPlaceArray(ctx->vg,array);
152: }
153: VecScatterBegin(ctx->scatter_id[sc],ctx->vg,v,INSERT_VALUES,SCATTER_REVERSE);
154: VecScatterEnd(ctx->scatter_id[sc],ctx->vg,v,INSERT_VALUES,SCATTER_REVERSE);
155: if (nep->refinesubc->color==sc) {
156: VecResetArray(ctx->vg);
157: VecRestoreArray(ctx->v,&array);
158: }
159: BVRestoreColumn(nep->V,idx,&v);
160: }
161: } else {
162: if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT && !(*fail)) {
163: MPI_Bcast(&nep->eigr[idx],1,MPIU_SCALAR,p,comm);
164: }
165: }
166: return(0);
167: }
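/*
  Scatter eigenvector idx from the parent communicator to the processes of
  subcommunicator sc, which receive it in ctx->v
*/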
169: static PetscErrorCode NEPSimpleNRefScatterEigenvector(NEP nep,NEPSimpNRefctx *ctx,PetscInt sc,PetscInt idx)
170: {
172: Vec v;
173: PetscScalar *array;
176: if (nep->npart>1) {
177: BVGetColumn(nep->V,idx,&v);
178: if (nep->refinesubc->color==sc) {
179: VecGetArray(ctx->v,&array);
180: VecPlaceArray(ctx->vg,array);
181: }
182: VecScatterBegin(ctx->scatter_id[sc],v,ctx->vg,INSERT_VALUES,SCATTER_FORWARD);
183: VecScatterEnd(ctx->scatter_id[sc],v,ctx->vg,INSERT_VALUES,SCATTER_FORWARD);
184: if (nep->refinesubc->color==sc) {
185: VecResetArray(ctx->vg);
186: VecRestoreArray(ctx->v,&array);
187: }
188: BVRestoreColumn(nep->V,idx,&v);
189: }
190: return(0);
191: }
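/*
  Build the linear system for one Newton step on eigenpair idx: M = T(lambda)
  and w = T'(lambda)*v are assembled from the split form and then, depending
  on the scheme, T/P are set to the explicitly assembled bordered matrix
  [T(lambda) w; v' 0], to T(lambda) itself (MBE), or to a shell operator with
  a diagonally corrected preconditioner (Schur)
*/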
193: static PetscErrorCode NEPSimpleNRefSetUpSystem(NEP nep,NEPSimpNRefctx *ctx,Mat *A,PetscInt idx,Mat *Mt,Mat *T,Mat *P,PetscBool ini,Vec t,Vec v)
194: {
195: PetscErrorCode ierr;
196: PetscInt i,st,ml,m0,n0,m1,mg;
197: PetscInt *dnz,*onz,ncols,*cols2=NULL,*nnz,nt=nep->nt;
198: PetscScalar zero=0.0,*coeffs,*coeffs2;
199: PetscMPIInt rank,size;
200: MPI_Comm comm;
201: const PetscInt *cols;
202: const PetscScalar *vals,*array;
203: FSubctx *fctx;
204: Vec w=ctx->w;
205: Mat M;
208: PetscMalloc2(nt,&coeffs,nt,&coeffs2);
209: switch (nep->scheme) {
210: case NEP_REFINE_SCHEME_SCHUR:
211: if (ini) {
212: PetscCalloc1(1,&fctx);
213: MatGetSize(A[0],&m0,&n0);
214: MatCreateShell(PetscObjectComm((PetscObject)A[0]),PETSC_DECIDE,PETSC_DECIDE,m0,n0,fctx,T);
215: MatShellSetOperation(*T,MATOP_MULT,(void(*)(void))MatFSMult);
216: } else {
217: MatShellGetContext(*T,&fctx);
218: }
219: M=fctx->M1;
220: break;
221: case NEP_REFINE_SCHEME_MBE:
222: M=*T;
223: break;
224: case NEP_REFINE_SCHEME_EXPLICIT:
225: M=*Mt;
226: break;
227: }
228: if (ini) {
229: MatDuplicate(A[0],MAT_COPY_VALUES,&M);
230: } else {
231: MatCopy(A[0],M,DIFFERENT_NONZERO_PATTERN);
232: }
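  /* Evaluate f_i(lambda) and assemble M = T(lambda) = sum_i f_i(lambda)*A_i,
     then compute w = T'(lambda)*v from the derivatives f_i'(lambda) */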
233: for (i=0;i<nt;i++) {
234: FNEvaluateFunction(ctx->fn[i],nep->eigr[idx],coeffs+i);
235: }
236: if (coeffs[0]!=1.0) {
237: MatScale(M,coeffs[0]);
238: }
239: for (i=1;i<nt;i++) {
240: MatAXPY(M,coeffs[i],A[i],(ini)?nep->mstr:SUBSET_NONZERO_PATTERN);
241: }
242: for (i=0;i<nt;i++) {
243: FNEvaluateDerivative(ctx->fn[i],nep->eigr[idx],coeffs2+i);
244: }
245: st = 0;
246: for (i=0;i<nt && PetscAbsScalar(coeffs2[i])==0.0;i++) st++;
247: MatMult(A[st],v,w);
248: if (coeffs2[st]!=1.0) {
249: VecScale(w,coeffs2[st]);
250: }
251: for (i=st+1;i<nt;i++) {
252: MatMult(A[i],v,t);
253: VecAXPY(w,coeffs2[i],t);
254: }
256: switch (nep->scheme) {
257: case NEP_REFINE_SCHEME_EXPLICIT:
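      /* Assemble the (n+1)x(n+1) bordered matrix [T(lambda) w; v' 0];
         the extra row and column are stored on the last process */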
258: comm = PetscObjectComm((PetscObject)A[0]);
259: MPI_Comm_rank(comm,&rank);
260: MPI_Comm_size(comm,&size);
261: MatGetSize(M,&mg,NULL);
262: MatGetOwnershipRange(M,&m0,&m1);
263: if (ini) {
264: MatCreate(comm,T);
265: MatGetLocalSize(M,&ml,NULL);
266: if (rank==size-1) ml++;
267: MatSetSizes(*T,ml,ml,mg+1,mg+1);
268: MatSetFromOptions(*T);
269: MatSetUp(*T);
270: /* Preallocate T (sparsity of M plus the extra border row and column) */
271: if (size>1) {
272: MatPreallocateInitialize(comm,ml,ml,dnz,onz);
273: for (i=m0;i<m1;i++) {
274: MatGetRow(M,i,&ncols,&cols,NULL);
275: MatPreallocateSet(i,ncols,cols,dnz,onz);
276: MatPreallocateSet(i,1,&mg,dnz,onz);
277: MatRestoreRow(M,i,&ncols,&cols,NULL);
278: }
279: if (rank==size-1) {
280: PetscCalloc1(mg+1,&cols2);
281: for (i=0;i<mg+1;i++) cols2[i]=i;
282: MatPreallocateSet(m1,mg+1,cols2,dnz,onz);
283: PetscFree(cols2);
284: }
285: MatMPIAIJSetPreallocation(*T,0,dnz,0,onz);
286: MatPreallocateFinalize(dnz,onz);
287: } else {
288: PetscCalloc1(mg+1,&nnz);
289: for (i=0;i<mg;i++) {
290: MatGetRow(M,i,&ncols,NULL,NULL);
291: nnz[i] = ncols+1;
292: MatRestoreRow(M,i,&ncols,NULL,NULL);
293: }
294: nnz[mg] = mg+1;
295: MatSeqAIJSetPreallocation(*T,0,nnz);
296: PetscFree(nnz);
297: }
298: *Mt = M;
299: *P = *T;
300: }
302: /* Set values */
303: VecGetArrayRead(w,&array);
304: for (i=m0;i<m1;i++) {
305: MatGetRow(M,i,&ncols,&cols,&vals);
306: MatSetValues(*T,1,&i,ncols,cols,vals,INSERT_VALUES);
307: MatRestoreRow(M,i,&ncols,&cols,&vals);
308: MatSetValues(*T,1,&i,1,&mg,array+i-m0,INSERT_VALUES);
309: }
310: VecRestoreArrayRead(w,&array);
311: VecConjugate(v);
312: MPI_Comm_size(PetscObjectComm((PetscObject)A[0]),&size);
313: MPI_Comm_rank(PetscObjectComm((PetscObject)A[0]),&rank);
314: if (size>1) {
315: if (rank==size-1) {
316: PetscMalloc1(nep->n,&cols2);
317: for (i=0;i<nep->n;i++) cols2[i]=i;
318: }
319: VecScatterBegin(ctx->nst,v,ctx->nv,INSERT_VALUES,SCATTER_FORWARD);
320: VecScatterEnd(ctx->nst,v,ctx->nv,INSERT_VALUES,SCATTER_FORWARD);
321: VecGetArrayRead(ctx->nv,&array);
322: if (rank==size-1) {
323: MatSetValues(*T,1,&mg,nep->n,cols2,array,INSERT_VALUES);
324: MatSetValues(*T,1,&mg,1,&mg,&zero,INSERT_VALUES);
325: }
326: VecRestoreArrayRead(ctx->nv,&array);
327: } else {
328: PetscMalloc1(m1-m0,&cols2);
329: for (i=0;i<m1-m0;i++) cols2[i]=m0+i;
330: VecGetArrayRead(v,&array);
331: MatSetValues(*T,1,&mg,m1-m0,cols2,array,INSERT_VALUES);
332: MatSetValues(*T,1,&mg,1,&mg,&zero,INSERT_VALUES);
333: VecRestoreArrayRead(v,&array);
334: }
335: VecConjugate(v);
336: MatAssemblyBegin(*T,MAT_FINAL_ASSEMBLY);
337: MatAssemblyEnd(*T,MAT_FINAL_ASSEMBLY);
338: PetscFree(cols2);
339: break;
340: case NEP_REFINE_SCHEME_SCHUR:
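      /* Fill the context of the shell operator (see MatFSMult): M1 = T(lambda),
         M2 = w = T'(lambda)*v, M3 = v, M4 = conj(lambda), m3 = 1+|lambda|^2;
         the preconditioner P is T(lambda) plus the diagonal correction
         -(m3/M4)*conj(v).*w */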
341: fctx->M2 = ctx->w;
342: fctx->M3 = v;
343: fctx->m3 = 1.0+PetscConj(nep->eigr[idx])*nep->eigr[idx];
344: fctx->M4 = PetscConj(nep->eigr[idx]);
345: fctx->M1 = M;
346: if (ini) {
347: MatDuplicate(M,MAT_COPY_VALUES,P);
348: } else {
349: MatCopy(M,*P,SAME_NONZERO_PATTERN);
350: }
351: if (fctx->M4!=0.0) {
352: VecConjugate(v);
353: VecPointwiseMult(t,v,w);
354: VecConjugate(v);
355: VecScale(t,-fctx->m3/fctx->M4);
356: MatDiagonalSet(*P,t,ADD_VALUES);
357: }
358: break;
359: case NEP_REFINE_SCHEME_MBE:
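      /* MBE uses T(lambda) itself as both operator and preconditioner */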
360: *T = M;
361: *P = M;
362: break;
363: }
364: PetscFree2(coeffs,coeffs2);
365: return(0);
366: }
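/*
  Driver for simple iterative refinement: the k eigenpairs are distributed among
  the npart subcommunicators; each subcommunicator performs Newton iterations on
  its current eigenpair (at most *maxits, or NREF_MAXIT by default), checking the
  relative error against tol with NEPComputeError(), until all eigenpairs have
  been processed or flagged as failed
*/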
368: PetscErrorCode NEPNewtonRefinementSimple(NEP nep,PetscInt *maxits,PetscReal tol,PetscInt k)
369: {
370: PetscErrorCode ierr;
371: PetscInt i,n,its,idx=0,*idx_sc,*its_sc,color,*fail_sc;
372: PetscMPIInt rank,size;
373: Mat Mt=NULL,T=NULL,P=NULL;
374: MPI_Comm comm;
375: Vec r,v,dv,rr=NULL,dvv=NULL,t[2];
376: const PetscScalar *array;
377: PetscScalar *array2,deig=0.0,tt[2],ttt;
378: PetscReal norm,error;
379: PetscBool ini=PETSC_TRUE,sc_pend,solved=PETSC_FALSE;
380: NEPSimpNRefctx *ctx;
381: FSubctx *fctx=NULL;
382: KSPConvergedReason reason;
385: PetscLogEventBegin(NEP_Refine,nep,0,0,0);
386: NEPSimpleNRefSetUp(nep,&ctx);
387: its = (maxits)?*maxits:NREF_MAXIT;
388: comm = (nep->npart==1)?PetscObjectComm((PetscObject)nep):PetscSubcommChild(nep->refinesubc);
389: if (!nep->refineksp) { NEPRefineGetKSP(nep,&nep->refineksp); }
390: if (nep->npart==1) {
391: BVGetColumn(nep->V,0,&v);
392: } else v = ctx->v;
393: VecDuplicate(v,&ctx->w);
394: VecDuplicate(v,&r);
395: VecDuplicate(v,&dv);
396: VecDuplicate(v,&t[0]);
397: VecDuplicate(v,&t[1]);
398: if (nep->npart==1) { BVRestoreColumn(nep->V,0,&v); }
399: MPI_Comm_size(comm,&size);
400: MPI_Comm_rank(comm,&rank);
401: VecGetLocalSize(r,&n);
402: PetscMalloc3(nep->npart,&idx_sc,nep->npart,&its_sc,nep->npart,&fail_sc);
403: for (i=0;i<nep->npart;i++) fail_sc[i] = 0;
404: for (i=0;i<nep->npart;i++) its_sc[i] = 0;
405: color = (nep->npart==1)?0:nep->refinesubc->color;
407: /* Loop performing iterative refinements */
408: while (!solved) {
409: for (i=0;i<nep->npart;i++) {
410: sc_pend = PETSC_TRUE;
411: if (its_sc[i]==0) {
412: idx_sc[i] = idx++;
413: if (idx_sc[i]>=k) {
414: sc_pend = PETSC_FALSE;
415: } else {
416: NEPSimpleNRefScatterEigenvector(nep,ctx,i,idx_sc[i]);
417: }
418: } else { /* Gather eigenpair from subcommunicator i */
419: NEPSimpleNRefGatherEigenpair(nep,ctx,i,idx_sc[i],&fail_sc[i]);
420: }
421: while (sc_pend) {
422: if (!fail_sc[i]) {
423: NEPComputeError(nep,idx_sc[i],NEP_ERROR_RELATIVE,&error);
424: }
425: if (error<=tol || its_sc[i]>=its || fail_sc[i]) {
426: idx_sc[i] = idx++;
427: its_sc[i] = 0;
428: fail_sc[i] = 0;
429: if (idx_sc[i]<k) { NEPSimpleNRefScatterEigenvector(nep,ctx,i,idx_sc[i]); }
430: } else {
431: sc_pend = PETSC_FALSE;
432: its_sc[i]++;
433: }
434: if (idx_sc[i]>=k) sc_pend = PETSC_FALSE;
435: }
436: }
437: solved = PETSC_TRUE;
438: for (i=0;i<nep->npart&&solved;i++) solved = PetscNot(idx_sc[i]<k);
439: if (idx_sc[color]<k) {
440: #if !defined(PETSC_USE_COMPLEX)
441: if (nep->eigi[idx_sc[color]]!=0.0) SETERRQ(PetscObjectComm((PetscObject)nep),1,"Simple refinement not implemented in real scalars for complex eigenvalues");
442: #endif
443: if (nep->npart==1) {
444: BVGetColumn(nep->V,idx_sc[color],&v);
445: } else v = ctx->v;
446: NEPSimpleNRefSetUpSystem(nep,ctx,ctx->A,idx_sc[color],&Mt,&T,&P,ini,t[0],v);
447: KSPSetOperators(nep->refineksp,T,P);
448: if (ini) {
449: KSPSetFromOptions(nep->refineksp);
450: if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
451: MatCreateVecs(T,&dvv,NULL);
452: VecDuplicate(dvv,&rr);
453: }
454: ini = PETSC_FALSE;
455: }
456: switch (nep->scheme) {
457: case NEP_REFINE_SCHEME_EXPLICIT:
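        /* One Newton step with the explicitly assembled bordered matrix:
           solve T*[dv;dlambda] = [T(lambda)*v;0], then update
           v = (v-dv)/||v-dv|| and lambda = lambda-dlambda, where dlambda is
           the last entry of dvv, owned by the last process */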
458: MatMult(Mt,v,r);
459: VecGetArrayRead(r,&array);
460: if (rank==size-1) {
461: VecGetArray(rr,&array2);
462: PetscMemcpy(array2,array,n*sizeof(PetscScalar));
463: array2[n] = 0.0;
464: VecRestoreArray(rr,&array2);
465: } else {
466: VecPlaceArray(rr,array);
467: }
468: KSPSolve(nep->refineksp,rr,dvv);
469: KSPGetConvergedReason(nep->refineksp,&reason);
470: if (reason>0) {
471: if (rank != size-1) {
472: VecResetArray(rr);
473: }
474: VecRestoreArrayRead(r,&array);
475: VecGetArrayRead(dvv,&array);
476: VecPlaceArray(dv,array);
477: VecAXPY(v,-1.0,dv);
478: VecNorm(v,NORM_2,&norm);
479: VecScale(v,1.0/norm);
480: VecResetArray(dv);
481: if (rank==size-1) nep->eigr[idx_sc[color]] -= array[n];
482: VecRestoreArrayRead(dvv,&array);
483: } else fail_sc[color] = 1;
484: break;
485: case NEP_REFINE_SCHEME_MBE:
486: MatMult(T,v,r);
487: /* Mixed block elimination */
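        /* The bordered Newton system is solved using only solves with T(lambda)
           and its transpose: a transpose solve with conj(v), a solve with
           w = T'(lambda)*v, and a solve with the corrected residual give dv and
           the eigenvalue correction deig = tt[0]+tt[1] */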
488: VecConjugate(v);
489: KSPSolveTranspose(nep->refineksp,v,t[0]);
490: KSPGetConvergedReason(nep->refineksp,&reason);
491: if (reason>0) {
492: VecConjugate(t[0]);
493: VecDot(ctx->w,t[0],&tt[0]);
494: KSPSolve(nep->refineksp,ctx->w,t[1]);
495: KSPGetConvergedReason(nep->refineksp,&reason);
496: if (reason>0) {
497: VecDot(t[1],v,&tt[1]);
498: VecDot(r,t[0],&ttt);
499: tt[0] = ttt/tt[0];
500: VecAXPY(r,-tt[0],ctx->w);
501: KSPSolve(nep->refineksp,r,dv);
502: KSPGetConvergedReason(nep->refineksp,&reason);
503: if (reason>0) {
504: VecDot(dv,v,&ttt);
505: tt[1] = ttt/tt[1];
506: VecAXPY(dv,-tt[1],t[1]);
507: deig = tt[0]+tt[1];
508: }
509: }
510: VecConjugate(v);
511: VecAXPY(v,-1.0,dv);
512: VecNorm(v,NORM_2,&norm);
513: VecScale(v,1.0/norm);
514: nep->eigr[idx_sc[color]] -= deig;
515: fail_sc[color] = 0;
516: } else {
517: VecConjugate(v);
518: fail_sc[color] = 1;
519: }
520: break;
521: case NEP_REFINE_SCHEME_SCHUR:
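        /* Schur complement scheme: solve with the shell operator (MatFSMult)
           and right-hand side r = T(lambda)*v; the eigenvalue correction is
           recovered as deig = -(m3/M4)*(v'*dv). Only applied when
           M4 = conj(lambda) is nonzero; otherwise the eigenpair is flagged as
           failed */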
522: fail_sc[color] = 1;
523: MatShellGetContext(T,&fctx);
524: if (fctx->M4!=0.0) {
525: MatMult(fctx->M1,v,r);
526: KSPSolve(nep->refineksp,r,dv);
527: KSPGetConvergedReason(nep->refineksp,&reason);
528: if (reason>0) {
529: VecDot(dv,v,&deig);
530: deig *= -fctx->m3/fctx->M4;
531: VecAXPY(v,-1.0,dv);
532: VecNorm(v,NORM_2,&norm);
533: VecScale(v,1.0/norm);
534: nep->eigr[idx_sc[color]] -= deig;
535: fail_sc[color] = 0;
536: }
537: }
538: break;
539: }
540: if (nep->npart==1) { BVRestoreColumn(nep->V,idx_sc[color],&v); }
541: }
542: }
543: VecDestroy(&t[0]);
544: VecDestroy(&t[1]);
545: VecDestroy(&dv);
546: VecDestroy(&ctx->w);
547: VecDestroy(&r);
548: PetscFree3(idx_sc,its_sc,fail_sc);
549: VecScatterDestroy(&ctx->nst);
550: if (nep->npart>1) {
551: VecDestroy(&ctx->vg);
552: VecDestroy(&ctx->v);
553: for (i=0;i<nep->nt;i++) {
554: MatDestroy(&ctx->A[i]);
555: }
556: for (i=0;i<nep->npart;i++) {
557: VecScatterDestroy(&ctx->scatter_id[i]);
558: }
559: PetscFree2(ctx->A,ctx->scatter_id);
560: }
561: if (fctx && nep->scheme==NEP_REFINE_SCHEME_SCHUR) {
562: MatDestroy(&P);
563: MatDestroy(&fctx->M1);
564: PetscFree(fctx);
565: }
566: if (nep->scheme==NEP_REFINE_SCHEME_EXPLICIT) {
567: MatDestroy(&Mt);
568: VecDestroy(&dvv);
569: VecDestroy(&rr);
570: VecDestroy(&ctx->nv);
571: if (nep->npart>1) {
572: for (i=0;i<nep->nt;i++) { FNDestroy(&ctx->fn[i]); }
573: PetscFree(ctx->fn);
574: }
575: }
576: if (nep->scheme==NEP_REFINE_SCHEME_MBE) {
577: if (nep->npart>1) {
578: for (i=0;i<nep->nt;i++) { FNDestroy(&ctx->fn[i]); }
579: PetscFree(ctx->fn);
580: }
581: }
582: MatDestroy(&T);
583: PetscFree(ctx);
584: PetscLogEventEnd(NEP_Refine,nep,0,0,0);
585: return(0);
586: }