/* mmaij.c (petsc-3.8.4, 2018-03-24) */
/*
   Support for the parallel AIJ matrix-vector multiply
*/
#include <../src/mat/impls/aij/mpi/mpiaij.h>
#include <petsc/private/isimpl.h>
PetscErrorCode MatSetUpMultiply_MPIAIJ(Mat mat)
{
  Mat_MPIAIJ         *aij = (Mat_MPIAIJ*)mat->data;
  Mat_SeqAIJ         *B   = (Mat_SeqAIJ*)(aij->B->data);
  PetscErrorCode     ierr;
  PetscInt           i,j,*aj = B->j,ec = 0,*garray;
  IS                 from,to;
  Vec                gvec;
#if defined(PETSC_USE_CTABLE)
  PetscTable         gid1_lid1;
  PetscTablePosition tpos;
  PetscInt           gid,lid;
#else
  PetscInt           N = mat->cmap->N,*indices;
#endif
  PetscFunctionBegin;
#if defined(PETSC_USE_CTABLE)
  /* use a table */
  ierr = PetscTableCreate(aij->B->rmap->n,mat->cmap->N+1,&gid1_lid1);CHKERRQ(ierr);
  for (i=0; i<aij->B->rmap->n; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      PetscInt data,gid1 = aj[B->i[i] + j] + 1;
      ierr = PetscTableFind(gid1_lid1,gid1,&data);CHKERRQ(ierr);
      if (!data) {
        /* one based table */
        ierr = PetscTableAdd(gid1_lid1,gid1,++ec,INSERT_VALUES);CHKERRQ(ierr);
      }
    }
  }
  /* form array of columns we need */
  ierr = PetscMalloc1(ec+1,&garray);CHKERRQ(ierr);
  ierr = PetscTableGetHeadPosition(gid1_lid1,&tpos);CHKERRQ(ierr);
  while (tpos) {
    ierr = PetscTableGetNext(gid1_lid1,&tpos,&gid,&lid);CHKERRQ(ierr);
    gid--;
    lid--;
    garray[lid] = gid;
  }
  ierr = PetscSortInt(ec,garray);CHKERRQ(ierr); /* sort, and rebuild */
  ierr = PetscTableRemoveAll(gid1_lid1);CHKERRQ(ierr);
  for (i=0; i<ec; i++) {
    ierr = PetscTableAdd(gid1_lid1,garray[i]+1,i+1,INSERT_VALUES);CHKERRQ(ierr);
  }
  /* compact out the extra columns in B */
  for (i=0; i<aij->B->rmap->n; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      PetscInt gid1 = aj[B->i[i] + j] + 1;
      ierr = PetscTableFind(gid1_lid1,gid1,&lid);CHKERRQ(ierr);
      lid--;
      aj[B->i[i] + j] = lid;
    }
  }
  aij->B->cmap->n  = aij->B->cmap->N = ec;
  aij->B->cmap->bs = 1;

  ierr = PetscLayoutSetUp((aij->B->cmap));CHKERRQ(ierr);
  ierr = PetscTableDestroy(&gid1_lid1);CHKERRQ(ierr);
#else
  /* Make an array as long as the number of columns */
  /* mark those columns that are in aij->B */
  ierr = PetscCalloc1(N+1,&indices);CHKERRQ(ierr);
  for (i=0; i<aij->B->rmap->n; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      if (!indices[aj[B->i[i] + j]]) ec++;
      indices[aj[B->i[i] + j]] = 1;
    }
  }

  /* form array of columns we need */
  ierr = PetscMalloc1(ec+1,&garray);CHKERRQ(ierr);
  ec   = 0;
  for (i=0; i<N; i++) {
    if (indices[i]) garray[ec++] = i;
  }

  /* make indices now point into garray */
  for (i=0; i<ec; i++) {
    indices[garray[i]] = i;
  }

  /* compact out the extra columns in B */
  for (i=0; i<aij->B->rmap->n; i++) {
    for (j=0; j<B->ilen[i]; j++) {
      aj[B->i[i] + j] = indices[aj[B->i[i] + j]];
    }
  }
  aij->B->cmap->n  = aij->B->cmap->N = ec;
  aij->B->cmap->bs = 1;

  ierr = PetscLayoutSetUp((aij->B->cmap));CHKERRQ(ierr);
  ierr = PetscFree(indices);CHKERRQ(ierr);
#endif
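  /*
     Illustrative example (values invented for exposition): if B on this process
     references the global columns {3,7,12}, both branches above produce ec = 3
     and garray = {3,7,12}, and the column indices stored in B->j are remapped
     to the local values {0,1,2}; garray[lid] recovers the global column of
     local column lid.
  */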
  /* create local vector that is used to scatter into */
  ierr = VecCreateSeq(PETSC_COMM_SELF,ec,&aij->lvec);CHKERRQ(ierr);

  /* create two temporary index sets used to build the scatter-gather */
  ierr = ISCreateGeneral(((PetscObject)mat)->comm,ec,garray,PETSC_COPY_VALUES,&from);CHKERRQ(ierr);

  ierr = ISCreateStride(PETSC_COMM_SELF,ec,0,1,&to);CHKERRQ(ierr);

  /* create a temporary global vector to generate the scatter context */
  /* this does not allocate the array's memory, so it is efficient */
  ierr = VecCreateMPIWithArray(PetscObjectComm((PetscObject)mat),1,mat->cmap->n,mat->cmap->N,NULL,&gvec);CHKERRQ(ierr);

  /* generate the scatter context */
  ierr = VecScatterCreate(gvec,from,aij->lvec,to,&aij->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)aij->Mvctx);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)aij->lvec);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)from);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)mat,(PetscObject)to);CHKERRQ(ierr);

  aij->garray = garray;

  ierr = PetscLogObjectMemory((PetscObject)mat,(ec+1)*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = ISDestroy(&from);CHKERRQ(ierr);
  ierr = ISDestroy(&to);CHKERRQ(ierr);
  ierr = VecDestroy(&gvec);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
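/*
   A sketch of how the machinery built above is used (cf. MatMult_MPIAIJ() in
   src/mat/impls/aij/mpi/mpiaij.c): ghost values are scattered into lvec while
   the purely local product proceeds, then the off-diagonal block is applied
   to the gathered ghost values.

     ierr = VecScatterBegin(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = (*a->A->ops->mult)(a->A,xx,yy);CHKERRQ(ierr);
     ierr = VecScatterEnd(a->Mvctx,xx,a->lvec,INSERT_VALUES,SCATTER_FORWARD);CHKERRQ(ierr);
     ierr = (*a->B->ops->multadd)(a->B,a->lvec,yy,yy);CHKERRQ(ierr);
*/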
/*
   Takes the local part of an already assembled MPIAIJ matrix
   and disassembles it. This is to allow new nonzeros into the matrix
   that require more communication in the matrix-vector multiply.
   Thus certain data structures must be rebuilt.

   Kind of slow! But that's what application programmers get when
   they are sloppy.
*/
PetscErrorCode MatDisAssemble_MPIAIJ(Mat A)
{
  Mat_MPIAIJ     *aij  = (Mat_MPIAIJ*)A->data;
  Mat            B     = aij->B,Bnew;
  Mat_SeqAIJ     *Baij = (Mat_SeqAIJ*)B->data;
  PetscErrorCode ierr;
  PetscInt       i,j,m = B->rmap->n,n = A->cmap->N,col,ct = 0,*garray = aij->garray,*nz,ec;
  PetscScalar    v;

  PetscFunctionBegin;
  /* free stuff related to matrix-vector multiply */
  ierr = VecGetSize(aij->lvec,&ec);CHKERRQ(ierr); /* needed for PetscLogObjectMemory below */
  ierr = VecDestroy(&aij->lvec);CHKERRQ(ierr);
  ierr = VecScatterDestroy(&aij->Mvctx);CHKERRQ(ierr);
  if (aij->colmap) {
#if defined(PETSC_USE_CTABLE)
    ierr = PetscTableDestroy(&aij->colmap);CHKERRQ(ierr);
#else
    ierr = PetscFree(aij->colmap);CHKERRQ(ierr);
    ierr = PetscLogObjectMemory((PetscObject)A,-aij->B->cmap->n*sizeof(PetscInt));CHKERRQ(ierr);
#endif
  }

  /* make sure that B is assembled so we can access its values */
  ierr = MatAssemblyBegin(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(B,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  /* invent new B and copy stuff over */
  ierr = PetscMalloc1(m+1,&nz);CHKERRQ(ierr);
  for (i=0; i<m; i++) {
    nz[i] = Baij->i[i+1] - Baij->i[i];
  }
  ierr = MatCreate(PETSC_COMM_SELF,&Bnew);CHKERRQ(ierr);
  ierr = MatSetSizes(Bnew,m,n,m,n);CHKERRQ(ierr);
  ierr = MatSetBlockSizesFromMats(Bnew,A,A);CHKERRQ(ierr);
  ierr = MatSetType(Bnew,((PetscObject)B)->type_name);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(Bnew,0,nz);CHKERRQ(ierr);

  ((Mat_SeqAIJ*)Bnew->data)->nonew = Baij->nonew; /* inherit insertion error options */

  /*
     Ensure that B's nonzerostate is monotonically increasing.
     Or should this follow the MatSetValues() loop to preserve B's nonzerostate across a MatDisAssemble() call?
  */
  Bnew->nonzerostate = B->nonzerostate;

  ierr = PetscFree(nz);CHKERRQ(ierr);
  for (i=0; i<m; i++) {
    for (j=Baij->i[i]; j<Baij->i[i+1]; j++) {
      col  = garray[Baij->j[ct]];
      v    = Baij->a[ct++];
      ierr = MatSetValues(Bnew,1,&i,1,&col,&v,B->insertmode);CHKERRQ(ierr);
    }
  }
  ierr = PetscFree(aij->garray);CHKERRQ(ierr);
  ierr = PetscLogObjectMemory((PetscObject)A,-ec*sizeof(PetscInt));CHKERRQ(ierr);
  ierr = MatDestroy(&B);CHKERRQ(ierr);
  ierr = PetscLogObjectParent((PetscObject)A,(PetscObject)Bnew);CHKERRQ(ierr);

  aij->B           = Bnew;
  A->was_assembled = PETSC_FALSE;
  PetscFunctionReturn(0);
}
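/*
   Note: after disassembly Bnew holds global column indices (its column
   dimension is A->cmap->N), and clearing A->was_assembled signals that the
   scatter and lvec freed above must be recreated; MatSetUpMultiply_MPIAIJ()
   re-compacts the columns and rebuilds them the next time A is assembled.
*/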
/* ugly stuff added for Glenn; someday we should fix this up */

static PetscInt *auglyrmapd = 0,*auglyrmapo = 0; /* mapping from the local ordering to the "diagonal" and "off-diagonal" parts of the local matrix */
static Vec      auglydd     = 0,auglyoo     = 0; /* work vectors used to scale the two parts of the local matrix */
PetscErrorCode MatMPIAIJDiagonalScaleLocalSetUp(Mat inA,Vec scale)
{
  Mat_MPIAIJ     *ina = (Mat_MPIAIJ*)inA->data; /* access private part of matrix */
  PetscErrorCode ierr;
  PetscInt       i,n,nt,cstart,cend,no,*garray = ina->garray,*lindices;
  PetscInt       *r_rmapd,*r_rmapo;

  PetscFunctionBegin;
  ierr = MatGetOwnershipRange(inA,&cstart,&cend);CHKERRQ(ierr);
  ierr = MatGetSize(ina->A,NULL,&n);CHKERRQ(ierr);
  ierr = PetscCalloc1(inA->rmap->mapping->n+1,&r_rmapd);CHKERRQ(ierr);
  nt   = 0;
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (inA->rmap->mapping->indices[i] >= cstart && inA->rmap->mapping->indices[i] < cend) {
      nt++;
      r_rmapd[i] = inA->rmap->mapping->indices[i] + 1;
    }
  }
  if (nt != n) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hmm nt %D n %D",nt,n);
  ierr = PetscMalloc1(n+1,&auglyrmapd);CHKERRQ(ierr);
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (r_rmapd[i]) {
      auglyrmapd[(r_rmapd[i]-1)-cstart] = i;
    }
  }
  ierr = PetscFree(r_rmapd);CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF,n,&auglydd);CHKERRQ(ierr);

  ierr = PetscCalloc1(inA->cmap->N+1,&lindices);CHKERRQ(ierr);
  for (i=0; i<ina->B->cmap->n; i++) {
    lindices[garray[i]] = i+1;
  }
  no   = inA->rmap->mapping->n - nt;
  ierr = PetscCalloc1(inA->rmap->mapping->n+1,&r_rmapo);CHKERRQ(ierr);
  nt   = 0;
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (lindices[inA->rmap->mapping->indices[i]]) {
      nt++;
      r_rmapo[i] = lindices[inA->rmap->mapping->indices[i]];
    }
  }
  if (nt > no) SETERRQ2(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Hmm nt %D no %D",nt,no);
  ierr = PetscFree(lindices);CHKERRQ(ierr);
  ierr = PetscMalloc1(nt+1,&auglyrmapo);CHKERRQ(ierr);
  for (i=0; i<inA->rmap->mapping->n; i++) {
    if (r_rmapo[i]) {
      auglyrmapo[(r_rmapo[i]-1)] = i;
    }
  }
  ierr = PetscFree(r_rmapo);CHKERRQ(ierr);
  ierr = VecCreateSeq(PETSC_COMM_SELF,nt,&auglyoo);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
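/*
   Illustrative example (values invented for exposition): suppose cstart = 4,
   cend = 8, garray = {9}, and the local-to-global mapping indices are
   {4,5,9,6,7}. Then the "diagonal" entries sit at mapping positions {0,1,3,4},
   giving auglyrmapd = {0,1,3,4}; global column 9 falls in the off-diagonal
   block, giving auglyrmapo = {2}; auglydd and auglyoo get lengths 4 and 1.
*/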
PetscErrorCode MatMPIAIJDiagonalScaleLocal(Mat A,Vec scale)
{
  /* This routine should really be abandoned as it duplicates MatDiagonalScaleLocal() */
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscTryMethod(A,"MatDiagonalScaleLocal_C",(Mat,Vec),(A,scale));CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
PetscErrorCode MatDiagonalScaleLocal_MPIAIJ(Mat A,Vec scale)
{
  Mat_MPIAIJ        *a = (Mat_MPIAIJ*)A->data; /* access private part of matrix */
  PetscErrorCode    ierr;
  PetscInt          n,i;
  PetscScalar       *d,*o;
  const PetscScalar *s;

  PetscFunctionBegin;
  if (!auglyrmapd) {
    ierr = MatMPIAIJDiagonalScaleLocalSetUp(A,scale);CHKERRQ(ierr);
  }

  ierr = VecGetArrayRead(scale,&s);CHKERRQ(ierr);

  ierr = VecGetLocalSize(auglydd,&n);CHKERRQ(ierr);
  ierr = VecGetArray(auglydd,&d);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    d[i] = s[auglyrmapd[i]]; /* copy "diagonal" (true local) portion of scale into dd vector */
  }
  ierr = VecRestoreArray(auglydd,&d);CHKERRQ(ierr);
  /* column scale "diagonal" portion of local matrix */
  ierr = MatDiagonalScale(a->A,NULL,auglydd);CHKERRQ(ierr);

  ierr = VecGetLocalSize(auglyoo,&n);CHKERRQ(ierr);
  ierr = VecGetArray(auglyoo,&o);CHKERRQ(ierr);
  for (i=0; i<n; i++) {
    o[i] = s[auglyrmapo[i]]; /* copy "off-diagonal" portion of scale into oo vector */
  }
  ierr = VecRestoreArrayRead(scale,&s);CHKERRQ(ierr);
  ierr = VecRestoreArray(auglyoo,&o);CHKERRQ(ierr);
  /* column scale "off-diagonal" portion of local matrix */
  ierr = MatDiagonalScale(a->B,NULL,auglyoo);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
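/*
   A minimal usage sketch (illustrative; nlocal stands for the size of the
   matrix's local-to-global mapping, which the scale vector must match, and the
   matrix must have such a mapping attached):

     Vec scale;
     ierr = VecCreateSeq(PETSC_COMM_SELF,nlocal,&scale);CHKERRQ(ierr);
     ... fill scale with one factor per locally referenced column ...
     ierr = MatDiagonalScaleLocal(A,scale);CHKERRQ(ierr);
     ierr = VecDestroy(&scale);CHKERRQ(ierr);

   MatDiagonalScaleLocal() reaches the routine above through the composed
   function "MatDiagonalScaleLocal_C".
*/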