Actual source code: iscoloring.c
/*$Id: iscoloring.c,v 1.70 2001/06/21 21:15:55 bsmith Exp $*/

#include "petscsys.h"
#include "petscis.h"
#undef __FUNCT__
#define __FUNCT__ "ISColoringDestroy"
/*@C
   ISColoringDestroy - Destroys a coloring context.

   Collective on ISColoring

   Input Parameter:
.  iscoloring - the coloring context

   Level: advanced

.seealso: ISColoringView(), MatGetColoring()
@*/
int ISColoringDestroy(ISColoring iscoloring)
{
  int i,ierr;

  if (--iscoloring->refct > 0) return(0);

  if (iscoloring->is) {
    for (i=0; i<iscoloring->n; i++) {
      ISDestroy(iscoloring->is[i]);
    }
    PetscFree(iscoloring->is);
  }
  if (iscoloring->colors) {
    PetscFree(iscoloring->colors);
  }
  PetscCommDestroy_Private(&iscoloring->comm);
  PetscFree(iscoloring);
  return(0);
}
#undef __FUNCT__
#define __FUNCT__ "ISColoringView"
/*@C
   ISColoringView - Views a coloring context.

   Collective on ISColoring

   Input Parameters:
+  iscoloring - the coloring context
-  viewer - the viewer

   Level: advanced

.seealso: ISColoringDestroy(), ISColoringGetIS(), MatGetColoring()
@*/
int ISColoringView(ISColoring iscoloring,PetscViewer viewer)
{
  int        i,ierr;
  PetscTruth isascii;
  IS         *is;

  if (!viewer) viewer = PETSC_VIEWER_STDOUT_(iscoloring->comm);

  PetscTypeCompare((PetscObject)viewer,PETSC_VIEWER_ASCII,&isascii);
  if (isascii) {
    MPI_Comm comm;
    int      rank;
    PetscObjectGetComm((PetscObject)viewer,&comm);
    MPI_Comm_rank(comm,&rank);
    PetscViewerASCIISynchronizedPrintf(viewer,"[%d] Number of colors %d\n",rank,iscoloring->n);
    PetscViewerFlush(viewer);
  } else {
    SETERRQ1(1,"Viewer type %s not supported for ISColoring",((PetscObject)viewer)->type_name);
  }

  ISColoringGetIS(iscoloring,PETSC_IGNORE,&is);
  for (i=0; i<iscoloring->n; i++) {
    ISView(iscoloring->is[i],viewer);
  }
  ISColoringRestoreIS(iscoloring,&is);
  return(0);
}
#undef __FUNCT__
#define __FUNCT__ "ISColoringGetIS"
/*@C
   ISColoringGetIS - Extracts index sets from the coloring context

   Collective on ISColoring

   Input Parameter:
.  iscoloring - the coloring context

   Output Parameters:
+  nn - number of index sets in the coloring context
-  is - array of index sets

   Level: advanced

.seealso: ISColoringRestoreIS(), ISColoringView()
@*/
int ISColoringGetIS(ISColoring iscoloring,int *nn,IS *isis[])
{
  if (nn) *nn = iscoloring->n;
  if (isis) {
    if (!iscoloring->is) {
      int *mcolors,**ii,nc = iscoloring->n,i,base,n = iscoloring->N;
      int *colors = iscoloring->colors;
      IS  *is;

      /* generate the lists of nodes for each color */
      PetscMalloc((nc+1)*sizeof(int),&mcolors);
      PetscMemzero(mcolors,nc*sizeof(int));
      for (i=0; i<n; i++) {
        mcolors[colors[i]]++;
      }

      /* ii[c] points at the start of color c's slice of one contiguous buffer */
      PetscMalloc((nc+1)*sizeof(int*),&ii);
      PetscMalloc((n+1)*sizeof(int),&ii[0]);
      for (i=1; i<nc; i++) {
        ii[i] = ii[i-1] + mcolors[i-1];
      }

      /* base = global number of the first local node (exclusive prefix sum of local sizes) */
      MPI_Scan(&iscoloring->N,&base,1,MPI_INT,MPI_SUM,iscoloring->comm);
      base -= iscoloring->N;
      PetscMemzero(mcolors,nc*sizeof(int));
      for (i=0; i<n; i++) {
        ii[colors[i]][mcolors[colors[i]]++] = i + base;
      }
      PetscMalloc((nc+1)*sizeof(IS),&is);
      for (i=0; i<nc; i++) {
        ISCreateGeneral(iscoloring->comm,mcolors[i],ii[i],is+i);
      }

      iscoloring->is = is;
      PetscFree(ii[0]);
      PetscFree(ii);
      PetscFree(mcolors);
    }
    *isis = iscoloring->is;
  }
  return(0);
}
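
/*
   A minimal usage sketch (not part of the original iscoloring.c): extracting
   the per-color index sets from an existing coloring and viewing each one.
   The function name ExampleColoringGetIS is illustrative only; the coloring
   is assumed to have been built elsewhere, e.g. by ISColoringCreate().
*/
int ExampleColoringGetIS(ISColoring iscoloring)
{
  int ierr,i,nis;
  IS  *isa;

  ierr = ISColoringGetIS(iscoloring,&nis,&isa);CHKERRQ(ierr);
  for (i=0; i<nis; i++) {
    /* isa[i] holds the global numbers of the nodes with color i */
    ierr = ISView(isa[i],PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  }
  ierr = ISColoringRestoreIS(iscoloring,&isa);CHKERRQ(ierr);
  return(0);
}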
#undef __FUNCT__
#define __FUNCT__ "ISColoringRestoreIS"
/*@C
   ISColoringRestoreIS - Restores the index sets extracted from the coloring context

   Collective on ISColoring

   Input Parameters:
+  iscoloring - the coloring context
-  is - array of index sets

   Level: advanced

.seealso: ISColoringGetIS(), ISColoringView()
@*/
int ISColoringRestoreIS(ISColoring iscoloring,IS *is[])
{
  /* currently nothing is done here */
  return(0);
}
#undef __FUNCT__
#define __FUNCT__ "ISColoringCreate"
/*@C
   ISColoringCreate - Generates an ISColoring context from lists (provided
   by each processor) of colors for each node.

   Collective on MPI_Comm

   Input Parameters:
+  comm - communicator for the processors creating the coloring
.  n - number of nodes on this processor
-  colors - array containing the colors for this processor; color
   numbers begin at 0. In C/C++ this array must have been obtained with
   PetscMalloc() and should NOT be freed (ISColoringDestroy() will free it).

   Output Parameter:
.  iscoloring - the resulting coloring data structure

   Options Database Key:
.  -is_coloring_view - Activates ISColoringView()

   Level: advanced

   Notes: By default sets the coloring type to IS_COLORING_LOCAL

.seealso: MatColoringCreate(), ISColoringView(), ISColoringDestroy(), ISColoringSetType()
@*/
int ISColoringCreate(MPI_Comm comm,int n,const int colors[],ISColoring *iscoloring)
{
  int        ierr,size,rank,base,top,tag,nc,ncwork,i;
  PetscTruth flg;
  MPI_Status status;

  PetscNew(struct _p_ISColoring,iscoloring);
  PetscCommDuplicate_Private(comm,&(*iscoloring)->comm,&tag);
  comm = (*iscoloring)->comm;

  /* compute the number of the first node on my processor */
  MPI_Comm_size(comm,&size);

  /* should use MPI_Scan() */
  MPI_Comm_rank(comm,&rank);
  if (!rank) {
    base = 0;
    top  = n;
  } else {
    MPI_Recv(&base,1,MPI_INT,rank-1,tag,comm,&status);
    top = base+n;
  }
  if (rank < size-1) {
    MPI_Send(&top,1,MPI_INT,rank+1,tag,comm);
  }

  /* compute the total number of colors */
  ncwork = 0;
  for (i=0; i<n; i++) {
#if defined(PETSC_USE_BOPT_g)
    if (colors[i] < 0) SETERRQ2(1,"Colors must be 0 or greater, you have given %d at %d",colors[i],i);
#endif
    if (ncwork < colors[i]) ncwork = colors[i];
  }
  ncwork++;
  MPI_Allreduce(&ncwork,&nc,1,MPI_INT,MPI_MAX,comm);
  (*iscoloring)->n      = nc;
  (*iscoloring)->is     = 0;
  (*iscoloring)->colors = (int*)colors;
  (*iscoloring)->N      = n;
  (*iscoloring)->refct  = 1;
  (*iscoloring)->ctype  = IS_COLORING_LOCAL;

  PetscOptionsHasName(PETSC_NULL,"-is_coloring_view",&flg);
  if (flg) {
    ISColoringView(*iscoloring,PETSC_VIEWER_STDOUT_((*iscoloring)->comm));
  }
  PetscLogInfo(0,"ISColoringCreate: Number of colors %d\n",nc);
  return(0);
}
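
/*
   A minimal usage sketch (not part of the original iscoloring.c) showing the
   full lifecycle of an ISColoring.  ExampleColoringCreate and the simple
   even/odd coloring are illustrative assumptions.  Note the hand-off: the
   colors array is PetscMalloc()'d by the caller and later freed inside
   ISColoringDestroy().
*/
int ExampleColoringCreate(MPI_Comm comm,int n)
{
  int        ierr,i,*colors;
  ISColoring iscoloring;

  ierr = PetscMalloc((n+1)*sizeof(int),&colors);CHKERRQ(ierr);
  for (i=0; i<n; i++) colors[i] = i % 2;   /* two colors, alternating */
  ierr = ISColoringCreate(comm,n,colors,&iscoloring);CHKERRQ(ierr);
  ierr = ISColoringView(iscoloring,PETSC_VIEWER_STDOUT_(comm));CHKERRQ(ierr);
  ierr = ISColoringDestroy(iscoloring);CHKERRQ(ierr);   /* also frees colors */
  return(0);
}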
#undef __FUNCT__
#define __FUNCT__ "ISPartitioningToNumbering"
/*@C
   ISPartitioningToNumbering - Takes an ISPartitioning and on each processor
   generates an IS that contains a new global node number for each index based
   on the partitioning.

   Collective on IS

   Input Parameter:
.  partitioning - a partitioning as generated by MatPartitioningApply()

   Output Parameter:
.  is - on each processor the index set that defines the global numbers
   (in the new numbering) for all the nodes currently (before the partitioning)
   on that processor

   Level: advanced

.seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningCount()
@*/
int ISPartitioningToNumbering(IS part,IS *is)
{
  MPI_Comm comm;
  int      i,ierr,size,*indices,np,n,*starts,*sums,*lsizes,*newi;

  PetscObjectGetComm((PetscObject)part,&comm);
  MPI_Comm_size(comm,&size);

  /* count the number of partitions, make sure <= size */
  ISGetLocalSize(part,&n);
  ISGetIndices(part,&indices);
  np = 0;
  for (i=0; i<n; i++) {
    np = PetscMax(np,indices[i]);
  }
  if (np >= size) {
    SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Number of partitions %d larger than number of processors %d",np,size);
  }

  /*
     lsizes - number of elements of each partition on this particular processor
     sums   - total number of "previous" nodes for any particular partition
     starts - global number of first element in each partition on this processor
  */
  ierr   = PetscMalloc(3*size*sizeof(int),&lsizes);
  starts = lsizes + size;
  sums   = starts + size;
  ierr   = PetscMemzero(lsizes,size*sizeof(int));
  for (i=0; i<n; i++) {
    lsizes[indices[i]]++;
  }
  MPI_Allreduce(lsizes,sums,size,MPI_INT,MPI_SUM,comm);
  MPI_Scan(lsizes,starts,size,MPI_INT,MPI_SUM,comm);
  for (i=0; i<size; i++) {
    starts[i] -= lsizes[i];
  }
  for (i=1; i<size; i++) {
    sums[i]   += sums[i-1];
    starts[i] += sums[i-1];
  }

  /*
     For each local index give it the new global number
  */
  PetscMalloc((n+1)*sizeof(int),&newi);
  for (i=0; i<n; i++) {
    newi[i] = starts[indices[i]]++;
  }
  PetscFree(lsizes);

  ISRestoreIndices(part,&indices);
  ISCreateGeneral(comm,n,newi,is);
  PetscFree(newi);
  ISSetPermutation(*is);
  return(0);
}
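
/*
   A worked sketch (not part of the original iscoloring.c) of what
   ISPartitioningToNumbering() produces.  Suppose two processes: process 0
   owns three nodes with partition values {1,0,1} and process 1 owns two
   nodes with values {0,1}.  Partition 0 receives global numbers 0..1 and
   partition 1 receives 2..4, so the resulting IS is {2,0,3} on process 0
   and {1,4} on process 1.  ExampleNumbering is an illustrative name.
*/
int ExampleNumbering(IS part)
{
  int ierr;
  IS  newnumbering;

  ierr = ISPartitioningToNumbering(part,&newnumbering);CHKERRQ(ierr);
  ierr = ISView(newnumbering,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = ISDestroy(newnumbering);CHKERRQ(ierr);
  return(0);
}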
#undef __FUNCT__
#define __FUNCT__ "ISPartitioningCount"
/*@C
   ISPartitioningCount - Takes an ISPartitioning and determines the number of
   resulting elements on each processor

   Collective on IS

   Input Parameter:
.  partitioning - a partitioning as generated by MatPartitioningApply()

   Output Parameter:
.  count - array of length size of communicator associated with IS, contains
   the number of elements assigned to each processor

   Level: advanced

.seealso: MatPartitioningCreate(), AOCreateBasic(), ISPartitioningToNumbering()
@*/
int ISPartitioningCount(IS part,int count[])
{
  MPI_Comm comm;
  int      i,ierr,size,*indices,np,n,*lsizes;

  PetscObjectGetComm((PetscObject)part,&comm);
  MPI_Comm_size(comm,&size);

  /* count the number of partitions, make sure <= size */
  ISGetLocalSize(part,&n);
  ISGetIndices(part,&indices);
  np = 0;
  for (i=0; i<n; i++) {
    np = PetscMax(np,indices[i]);
  }
  if (np >= size) {
    SETERRQ2(PETSC_ERR_ARG_OUTOFRANGE,"Number of partitions %d larger than number of processors %d",np,size);
  }

  /*
     lsizes - number of elements of each partition on this particular processor
  */
  PetscMalloc(size*sizeof(int),&lsizes);
  ierr = PetscMemzero(lsizes,size*sizeof(int));
  for (i=0; i<n; i++) {
    lsizes[indices[i]]++;
  }
  ISRestoreIndices(part,&indices);
  MPI_Allreduce(lsizes,count,size,MPI_INT,MPI_SUM,comm);
  PetscFree(lsizes);
  return(0);
}
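
/*
   A minimal usage sketch (not part of the original iscoloring.c): the count
   array must have one entry per process in the IS's communicator, and the
   caller owns it.  ExampleCount is an illustrative name.
*/
int ExampleCount(IS part)
{
  int      ierr,size,i,*count;
  MPI_Comm comm;

  ierr = PetscObjectGetComm((PetscObject)part,&comm);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  ierr = PetscMalloc(size*sizeof(int),&count);CHKERRQ(ierr);
  ierr = ISPartitioningCount(part,count);CHKERRQ(ierr);
  for (i=0; i<size; i++) {
    ierr = PetscPrintf(comm,"partition %d gets %d elements\n",i,count[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(count);CHKERRQ(ierr);
  return(0);
}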
#undef __FUNCT__
#define __FUNCT__ "ISAllGather"
/*@C
   ISAllGather - Given an index set (IS) on each processor, generates a large
   index set (the same on each processor) by concatenating together each
   processor's index set.

   Collective on IS

   Input Parameter:
.  is - the distributed index set

   Output Parameter:
.  isout - the concatenated index set (same on all processors)

   Notes:
   ISAllGather() is clearly not scalable for large index sets.

   The IS created on each processor must be created with a common
   communicator (e.g., PETSC_COMM_WORLD). If the index sets were created
   with PETSC_COMM_SELF, this routine will not work as expected, since
   each process will generate its own new IS that consists only of
   itself.

   Level: intermediate

   Concepts: gather^index sets
   Concepts: index sets^gathering to all processors
   Concepts: IS^gathering to all processors

.seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGatherIndices()
@*/
int ISAllGather(IS is,IS *isout)
{
  int      *indices,*sizes,size,*offsets,n,*lindices,i,N,ierr;
  MPI_Comm comm;

  PetscObjectGetComm((PetscObject)is,&comm);
  MPI_Comm_size(comm,&size);
  PetscMalloc(2*size*sizeof(int),&sizes);
  offsets = sizes + size;

  ISGetLocalSize(is,&n);
  MPI_Allgather(&n,1,MPI_INT,sizes,1,MPI_INT,comm);
  offsets[0] = 0;
  for (i=1; i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
  N = offsets[size-1] + sizes[size-1];

  PetscMalloc((N+1)*sizeof(int),&indices);
  ISGetIndices(is,&lindices);
  MPI_Allgatherv(lindices,n,MPI_INT,indices,sizes,offsets,MPI_INT,comm);
  ISRestoreIndices(is,&lindices);

  ISCreateGeneral(PETSC_COMM_SELF,N,indices,isout);
  PetscFree(indices);
  PetscFree(sizes);
  return(0);
}
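
/*
   A minimal usage sketch (not part of the original iscoloring.c): gathering
   a distributed IS into an identical sequential copy on every process.  The
   input must live on a parallel communicator such as PETSC_COMM_WORLD;
   ExampleAllGather is an illustrative name.
*/
int ExampleAllGather(IS is)
{
  int ierr;
  IS  isall;

  ierr = ISAllGather(is,&isall);CHKERRQ(ierr);
  /* isall lives on PETSC_COMM_SELF; its local size is the global total */
  ierr = ISView(isall,PETSC_VIEWER_STDOUT_SELF);CHKERRQ(ierr);
  ierr = ISDestroy(isall);CHKERRQ(ierr);
  return(0);
}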
#undef __FUNCT__
#define __FUNCT__ "ISAllGatherIndices"
/*@C
   ISAllGatherIndices - Given a set of integers on each processor, generates a large
   set (the same on each processor) by concatenating together each processor's integers

   Collective on MPI_Comm

   Input Parameters:
+  comm - communicator to share the indices
.  n - local size of set
-  lindices - local indices

   Output Parameters:
+  outN - total number of indices
-  outindices - all of the integers

   Notes:
   ISAllGatherIndices() is clearly not scalable for large index sets.

   Level: intermediate

   Concepts: gather^index sets
   Concepts: index sets^gathering to all processors
   Concepts: IS^gathering to all processors

.seealso: ISCreateGeneral(), ISCreateStride(), ISCreateBlock(), ISAllGather()
@*/
int ISAllGatherIndices(MPI_Comm comm,int n,int *lindices,int *outN,int **outindices)
{
  int *indices,*sizes,size,*offsets,i,N,ierr;

  MPI_Comm_size(comm,&size);
  PetscMalloc(2*size*sizeof(int),&sizes);
  offsets = sizes + size;

  MPI_Allgather(&n,1,MPI_INT,sizes,1,MPI_INT,comm);
  offsets[0] = 0;
  for (i=1; i<size; i++) offsets[i] = offsets[i-1] + sizes[i-1];
  N = offsets[size-1] + sizes[size-1];

  PetscMalloc((N+1)*sizeof(int),&indices);
  MPI_Allgatherv(lindices,n,MPI_INT,indices,sizes,offsets,MPI_INT,comm);
  /* free sizes/offsets only after MPI_Allgatherv() has used them */
  PetscFree(sizes);

  *outindices = indices;
  if (outN) *outN = N;
  return(0);
}
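
/*
   A minimal usage sketch (not part of the original iscoloring.c): each
   process contributes one integer (its rank) and receives the concatenation
   from all processes.  The gathered array is PetscMalloc()'d inside
   ISAllGatherIndices(), so the caller frees it.  ExampleAllGatherIndices is
   an illustrative name.
*/
int ExampleAllGatherIndices(MPI_Comm comm)
{
  int ierr,rank,N,i,*all;

  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  ierr = ISAllGatherIndices(comm,1,&rank,&N,&all);CHKERRQ(ierr);
  for (i=0; i<N; i++) {
    ierr = PetscPrintf(comm,"gathered[%d] = %d\n",i,all[i]);CHKERRQ(ierr);
  }
  ierr = PetscFree(all);CHKERRQ(ierr);   /* caller frees the gathered array */
  return(0);
}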