Actual source code: plexland.c
1: #include <petsc/private/dmpleximpl.h>
2: #include <petsclandau.h>
3: #include <petscts.h>
4: #include <petscdmforest.h>
6: /* Landau collision operator */
7: #define PETSC_THREAD_SYNC
8: #define PETSC_DEVICE_FUNC_DECL static
9: #include "land_tensors.h"
11: /* vector padding not supported */
12: #define LANDAU_VL 1
14: int LandauGetIPDataSize(const LandauIPData *const d) {
15: return d->nip_*(1 + d->dim_ + d->ns_); /* assumes Nq == Nb */
16: }
18: static PetscErrorCode LandauPointDataCreate(LandauIPData *IPData, PetscInt dim, PetscInt nip, PetscInt Ns)
19: {
20: PetscErrorCode ierr;
21: PetscInt sz, nip_pad = nip ; /* LANDAU_VL*(nip/LANDAU_VL + !!(nip%LANDAU_VL)); */
22: LandauIPReal *pdata;
24: IPData->dim_ = dim;
25: IPData->nip_ = nip_pad;
26: IPData->ns_ = Ns;
27: sz = LandauGetIPDataSize(IPData);
28: PetscMalloc(sizeof(LandauIPReal)*sz,&pdata);
29: /* pack data */
30: IPData->w = pdata + 0; /* w */
31: IPData->x = pdata + 1*nip_pad;
32: IPData->y = pdata + 2*nip_pad;
33: IPData->z = pdata + 3*nip_pad;
34: IPData->coefs= pdata + (dim+1)*nip_pad;
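      /* Packed layout sketch (single allocation; shown for dim==3 -- for dim==2
         there is no z block and coefs starts at 3*nip_pad):
             pdata = [ w(nip) | x(nip) | y(nip) | z(nip) | coefs(nip*Ns) ]
         hence LandauGetIPDataSize() returns nip*(1 + dim + Ns). */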
35: return(0);
36: }
37: static PetscErrorCode LandauGPUDataDestroy(void *ptr)
38: {
39: P4estVertexMaps *maps = (P4estVertexMaps *)ptr;
40: PetscErrorCode ierr;
42: if (maps->deviceType != LANDAU_CPU) {
43: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
44: if (maps->deviceType == LANDAU_KOKKOS) {
45: LandauKokkosDestroyMatMaps(maps); // implies Kokkos does
46: } // else could be CUDA
47: #elif defined(PETSC_HAVE_CUDA)
48: if (maps->deviceType == LANDAU_CUDA){
49: LandauCUDADestroyMatMaps(maps);
50: } else SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->deviceType %D ?????",maps->deviceType);
51: #endif
52: }
53: PetscFree(maps->c_maps);
54: PetscFree(maps->gIdx);
55: PetscFree(maps);
56: return(0);
57: }
58: static PetscErrorCode LandauPointDataDestroy(LandauIPData *IPData)
59: {
60: PetscErrorCode ierr;
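  /* w is the base pointer of the packed block allocated in LandauPointDataCreate(),
     so this single free also releases x, y, z, and coefs */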
62: PetscFree(IPData->w);
63: return(0);
64: }
65: /* ------------------------------------------------------------------- */
66: /*
67: LandauFormJacobian_Internal - Evaluates Jacobian matrix.
69: Input Parameters:
70: + globX - input vector
71: . actx - optional user-defined context
72: - dim - dimension
74: Output Parameter:
75: . JacP - Jacobian matrix filled, not created
76: */
77: static PetscErrorCode LandauFormJacobian_Internal(Vec a_X, Mat JacP, const PetscInt dim, PetscReal shift, void *a_ctx)
78: {
79: LandauCtx *ctx = (LandauCtx*)a_ctx;
80: PetscErrorCode ierr;
81: PetscInt cStart, cEnd, elemMatSize;
82: DM plex = NULL;
83: PetscDS prob;
84: PetscSection section,globsection;
85: PetscInt numCells,totDim,ej,Nq,*Nbf,*Ncf,Nb,Ncx,Nf,d,f,fieldA,qj;
86: PetscQuadrature quad;
87: PetscTabulation *Tf;
88: PetscReal nu_alpha[LANDAU_MAX_SPECIES], nu_beta[LANDAU_MAX_SPECIES];
89: const PetscReal *quadWeights;
90: PetscReal *invJ,*invJ_a=NULL,*mass_w=NULL;
91: PetscReal invMass[LANDAU_MAX_SPECIES],Eq_m[LANDAU_MAX_SPECIES],m_0=ctx->m_0; /* normalize mass -- not needed! */
92: PetscLogDouble flops;
93: Vec locX;
94: LandauIPData IPData;
95: PetscContainer container;
96: P4estVertexMaps *maps=NULL;
103: /* check for matrix container for GPU assembly */
104: PetscLogEventBegin(ctx->events[10],0,0,0,0);
105: PetscObjectQuery((PetscObject) JacP, "assembly_maps", (PetscObject *) &container);
106: if (container /* && ctx->deviceType != LANDAU_CPU */) {
107: if (!ctx->gpu_assembly) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"GPU matrix container but no GPU assembly");
108: PetscContainerGetPointer(container, (void **) &maps);
109: if (!maps) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"empty GPU matrix container");
110: }
111: DMConvert(ctx->dmv, DMPLEX, &plex);
112: DMCreateLocalVector(plex, &locX);
113: VecZeroEntries(locX); /* zero BCs so don't set */
114: DMGlobalToLocalBegin(plex, a_X, INSERT_VALUES, locX);
115: DMGlobalToLocalEnd (plex, a_X, INSERT_VALUES, locX);
116: DMPlexGetHeightStratum(plex, 0, &cStart, &cEnd);
117: DMGetLocalSection(plex, &section);
118: DMGetGlobalSection(plex, &globsection);
119: DMGetDS(plex, &prob);
120: PetscDSGetTabulation(prob, &Tf); // Bf, Df
121: PetscDSGetDimensions(prob, &Nbf); Nb = Nbf[0]; /* number of vertices*S */
122: PetscSectionGetNumFields(section, &Nf); if (Nf!=ctx->num_species) SETERRQ1(ctx->comm, PETSC_ERR_PLIB, "Nf %D != S",Nf);
123: PetscDSGetComponents(prob, &Ncf); Ncx = Ncf[0]; if (Ncx!=1) SETERRQ1(ctx->comm, PETSC_ERR_PLIB, "Nc %D != 1",Ncx);
124: if (shift==0.0) {
125: for (fieldA=0;fieldA<Nf;fieldA++) {
126: invMass[fieldA] = m_0/ctx->masses[fieldA];
127: Eq_m[fieldA] = ctx->Ez * ctx->t_0 * ctx->charges[fieldA] / (ctx->v_0 * ctx->masses[fieldA]); /* normalize dimensionless */
128: if (dim==2) Eq_m[fieldA] *= 2 * PETSC_PI; /* add the 2pi term that is not in Landau */
129: nu_alpha[fieldA] = PetscSqr(ctx->charges[fieldA]/m_0)*m_0/ctx->masses[fieldA];
130: nu_beta[fieldA] = PetscSqr(ctx->charges[fieldA]/ctx->epsilon0)*ctx->lnLam / (8*PETSC_PI) * ctx->t_0*ctx->n_0/PetscPowReal(ctx->v_0,3);
131: }
132: }
133: PetscDSGetTotalDimension(prob, &totDim);
134: numCells = cEnd - cStart;
135: PetscFEGetQuadrature(ctx->fe[0], &quad);
136: PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, &quadWeights);
137: if (Nb!=Nq) SETERRQ4(ctx->comm, PETSC_ERR_PLIB, "Nb!=Nq %D %D over integration or simplices? Tf[0]->Nb=%D dim=%D",Nb,Nq,Tf[0]->Nb,dim);
138: if (Nq >LANDAU_MAX_NQ) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %D > LANDAU_MAX_NQ (%D)",Nq,LANDAU_MAX_NQ);
139: if (LANDAU_DIM != dim) SETERRQ2(ctx->comm, PETSC_ERR_PLIB, "dim %D != LANDAU_DIM %d",dim,LANDAU_DIM);
140: if (shift==0.0) {
141: MatZeroEntries(JacP);
142: flops = (PetscLogDouble)numCells*(PetscLogDouble)Nq*(PetscLogDouble)(5*dim*dim*Nf*Nf + 165);
143: } else {
144: flops = (PetscLogDouble)numCells*(PetscLogDouble)Nq*(PetscLogDouble)(5*dim*dim*Nf*Nf);
145: }
146: elemMatSize = totDim*totDim;
147: PetscLogEventEnd(ctx->events[10],0,0,0,0);
148: {
149: static int cc = 0;
150: /* collect f data */
151: if (ctx->verbose > 1 || (ctx->verbose > 0 && cc++ == 0)) {
152: PetscInt N,Nloc;
153: MatGetSize(JacP,&N,NULL);
154: VecGetSize(locX,&Nloc);
155: PetscPrintf(ctx->comm,"[%D]%s: %D IPs, %D cells, totDim=%D, Nb=%D, Nq=%D, elemMatSize=%D, dim=%D, Tab: Nb=%D Nf=%D Np=%D cdim=%D N=%D N+hang=%D, shift=%g\n",
156: 0,"FormLandau",Nq*numCells,numCells, totDim, Nb, Nq, elemMatSize, dim, Tf[0]->Nb, Nf, Tf[0]->Np, Tf[0]->cdim, N, Nloc, shift);
157: }
158: if (shift==0.0) {
159: LandauPointDataCreate(&IPData, dim, Nq*numCells, Nf);
160: PetscMalloc1(numCells*Nq*dim*dim,&invJ_a);
161: PetscLogEventBegin(ctx->events[7],0,0,0,0);
162: } else { // mass
163: PetscMalloc1(numCells*Nq,&mass_w);
164: IPData.w = NULL;
165: PetscLogEventBegin(ctx->events[1],0,0,0,0);
166: }
167: /* cache geometry and x, f and df/dx at IPs */
168: for (ej = 0 ; ej < numCells; ++ej) {
169: PetscReal vj[LANDAU_MAX_NQ*LANDAU_DIM],detJj[LANDAU_MAX_NQ], Jdummy[LANDAU_MAX_NQ*LANDAU_DIM*LANDAU_DIM];
170: PetscScalar *coef = NULL;
171: invJ = invJ_a ? invJ_a + ej * Nq*dim*dim : NULL;
172: DMPlexComputeCellGeometryFEM(plex, cStart+ej, quad, vj, Jdummy, invJ, detJj);
173: if (shift!=0.0) { // mass
174: for (qj = 0; qj < Nq; ++qj) {
175: PetscInt gidx = (ej*Nq + qj);
176: mass_w[gidx] = detJj[qj] * quadWeights[qj];
177: if (dim==2) mass_w[gidx] *= 2.*PETSC_PI*vj[qj * dim + 0]; /* cylindrical coordinate: 2 pi r */
178: }
179: } else {
180: DMPlexVecGetClosure(plex, section, locX, cStart+ej, NULL, &coef);
181: PetscMemcpy(&IPData.coefs[ej*Nb*Nf],coef,Nb*Nf*sizeof(PetscScalar)); /* change if LandauIPReal != PetscScalar */
182: /* create point data for cell i for Landau tensor: x, f(x), grad f(x) */
183: for (qj = 0; qj < Nq; ++qj) {
184: PetscInt gidx = (ej*Nq + qj);
185: IPData.x[gidx] = vj[qj * dim + 0]; /* coordinate */
186: IPData.y[gidx] = vj[qj * dim + 1];
187: if (dim==3) IPData.z[gidx] = vj[qj * dim + 2];
188: IPData.w[gidx] = detJj[qj] * quadWeights[qj];
189: if (dim==2) IPData.w[gidx] *= IPData.x[gidx]; /* cylindrical coordinate, w/o 2pi */
190: } /* q */
191: DMPlexVecRestoreClosure(plex, section, locX, cStart+ej, NULL, &coef);
192: }
193: } /* ej */
194: if (shift==0.0) {
195: PetscLogEventEnd(ctx->events[7],0,0,0,0);
196: } else { // mass
197: PetscLogEventEnd(ctx->events[1],0,0,0,0);
198: }
199: }
200: DMRestoreLocalVector(plex, &locX);
202: /* do it */
203: if (ctx->deviceType == LANDAU_CUDA || ctx->deviceType == LANDAU_KOKKOS) {
204: if (ctx->deviceType == LANDAU_CUDA) {
205: #if defined(PETSC_HAVE_CUDA)
206: LandauCUDAJacobian(plex,Nq,nu_alpha,nu_beta,invMass,Eq_m,&IPData,invJ_a,mass_w,shift,ctx->events,JacP);
207: #else
208: SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","cuda");
209: #endif
210: } else if (ctx->deviceType == LANDAU_KOKKOS) {
211: #if defined(PETSC_HAVE_KOKKOS)
212: LandauKokkosJacobian(plex,Nq,nu_alpha,nu_beta,invMass,Eq_m,&IPData,invJ_a,ctx->subThreadBlockSize,mass_w,shift,ctx->events,JacP);
213: #else
214: SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-landau_device_type %s not built","kokkos");
215: #endif
216: }
217: } else { /* CPU version */
218: PetscInt ei, qi;
219: PetscScalar *elemMat;
220: PetscReal *ff, *dudx, *dudy, *dudz;
221: const PetscReal * const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
222: if (shift!=0.0) { // mass
223: PetscMalloc1(elemMatSize, &elemMat);
224: } else {
225: PetscLogEventBegin(ctx->events[8],0,0,0,0);
226: PetscMalloc5(elemMatSize, &elemMat, IPData.nip_*Nf, &ff,IPData.nip_*Nf, &dudx, IPData.nip_*Nf, &dudy, dim==3 ? IPData.nip_*Nf : 0, &dudz);
227: /* compute f and df */
228: for (ei = cStart, invJ = invJ_a; ei < cEnd; ++ei, invJ += Nq*dim*dim) {
229: LandauIPReal *coef = &IPData.coefs[ei*Nb*Nf];
230: PetscScalar u_x[LANDAU_MAX_SPECIES][LANDAU_DIM];
231: /* get f and df */
232: for (qi = 0; qi < Nq; ++qi) {
233: const PetscReal *Bq = &BB[qi*Nb];
234: const PetscReal *Dq = &DD[qi*Nb*dim];
235: const PetscInt gidx = ei*Nq + qi;
236: /* get f & df */
237: for (f = 0; f < Nf; ++f) {
238: PetscInt b, e;
239: PetscScalar refSpaceDer[LANDAU_DIM];
240: ff[gidx + f*IPData.nip_] = 0.0;
241: for (d = 0; d < LANDAU_DIM; ++d) refSpaceDer[d] = 0.0;
242: for (b = 0; b < Nb; ++b) {
243: const PetscInt cidx = b;
244: ff[gidx + f*IPData.nip_] += Bq[cidx]*coef[f*Nb+cidx];
245: for (d = 0; d < dim; ++d) refSpaceDer[d] += Dq[cidx*dim+d]*coef[f*Nb+cidx];
246: }
247: for (d = 0; d < dim; ++d) {
248: for (e = 0, u_x[f][d] = 0.0; e < dim; ++e) {
249: u_x[f][d] += invJ[qi * dim * dim + e*dim+d]*refSpaceDer[e];
250: }
251: }
252: }
253: for (f=0;f<Nf;f++) {
254: dudx[gidx + f*IPData.nip_] = PetscRealPart(u_x[f][0]);
255: dudy[gidx + f*IPData.nip_] = PetscRealPart(u_x[f][1]);
256: #if LANDAU_DIM==3
257: dudz[gidx + f*IPData.nip_] = PetscRealPart(u_x[f][2]);
258: #endif
259: }
260: }
261: }
262: PetscLogEventEnd(ctx->events[8],0,0,0,0);
263: }
264: for (ej = cStart; ej < cEnd; ++ej) {
265: PetscLogEventBegin(ctx->events[3],0,0,0,0);
266: PetscMemzero(elemMat, totDim *totDim * sizeof(PetscScalar));
267: PetscLogEventEnd(ctx->events[3],0,0,0,0);
268: PetscLogEventBegin(ctx->events[4],0,0,0,0);
269: PetscLogFlops((PetscLogDouble)Nq*flops);
270: invJ = invJ_a ? invJ_a + ej * Nq*dim*dim : NULL;
271: for (qj = 0; qj < Nq; ++qj) {
272: const PetscReal * const BB = Tf[0]->T[0], * const DD = Tf[0]->T[1];
273: PetscReal g0[LANDAU_MAX_SPECIES], g2[LANDAU_MAX_SPECIES][LANDAU_DIM], g3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM];
274: PetscInt d,d2,dp,d3,ipidx,fieldA;
275: const PetscInt jpidx = Nq*(ej-cStart) + qj;
276: if (shift==0.0) {
277: const PetscReal * const invJj = &invJ[qj*dim*dim];
278: PetscReal gg2[LANDAU_MAX_SPECIES][LANDAU_DIM],gg3[LANDAU_MAX_SPECIES][LANDAU_DIM][LANDAU_DIM], gg2_temp[LANDAU_DIM], gg3_temp[LANDAU_DIM][LANDAU_DIM];
279: const PetscReal vj[3] = {IPData.x[jpidx], IPData.y[jpidx], IPData.z ? IPData.z[jpidx] : 0}, wj = IPData.w[jpidx];
281: // create g2 & g3
282: for (d=0;d<dim;d++) { // clear accumulation data D & K
283: gg2_temp[d] = 0;
284: for (d2=0;d2<dim;d2++) gg3_temp[d][d2] = 0;
285: }
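                /* Sketch of what the integration-point loop below accumulates:
                     gg2_temp[d]     ~ sum_i w_i U(v_j,v_i)[d][e]  * sum_s nu_beta[s]*invMass[s]*(df_s/dx_e)(v_i)  (drag)
                     gg3_temp[d][d2] ~ sum_i w_i U(v_j,v_i)[d][d2] * sum_s nu_beta[s]*f_s(v_i)                     (diffusion)
                   where U is the Landau tensor from land_tensors.h (2D uses the
                   separate tensors Uk and Ud for the two terms). */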
286: for (ipidx = 0; ipidx < IPData.nip_; ipidx++) {
287: const PetscReal wi = IPData.w[ipidx], x = IPData.x[ipidx], y = IPData.y[ipidx];
288: PetscReal temp1[3] = {0, 0, 0}, temp2 = 0;
289: #if LANDAU_DIM==2
290: PetscReal Ud[2][2], Uk[2][2];
291: LandauTensor2D(vj, x, y, Ud, Uk, (ipidx==jpidx) ? 0. : 1.);
292: #else
293: PetscReal U[3][3], z = IPData.z[ipidx];
294: LandauTensor3D(vj, x, y, z, U, (ipidx==jpidx) ? 0. : 1.);
295: #endif
296: for (fieldA = 0; fieldA < Nf; ++fieldA) {
297: temp1[0] += dudx[ipidx + fieldA*IPData.nip_]*nu_beta[fieldA]*invMass[fieldA];
298: temp1[1] += dudy[ipidx + fieldA*IPData.nip_]*nu_beta[fieldA]*invMass[fieldA];
299: #if LANDAU_DIM==3
300: temp1[2] += dudz[ipidx + fieldA*IPData.nip_]*nu_beta[fieldA]*invMass[fieldA];
301: #endif
302: temp2 += ff[ipidx + fieldA*IPData.nip_]*nu_beta[fieldA];
303: }
304: temp1[0] *= wi;
305: temp1[1] *= wi;
306: #if LANDAU_DIM==3
307: temp1[2] *= wi;
308: #endif
309: temp2 *= wi;
310: #if LANDAU_DIM==2
311: for (d2 = 0; d2 < 2; d2++) {
312: for (d3 = 0; d3 < 2; ++d3) {
313: /* K = U * grad(f): g2=e: i,A */
314: gg2_temp[d2] += Uk[d2][d3]*temp1[d3];
315: /* D = -U * (I \kron (fx)): g3=f: i,j,A */
316: gg3_temp[d2][d3] += Ud[d2][d3]*temp2;
317: }
318: }
319: #else
320: for (d2 = 0; d2 < 3; ++d2) {
321: for (d3 = 0; d3 < 3; ++d3) {
322: /* K = U * grad(f): g2 = e: i,A */
323: gg2_temp[d2] += U[d2][d3]*temp1[d3];
324: /* D = -U * (I \kron (fx)): g3 = f: i,j,A */
325: gg3_temp[d2][d3] += U[d2][d3]*temp2;
326: }
327: }
328: #endif
329: } /* IPs */
330: //if (ej==0) printf("\t:%d.%d) temp gg3=%e %e %e %e\n",ej,qj,gg3_temp[0][0],gg3_temp[1][0],gg3_temp[0][1],gg3_temp[1][1]);
331: // add alpha and put in gg2/3
332: for (fieldA = 0; fieldA < Nf; ++fieldA) {
333: for (d2 = 0; d2 < dim; d2++) {
334: gg2[fieldA][d2] = gg2_temp[d2]*nu_alpha[fieldA];
335: for (d3 = 0; d3 < dim; d3++) {
336: gg3[fieldA][d2][d3] = -gg3_temp[d2][d3]*nu_alpha[fieldA]*invMass[fieldA];
337: }
338: }
339: }
340: /* add electric field term once per IP */
341: for (fieldA = 0; fieldA < Nf; ++fieldA) {
342: gg2[fieldA][dim-1] += Eq_m[fieldA];
343: }
344: /* Jacobian transform - g2, g3 */
345: for (fieldA = 0; fieldA < Nf; ++fieldA) {
346: for (d = 0; d < dim; ++d) {
347: g2[fieldA][d] = 0.0;
348: for (d2 = 0; d2 < dim; ++d2) {
349: g2[fieldA][d] += invJj[d*dim+d2]*gg2[fieldA][d2];
350: g3[fieldA][d][d2] = 0.0;
351: for (d3 = 0; d3 < dim; ++d3) {
352: for (dp = 0; dp < dim; ++dp) {
353: g3[fieldA][d][d2] += invJj[d*dim + d3]*gg3[fieldA][d3][dp]*invJj[d2*dim + dp];
354: }
355: }
356: g3[fieldA][d][d2] *= wj;
357: }
358: g2[fieldA][d] *= wj;
359: }
360: }
361: } else { // mass
362: /* Jacobian transform - g0 */
363: for (fieldA = 0; fieldA < Nf; ++fieldA) {
364: g0[fieldA] = mass_w[jpidx] * shift; // move this to below and remove g0
365: }
366: }
367: /* FE matrix construction */
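          /* Weak-form sketch of the (I,I) block assembled below (A = fieldA):
               Landau (shift==0): elemMat[i,j] += grad(phi_i) . (g2[A] phi_j + g3[A] grad(phi_j))
               mass (shift!=0):   elemMat[i,j] += phi_i g0[A] phi_j */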
368: {
369: PetscInt fieldA,d,f,d2,g;
370: const PetscReal *BJq = &BB[qj*Nb], *DIq = &DD[qj*Nb*dim];
371: /* assemble - on the diagonal (I,I) */
372: for (fieldA = 0; fieldA < Nf ; fieldA++) {
373: for (f = 0; f < Nb ; f++) {
374: const PetscInt i = fieldA*Nb + f; /* Element matrix row */
375: for (g = 0; g < Nb; ++g) {
376: const PetscInt j = fieldA*Nb + g; /* Element matrix column */
377: const PetscInt fOff = i*totDim + j;
378: if (shift==0.0) {
379: for (d = 0; d < dim; ++d) {
380: elemMat[fOff] += DIq[f*dim+d]*g2[fieldA][d]*BJq[g];
381: for (d2 = 0; d2 < dim; ++d2) {
382: elemMat[fOff] += DIq[f*dim + d]*g3[fieldA][d][d2]*DIq[g*dim + d2];
383: }
384: }
385: } else { // mass
386: elemMat[fOff] += BJq[f]*g0[fieldA]*BJq[g];
387: }
388: }
389: }
390: }
391: }
392: } /* qj loop */
393: PetscLogEventEnd(ctx->events[4],0,0,0,0);
394: /* assemble matrix */
395: PetscLogEventBegin(ctx->events[6],0,0,0,0);
396: if (!maps) {
397: DMPlexMatSetClosure(plex, section, globsection, JacP, ej, elemMat, ADD_VALUES);
398: } else { // GPU like assembly for debugging
399: PetscInt fieldA,idx,q,f,g,d,nr,nc,rows0[LANDAU_MAX_Q_FACE],cols0[LANDAU_MAX_Q_FACE],rows[LANDAU_MAX_Q_FACE],cols[LANDAU_MAX_Q_FACE];
400: PetscScalar vals[LANDAU_MAX_Q_FACE*LANDAU_MAX_Q_FACE],row_scale[LANDAU_MAX_Q_FACE],col_scale[LANDAU_MAX_Q_FACE];
401: for (g=0;g<LANDAU_MAX_Q_FACE;g++) { col_scale[g]=0.; cols0[g]=0; }
402: /* assemble - from the diagonal (I,I) in this format for DMPlexMatSetClosure */
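            /* Index encoding used by the maps (constructed further below):
               Idxs[k] >= 0 is a plain global index; Idxs[k] < 0 encodes a
               constrained (hanging) vertex as idx = -Idxs[k] - 1 into
               c_maps[idx], a list of num_face (gid, scale) pairs that
               distribute the element value to the owning vertices. */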
403: for (fieldA = 0; fieldA < Nf ; fieldA++) {
404: LandauIdx *const Idxs = &maps->gIdx[ej-cStart][fieldA][0];
405: for (f = 0; f < Nb ; f++) {
406: idx = Idxs[f];
407: if (idx >= 0) {
408: nr = 1;
409: rows0[0] = idx;
410: row_scale[0] = 1.;
411: } else {
412: idx = -idx - 1;
413: nr = maps->num_face;
414: for (q = 0; q < maps->num_face; q++) {
415: rows0[q] = maps->c_maps[idx][q].gid;
416: row_scale[q] = maps->c_maps[idx][q].scale;
417: }
418: }
419: for (g = 0; g < Nb; ++g) {
420: idx = Idxs[g];
421: if (idx >= 0) {
422: nc = 1;
423: cols0[0] = idx;
424: col_scale[0] = 1.;
425: } else {
426: idx = -idx - 1;
427: nc = maps->num_face;
428: for (q = 0; q < maps->num_face; q++) {
429: cols0[q] = maps->c_maps[idx][q].gid;
430: col_scale[q] = maps->c_maps[idx][q].scale;
431: }
432: }
433: const PetscInt i = fieldA*Nb + f; /* Element matrix row */
434: const PetscInt j = fieldA*Nb + g; /* Element matrix column */
435: const PetscScalar Aij = elemMat[i*totDim + j];
436: for (q = 0; q < nr; q++) rows[q] = rows0[q];
437: for (q = 0; q < nc; q++) cols[q] = cols0[q];
438: for (q = 0; q < nr; q++) {
439: for (d = 0; d < nc; d++) {
440: vals[q*nc + d] = row_scale[q]*col_scale[d]*Aij;
441: }
442: }
443: MatSetValues(JacP,nr,rows,nc,cols,vals,ADD_VALUES);
444: }
445: }
446: }
447: }
448: if (ej==-3) {
449: PetscErrorCode ierr2;
450: ierr2 = PetscPrintf(ctx->comm,"CPU Element matrix\n");CHKERRQ(ierr2);
451: for (d = 0; d < totDim; ++d){
452: for (f = 0; f < totDim; ++f) {ierr2 = PetscPrintf(ctx->comm," %12.5e", PetscRealPart(elemMat[d*totDim + f]));CHKERRQ(ierr2);}
453: ierr2 = PetscPrintf(ctx->comm,"\n");CHKERRQ(ierr2);
454: }
455: exit(12);
456: }
457: PetscLogEventEnd(ctx->events[6],0,0,0,0);
458: } /* ej cells loop, not cuda */
459: if (shift!=0.0) { // mass
460: PetscFree(elemMat);
461: } else {
462: PetscFree5(elemMat, ff, dudx, dudy, dudz);
463: }
464: } /* CPU version */
465: /* assemble matrix or vector */
466: MatAssemblyBegin(JacP, MAT_FINAL_ASSEMBLY);
467: MatAssemblyEnd(JacP, MAT_FINAL_ASSEMBLY);
468: #define MAP_BF_SIZE (128*LANDAU_DIM*LANDAU_MAX_Q_FACE*LANDAU_MAX_SPECIES)
469: if (ctx->gpu_assembly && !container) {
470: PetscScalar elemMatrix[LANDAU_MAX_NQ*LANDAU_MAX_NQ*LANDAU_MAX_SPECIES*LANDAU_MAX_SPECIES], *elMat;
471: pointInterpolationP4est pointMaps[MAP_BF_SIZE][LANDAU_MAX_Q_FACE];
472: PetscInt q,eidx,fieldA;
473: MatType type;
474: PetscInfo1(JacP, "Make GPU maps %D\n",1);
475: MatGetType(JacP,&type);
476: PetscLogEventBegin(ctx->events[2],0,0,0,0);
477: PetscMalloc(sizeof(P4estVertexMaps), &maps);
478: PetscContainerCreate(PETSC_COMM_SELF, &container);
479: PetscContainerSetPointer(container, (void *)maps);
480: PetscContainerSetUserDestroy(container, LandauGPUDataDestroy);
481: PetscObjectCompose((PetscObject) JacP, "assembly_maps", (PetscObject) container);
482: PetscContainerDestroy(&container);
483: // make maps
484: maps->data = NULL;
485: maps->num_elements = numCells;
486: maps->num_face = (PetscInt)(pow(Nq,1./((double)dim))+.001); // Q
487: maps->num_face = (PetscInt)(pow(maps->num_face,(double)(dim-1))+.001); // Q^(dim-1): Q in 2D, Q^2 in 3D
488: maps->num_reduced = 0;
489: maps->deviceType = ctx->deviceType;
490: // count reduced and get
491: PetscMalloc(maps->num_elements * sizeof *maps->gIdx, &maps->gIdx);
492: for (fieldA=0;fieldA<Nf;fieldA++) {
493: for (ej = cStart, eidx = 0 ; ej < cEnd; ++ej, ++eidx) {
494: for (q = 0; q < Nb; ++q) {
495: PetscInt numindices,*indices;
496: PetscScalar *valuesOrig = elMat = elemMatrix;
497: PetscMemzero(elMat, totDim*totDim*sizeof(PetscScalar));
498: elMat[ (fieldA*Nb + q)*totDim + fieldA*Nb + q] = 1;
499: DMPlexGetClosureIndices(plex, section, globsection, ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
500: for (f = 0 ; f < numindices ; ++f) { // look for a non-zero on the diagonal
501: if (PetscAbs(PetscRealPart(elMat[f*numindices + f])) > PETSC_MACHINE_EPSILON) {
502: // found it
503: if (PetscAbs(PetscRealPart(elMat[f*numindices + f] - 1.)) < PETSC_MACHINE_EPSILON) {
504: maps->gIdx[eidx][fieldA][q] = (LandauIdx)indices[f]; // normal vertex 1.0
505: } else { //found a constraint
506: int jj = 0;
507: PetscReal sum = 0;
508: const PetscInt ff = f;
509: maps->gIdx[eidx][fieldA][q] = -maps->num_reduced - 1; // gid = -(idx+1): idx = -gid - 1
510: do { // constraints are contiguous in Plex - exploit that here
511: int ii;
512: for (ii = 0, pointMaps[maps->num_reduced][jj].scale = 0; ii < maps->num_face; ii++) { // DMPlex puts them all together
513: if (ff + ii < numindices) {
514: pointMaps[maps->num_reduced][jj].scale += PetscRealPart(elMat[f*numindices + ff + ii]);
515: }
516: }
517: sum += pointMaps[maps->num_reduced][jj].scale;
518: if (pointMaps[maps->num_reduced][jj].scale == 0) pointMaps[maps->num_reduced][jj].gid = -1; // 3D has Q and Q^2 interps -- all contiguous???
519: else pointMaps[maps->num_reduced][jj].gid = indices[f];
520: } while (++jj < maps->num_face && ++f < numindices); // jj is incremented if we hit the end
521: while (jj++ < maps->num_face) {
522: pointMaps[maps->num_reduced][jj].scale = 0;
523: pointMaps[maps->num_reduced][jj].gid = -1;
524: }
525: if (PetscAbs(sum-1.0)>PETSC_MACHINE_EPSILON*2.0) { // debug
526: int d,f;
527: PetscReal tmp = 0;
528: PetscPrintf(PETSC_COMM_SELF,"\t\t%D.%D.%D) ERROR total I = %22.16e (LANDAU_MAX_Q_FACE=%d, #face=%D)\n",eidx,q,fieldA,tmp,LANDAU_MAX_Q_FACE,maps->num_face);
529: for (d = 0, tmp = 0; d < numindices; ++d){
530: if (tmp!=0 && PetscAbs(tmp-1.0)>2*PETSC_MACHINE_EPSILON) PetscPrintf(PETSC_COMM_WORLD,"%3D) %3D: ",d,indices[d]);
531: for (f = 0; f < numindices; ++f) {
532: tmp += PetscRealPart(elMat[d*numindices + f]);
533: }
534: if (tmp!=0) PetscPrintf(ctx->comm," | %22.16e\n",tmp);
535: }
536: }
537: maps->num_reduced++;
538: if (maps->num_reduced>=MAP_BF_SIZE) SETERRQ2(PETSC_COMM_SELF, PETSC_ERR_PLIB, "maps->num_reduced %d > %d",maps->num_reduced,MAP_BF_SIZE);
539: }
540: break;
541: }
542: }
543: // cleanup
544: DMPlexRestoreClosureIndices(plex, section, globsection, ej, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
545: if (elMat != valuesOrig) {DMRestoreWorkArray(plex, numindices*numindices, MPIU_SCALAR, &elMat);}
546: }
547: }
548: }
549: // allocate and copy point data maps->gIdx[eidx][field][q] -- for the CPU version of this code, for debugging
550: PetscMalloc(maps->num_reduced * sizeof *maps->c_maps, &maps->c_maps);
551: for (ej = 0; ej < maps->num_reduced; ++ej) {
552: for (q = 0; q < maps->num_face; ++q) {
553: maps->c_maps[ej][q].scale = pointMaps[ej][q].scale;
554: maps->c_maps[ej][q].gid = pointMaps[ej][q].gid;
555: }
556: }
557: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
558: if (ctx->deviceType == LANDAU_KOKKOS) {
559: LandauKokkosCreateMatMaps(maps, pointMaps,Nf,Nq); // implies Kokkos does
560: } // else could be CUDA
561: #endif
562: #if defined(PETSC_HAVE_CUDA)
563: if (ctx->deviceType == LANDAU_CUDA){
564: LandauCUDACreateMatMaps(maps, pointMaps,Nf,Nq);
565: }
566: #endif
567: PetscLogEventEnd(ctx->events[2],0,0,0,0);
568: }
569: /* clean up */
570: DMDestroy(&plex);
571: if (shift==0.0) {
572: LandauPointDataDestroy(&IPData);
573: PetscFree(invJ_a);
574: } else {
575: PetscFree(mass_w);
576: }
577: return(0);
578: }
580: #if defined(LANDAU_ADD_BCS)
581: static void zero_bc(PetscInt dim, PetscInt Nf, PetscInt NfAux,
582: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
583: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
584: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar uexact[])
585: {
586: uexact[0] = 0;
587: }
588: #endif
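/* 2x2 matrix-vector product __p = __a * __x, used for the section rotations in CircleInflate() below */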
590: #define MATVEC2(__a,__x,__p) {int i,j; for (i=0; i<2; i++) {__p[i] = 0; for (j=0; j<2; j++) __p[i] += __a[i][j]*__x[j]; }}
591: static void CircleInflate(PetscReal r1, PetscReal r2, PetscReal r0, PetscInt num_sections, PetscReal x, PetscReal y,
592: PetscReal *outX, PetscReal *outY)
593: {
594: PetscReal rr = PetscSqrtReal(x*x + y*y), outfact, efact;
595: if (rr < r1 + PETSC_SQRT_MACHINE_EPSILON) {
596: *outX = x; *outY = y;
597: } else {
598: const PetscReal xy[2] = {x,y}, sinphi=y/rr, cosphi=x/rr;
599: PetscReal cth,sth,xyprime[2],Rth[2][2],rotcos,newrr;
600: if (num_sections==2) {
601: rotcos = 0.70710678118654;
602: outfact = 1.5; efact = 2.5;
603: /* rotate normalized vector into [-pi/4,pi/4) */
604: if (sinphi >= 0.) { /* top cell, -pi/2 */
605: cth = 0.707106781186548; sth = -0.707106781186548;
606: } else { /* bottom cell -pi/8 */
607: cth = 0.707106781186548; sth = .707106781186548;
608: }
609: } else if (num_sections==3) {
610: rotcos = 0.86602540378443;
611: outfact = 1.5; efact = 2.5;
612: /* rotate normalized vector into [-pi/6,pi/6) */
613: if (sinphi >= 0.5) { /* top cell, -pi/3 */
614: cth = 0.5; sth = -0.866025403784439;
615: } else if (sinphi >= -.5) { /* mid cell 0 */
616: cth = 1.; sth = .0;
617: } else { /* bottom cell +pi/3 */
618: cth = 0.5; sth = 0.866025403784439;
619: }
620: } else if (num_sections==4) {
621: rotcos = 0.9238795325112;
622: outfact = 1.5; efact = 3;
623: /* rotate normalized vector into [-pi/8,pi/8) */
624: if (sinphi >= 0.707106781186548) { /* top cell, -3pi/8 */
625: cth = 0.38268343236509; sth = -0.923879532511287;
626: } else if (sinphi >= 0.) { /* mid top cell -pi/8 */
627: cth = 0.923879532511287; sth = -.38268343236509;
628: } else if (sinphi >= -0.707106781186548) { /* mid bottom cell + pi/8 */
629: cth = 0.923879532511287; sth = 0.38268343236509;
630: } else { /* bottom cell + 3pi/8 */
631: cth = 0.38268343236509; sth = .923879532511287;
632: }
633: } else {
634: cth = 0.; sth = 0.; rotcos = 0; outfact = efact = 0;
635: }
636: Rth[0][0] = cth; Rth[0][1] =-sth;
637: Rth[1][0] = sth; Rth[1][1] = cth;
638: MATVEC2(Rth,xy,xyprime);
639: if (num_sections==2) {
640: newrr = xyprime[0]/rotcos;
641: } else {
642: PetscReal newcosphi=xyprime[0]/rr, rin = r1, rout = rr - rin;
643: PetscReal routmax = r0*rotcos/newcosphi - rin, nroutmax = r0 - rin, routfrac = rout/routmax;
644: newrr = rin + routfrac*nroutmax;
645: }
646: *outX = cosphi*newrr; *outY = sinphi*newrr;
647: /* grade */
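    /* Grading sketch: a radius rr in [rs,re] is remapped to
         rr' = rs + ((rr-rs)/(re-rs))^fact * (re-rs),
       clustering points toward rs for fact > 1; tt = rr'/rr is applied as a scale */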
648: PetscReal fact,tt,rs,re, rr = PetscSqrtReal(PetscSqr(*outX) + PetscSqr(*outY));
649: if (rr > r2) { rs = r2; re = r0; fact = outfact;} /* outer zone */
650: else { rs = r1; re = r2; fact = efact;} /* electron zone */
651: tt = (rs + PetscPowReal((rr - rs)/(re - rs),fact) * (re-rs)) / rr;
652: *outX *= tt;
653: *outY *= tt;
654: }
655: }
657: static PetscErrorCode GeometryDMLandau(DM base, PetscInt point, PetscInt dim, const PetscReal abc[], PetscReal xyz[], void *a_ctx)
658: {
659: LandauCtx *ctx = (LandauCtx*)a_ctx;
660: PetscReal r = abc[0], z = abc[1];
661: if (ctx->inflate) {
662: PetscReal absR, absZ;
663: absR = PetscAbs(r);
664: absZ = PetscAbs(z);
665: CircleInflate(ctx->i_radius,ctx->e_radius,ctx->radius,ctx->num_sections,absR,absZ,&absR,&absZ);
666: r = (r > 0) ? absR : -absR;
667: z = (z > 0) ? absZ : -absZ;
668: }
669: xyz[0] = r;
670: xyz[1] = z;
671: if (dim==3) xyz[2] = abc[2];
673: return(0);
674: }
676: static PetscErrorCode ErrorIndicator_Simple(PetscInt dim, PetscReal volume, PetscReal x[], PetscInt Nc, const PetscInt Nf[], const PetscScalar u[], const PetscScalar u_x[], PetscReal *error, void *actx)
677: {
678: PetscReal err = 0.0;
679: PetscInt f = *(PetscInt*)actx, j;
681: for (j = 0; j < dim; ++j) {
682: err += PetscSqr(PetscRealPart(u_x[f*dim+j]));
683: }
684: err = PetscRealPart(u[f]); /* just use rho */
685: *error = volume * err; /* * (ctx->axisymmetric ? 2.*PETSC_PI * r : 1); */
686: return(0);
687: }
689: static PetscErrorCode LandauDMCreateVMesh(MPI_Comm comm, const PetscInt dim, const char prefix[], LandauCtx *ctx, DM *dm)
690: {
692: PetscReal radius = ctx->radius;
693: size_t len;
694: char fname[128] = ""; /* we can add a file if we want */
697: /* create DM */
698: PetscStrlen(fname, &len);
699: if (len) {
700: PetscInt dim2;
701: DMPlexCreateFromFile(comm, fname, ctx->interpolate, dm);
702: DMGetDimension(*dm, &dim2);
703: if (LANDAU_DIM != dim2) SETERRQ2(comm, PETSC_ERR_PLIB, "dim %D != LANDAU_DIM %d",dim2,LANDAU_DIM);
704: } else { /* p4est, quads */
705: /* Create plex mesh of Landau domain */
706: if (!ctx->sphere) {
707: PetscInt cells[] = {2,2,2};
708: PetscReal lo[] = {-radius,-radius,-radius}, hi[] = {radius,radius,radius};
709: DMBoundaryType periodicity[3] = {DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE};
710: if (dim==2) { lo[0] = 0; cells[0] = 1; }
711: DMPlexCreateBoxMesh(comm, dim, PETSC_FALSE, cells, lo, hi, periodicity, PETSC_TRUE, dm);
712: DMLocalizeCoordinates(*dm); /* needed for periodic */
713: if (dim==3) {PetscObjectSetName((PetscObject) *dm, "cube");}
714: else {PetscObjectSetName((PetscObject) *dm, "half-plane");}
715: } else if (dim==2) {
716: PetscInt numCells,cells[16][4],i,j;
717: PetscInt numVerts;
718: PetscReal inner_radius1 = ctx->i_radius, inner_radius2 = ctx->e_radius;
719: PetscReal *flatCoords = NULL;
720: PetscInt *flatCells = NULL, *pcell;
721: if (ctx->num_sections==2) {
722: #if 1
723: numCells = 5;
724: numVerts = 10;
725: int cells2[][4] = { {0,1,4,3},
726: {1,2,5,4},
727: {3,4,7,6},
728: {4,5,8,7},
729: {6,7,8,9} };
730: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
731: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
732: {
733: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
734: for (j = 0; j < numVerts-1; j++) {
735: PetscReal z, r, theta = -PETSC_PI/2 + (j%3) * PETSC_PI/2;
736: PetscReal rad = (j >= 6) ? inner_radius1 : (j >= 3) ? inner_radius2 : ctx->radius;
737: z = rad * PetscSinReal(theta);
738: coords[j][1] = z;
739: r = rad * PetscCosReal(theta);
740: coords[j][0] = r;
741: }
742: coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
743: }
744: #else
745: numCells = 4;
746: numVerts = 8;
747: static int cells2[][4] = {{0,1,2,3},
748: {4,5,1,0},
749: {5,6,2,1},
750: {6,7,3,2}};
751: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
752: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
753: {
754: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
755: PetscInt j;
756: for (j = 0; j < 8; j++) {
757: PetscReal z, r;
758: PetscReal theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3.;
759: PetscReal rad = ctx->radius * ((j < 4) ? 0.5 : 1.0);
760: z = rad * PetscSinReal(theta);
761: coords[j][1] = z;
762: r = rad * PetscCosReal(theta);
763: coords[j][0] = r;
764: }
765: }
766: #endif
767: } else if (ctx->num_sections==3) {
768: numCells = 7;
769: numVerts = 12;
770: int cells2[][4] = { {0,1,5,4},
771: {1,2,6,5},
772: {2,3,7,6},
773: {4,5,9,8},
774: {5,6,10,9},
775: {6,7,11,10},
776: {8,9,10,11} };
777: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
778: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
779: {
780: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
781: for (j = 0; j < numVerts; j++) {
782: PetscReal z, r, theta = -PETSC_PI/2 + (j%4) * PETSC_PI/3;
783: PetscReal rad = (j >= 8) ? inner_radius1 : (j >= 4) ? inner_radius2 : ctx->radius;
784: z = rad * PetscSinReal(theta);
785: coords[j][1] = z;
786: r = rad * PetscCosReal(theta);
787: coords[j][0] = r;
788: }
789: }
790: } else if (ctx->num_sections==4) {
791: numCells = 10;
792: numVerts = 16;
793: int cells2[][4] = { {0,1,6,5},
794: {1,2,7,6},
795: {2,3,8,7},
796: {3,4,9,8},
797: {5,6,11,10},
798: {6,7,12,11},
799: {7,8,13,12},
800: {8,9,14,13},
801: {10,11,12,15},
802: {12,13,14,15}};
803: for (i = 0; i < numCells; i++) for (j = 0; j < 4; j++) cells[i][j] = cells2[i][j];
804: PetscMalloc2(numVerts * 2, &flatCoords, numCells * 4, &flatCells);
805: {
806: PetscReal (*coords)[2] = (PetscReal (*) [2]) flatCoords;
807: for (j = 0; j < numVerts-1; j++) {
808: PetscReal z, r, theta = -PETSC_PI/2 + (j%5) * PETSC_PI/4;
809: PetscReal rad = (j >= 10) ? inner_radius1 : (j >= 5) ? inner_radius2 : ctx->radius;
810: z = rad * PetscSinReal(theta);
811: coords[j][1] = z;
812: r = rad * PetscCosReal(theta);
813: coords[j][0] = r;
814: }
815: coords[numVerts-1][0] = coords[numVerts-1][1] = 0;
816: }
817: } else {
818: numCells = 0;
819: numVerts = 0;
820: }
821: for (j = 0, pcell = flatCells; j < numCells; j++, pcell += 4) {
822: pcell[0] = cells[j][0]; pcell[1] = cells[j][1];
823: pcell[2] = cells[j][2]; pcell[3] = cells[j][3];
824: }
825: DMPlexCreateFromCellListPetsc(comm,2,numCells,numVerts,4,ctx->interpolate,flatCells,2,flatCoords,dm);
826: PetscFree2(flatCoords,flatCells);
827: PetscObjectSetName((PetscObject) *dm, "semi-circle");
828: } else SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Velocity space mesh does not support cubed sphere");
829: }
830: PetscObjectSetOptionsPrefix((PetscObject)*dm,prefix);
832: DMSetFromOptions(*dm); /* Plex refine */
834: { /* p4est? */
835: char convType[256];
836: PetscBool flg;
837: PetscOptionsBegin(ctx->comm, prefix, "Mesh conversion options", "DMPLEX");
838: PetscOptionsFList("-dm_landau_type","Convert DMPlex to another format (should not be Plex!)","plexland.c",DMList,DMPLEX,convType,256,&flg);
839: PetscOptionsEnd();
840: if (flg) {
841: DM dmforest;
842: DMConvert(*dm,convType,&dmforest);
843: if (dmforest) {
844: PetscBool isForest;
845: if (dmforest->prealloc_only != (*dm)->prealloc_only) SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP,"plex->prealloc_only != dm->prealloc_only");
846: PetscObjectSetOptionsPrefix((PetscObject)dmforest,prefix);
847: DMIsForest(dmforest,&isForest);
848: if (isForest) {
849: if (ctx->sphere && ctx->inflate) {
850: DMForestSetBaseCoordinateMapping(dmforest,GeometryDMLandau,ctx);
851: }
852: if (dmforest->prealloc_only != (*dm)->prealloc_only) SETERRQ(PetscObjectComm((PetscObject)dm),PETSC_ERR_SUP,"plex->prealloc_only != dm->prealloc_only");
853: DMDestroy(dm);
854: *dm = dmforest;
855: ctx->errorIndicator = ErrorIndicator_Simple; /* flag for Forest */
856: } else SETERRQ(ctx->comm, PETSC_ERR_USER, "Converted to non Forest?");
857: } else SETERRQ(ctx->comm, PETSC_ERR_USER, "Convert failed?");
858: }
859: }
860: PetscObjectSetName((PetscObject) *dm, "Mesh");
861: return(0);
862: }
864: static PetscErrorCode SetupDS(DM dm, PetscInt dim, LandauCtx *ctx)
865: {
866: PetscErrorCode ierr;
867: PetscInt ii;
869: for (ii=0;ii<ctx->num_species;ii++) {
870: char buf[256];
871: if (ii==0) PetscSNPrintf(buf, 256, "e");
872: else {PetscSNPrintf(buf, 256, "i%D", ii);}
873: /* Setup Discretization - FEM */
874: PetscFECreateDefault(PetscObjectComm((PetscObject) dm), dim, 1, PETSC_FALSE, NULL, PETSC_DECIDE, &ctx->fe[ii]);
875: PetscObjectSetName((PetscObject) ctx->fe[ii], buf);
876: DMSetField(dm, ii, NULL, (PetscObject) ctx->fe[ii]);
877: }
878: DMCreateDS(dm);
879: if (1) {
880: PetscInt ii;
881: PetscSection section;
882: DMGetSection(dm, &section);
883: for (ii=0;ii<ctx->num_species;ii++){
884: char buf[256];
885: if (ii==0) PetscSNPrintf(buf, 256, "se");
886: else PetscSNPrintf(buf, 256, "si%D", ii);
887: PetscSectionSetComponentName(section, ii, 0, buf);
888: }
889: }
890: return(0);
891: }
893: /* Define a Maxwellian function for testing out the operator. */
895: /* Using cartesian velocity space coordinates, the particle */
896: /* density, [1/m^3], is defined according to */
898: /* $$ n=\int_{R^3} dv^3 \left(\frac{m}{2\pi T}\right)^{3/2}\exp [- mv^2/(2T)] $$ */
900: /* Using some constant, c, we normalize the velocity vector into a */
901: /* dimensionless variable according to v=c*x. Thus the density, $n$, becomes */
903: /* $$ n=\int_{R^3} dx^3 \left(\frac{mc^2}{2\pi T}\right)^{3/2}\exp [- mc^2/(2T)*x^2] $$ */
905: /* Defining $\theta=2T/mc^2$, we thus find that the probability density */
906: /* for finding the particle in a box dx^3 around x is */
908: /* $$ f(x;\theta)=\left(\frac{1}{\pi\theta}\right)^{3/2} \exp [ -x^2/\theta ] $$ */
910: typedef struct {
911: LandauCtx *ctx;
912: PetscReal kT_m;
913: PetscReal n;
914: PetscReal shift;
915: } MaxwellianCtx;
917: static PetscErrorCode maxwellian(PetscInt dim, PetscReal time, const PetscReal x[], PetscInt Nf_dummy, PetscScalar *u, void *actx)
918: {
919: MaxwellianCtx *mctx = (MaxwellianCtx*)actx;
920: LandauCtx *ctx = mctx->ctx;
921: PetscInt i;
922: PetscReal v2 = 0, theta = 2*mctx->kT_m/(ctx->v_0*ctx->v_0); /* theta = 2kT/mc^2 */
924: /* compute the exponents, v^2 */
925: for (i = 0; i < dim; ++i) v2 += x[i]*x[i];
926: /* evaluate the Maxwellian */
927: u[0] = mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
928: if (mctx->shift!=0.) {
929: v2 = 0;
930: for (i = 0; i < dim-1; ++i) v2 += x[i]*x[i];
931: v2 += (x[dim-1]-mctx->shift)*(x[dim-1]-mctx->shift);
932: /* evaluate the shifted Maxwellian */
933: u[0] += mctx->n*PetscPowReal(PETSC_PI*theta,-1.5)*(PetscExpReal(-v2/theta));
934: }
935: return(0);
936: }
938: /*@
939: LandauAddMaxwellians - Add a Maxwellian distribution to a state
941: Collective on X
943: Input Parameters:
944: + dm - The mesh
945: . time - Current time
946: . temps - Temperatures of each species
947: . ns - Number density of each species
948: - actx - Landau context
950: Output Parameter:
951: . X - The state
953: Level: beginner
955: .keywords: mesh
956: .seealso: LandauCreateVelocitySpace()
957: @*/
958: PetscErrorCode LandauAddMaxwellians(DM dm, Vec X, PetscReal time, PetscReal temps[], PetscReal ns[], void *actx)
959: {
960: LandauCtx *ctx = (LandauCtx*)actx;
961: PetscErrorCode (*initu[LANDAU_MAX_SPECIES])(PetscInt, PetscReal, const PetscReal [], PetscInt, PetscScalar [], void *);
962: PetscErrorCode ierr;
963: PetscInt dim, ii;
964: MaxwellianCtx *mctxs[LANDAU_MAX_SPECIES], data[LANDAU_MAX_SPECIES];
967: DMGetDimension(dm, &dim);
968: if (!ctx) { DMGetApplicationContext(dm, &ctx); }
969: for (ii=0;ii<ctx->num_species;ii++) {
970: mctxs[ii] = &data[ii];
971: data[ii].ctx = ctx;
972: data[ii].kT_m = ctx->k*temps[ii]/ctx->masses[ii]; /* kT/m */
973: data[ii].n = ns[ii];
974: initu[ii] = maxwellian;
975: data[ii].shift = 0;
976: }
977: data[0].shift = ctx->electronShift;
978: /* need to make ADD_ALL_VALUES work - TODO */
979: DMProjectFunction(dm, time, initu, (void**)mctxs, INSERT_ALL_VALUES, X);
980: return(0);
981: }
983: /*
984: LandauSetInitialCondition - Adds Maxwellians with context
986: Collective on X
988: Input Parameters:
989: + dm - The mesh
990: - actx - Landau context with T and n
992: Output Parameter:
993: . X - The state
995: Level: beginner
997: .keywords: mesh
998: .seealso: LandauCreateVelocitySpace(), LandauAddMaxwellians()
999: */
1000: static PetscErrorCode LandauSetInitialCondition(DM dm, Vec X, void *actx)
1001: {
1002: LandauCtx *ctx = (LandauCtx*)actx;
1005: if (!ctx) { DMGetApplicationContext(dm, &ctx); }
1006: VecZeroEntries(X);
1007: LandauAddMaxwellians(dm, X, 0.0, ctx->thermal_temps, ctx->n, ctx);
1008: return(0);
1009: }
1011: static PetscErrorCode adaptToleranceFEM(PetscFE fem, Vec sol, PetscReal refineTol[], PetscReal coarsenTol[], PetscInt type, LandauCtx *ctx, DM *newDM)
1012: {
1013: DM dm, plex, adaptedDM = NULL;
1014: PetscDS prob;
1015: PetscBool isForest;
1016: PetscQuadrature quad;
1017: PetscInt Nq, *Nb, cStart, cEnd, c, dim, qj, k;
1018: DMLabel adaptLabel = NULL;
1019: PetscErrorCode ierr;
1022: VecGetDM(sol, &dm);
1023: DMCreateDS(dm);
1024: DMGetDS(dm, &prob);
1025: DMGetDimension(dm, &dim);
1026: DMIsForest(dm, &isForest);
1027: DMConvert(dm, DMPLEX, &plex);
1028: DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1029: DMLabelCreate(PETSC_COMM_SELF,"adapt",&adaptLabel);
1030: PetscFEGetQuadrature(fem, &quad);
1031: PetscQuadratureGetData(quad, NULL, NULL, &Nq, NULL, NULL);
1032: if (Nq >LANDAU_MAX_NQ) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"Order too high. Nq = %D > LANDAU_MAX_NQ (%D)",Nq,LANDAU_MAX_NQ);
1033: PetscDSGetDimensions(prob, &Nb);
1034: if (type==4) {
1035: for (c = cStart; c < cEnd; c++) {
1036: DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1037: }
1038: PetscInfo1(sol, "Phase:%s: Uniform refinement\n","adaptToleranceFEM");
1039: } else if (type==2) {
1040: PetscInt rCellIdx[8], eCellIdx[64], iCellIdx[64], eMaxIdx = -1, iMaxIdx = -1, nr = 0, nrmax = (dim==3) ? 8 : 2;
1041: PetscReal minRad = PETSC_INFINITY, r, eMinRad = PETSC_INFINITY, iMinRad = PETSC_INFINITY;
1042: for (c = 0; c < 64; c++) { eCellIdx[c] = iCellIdx[c] = -1; }
1043: for (c = cStart; c < cEnd; c++) {
1044: PetscReal tt, v0[LANDAU_MAX_NQ*3], detJ[LANDAU_MAX_NQ];
1045: DMPlexComputeCellGeometryFEM(plex, c, quad, v0, NULL, NULL, detJ);
1046: for (qj = 0; qj < Nq; ++qj) {
1047: tt = PetscSqr(v0[dim*qj+0]) + PetscSqr(v0[dim*qj+1]) + PetscSqr(((dim==3) ? v0[dim*qj+2] : 0));
1048: r = PetscSqrtReal(tt);
1049: if (r < minRad - PETSC_SQRT_MACHINE_EPSILON*10.) {
1050: minRad = r;
1051: nr = 0;
1052: rCellIdx[nr++]= c;
1053: PetscInfo4(sol, "\t\tPhase: adaptToleranceFEM Found first inner r=%e, cell %D, qp %D/%D\n", r, c, qj+1, Nq);
1054: } else if ((r-minRad) < PETSC_SQRT_MACHINE_EPSILON*100. && nr < nrmax) {
1055: for (k=0;k<nr;k++) if (c == rCellIdx[k]) break;
1056: if (k==nr) {
1057: rCellIdx[nr++]= c;
1058: PetscInfo5(sol, "\t\t\tPhase: adaptToleranceFEM Found another inner r=%e, cell %D, qp %D/%D, d=%e\n", r, c, qj+1, Nq, r-minRad);
1059: }
1060: }
1061: if (ctx->sphere) {
1062: if ((tt=r-ctx->e_radius) > 0) {
1063: PetscInfo2(sol, "\t\t\t %D cell r=%g\n",c,tt);
1064: if (tt < eMinRad - PETSC_SQRT_MACHINE_EPSILON*100.) {
1065: eMinRad = tt;
1066: eMaxIdx = 0;
1067: eCellIdx[eMaxIdx++] = c;
1068: } else if (eMaxIdx > 0 && (tt-eMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != eCellIdx[eMaxIdx-1]) {
1069: eCellIdx[eMaxIdx++] = c;
1070: }
1071: }
1072: if ((tt=r-ctx->i_radius) > 0) {
1073: if (tt < iMinRad - 1.e-5) {
1074: iMinRad = tt;
1075: iMaxIdx = 0;
1076: iCellIdx[iMaxIdx++] = c;
1077: } else if (iMaxIdx > 0 && (tt-iMinRad) <= PETSC_SQRT_MACHINE_EPSILON && c != iCellIdx[iMaxIdx-1]) {
1078: iCellIdx[iMaxIdx++] = c;
1079: }
1080: }
1081: }
1082: }
1083: }
1084: for (k=0;k<nr;k++) {
1085: DMLabelSetValue(adaptLabel, rCellIdx[k], DM_ADAPT_REFINE);
1086: }
1087: if (ctx->sphere) {
1088: for (c = 0; c < eMaxIdx; c++) {
1089: DMLabelSetValue(adaptLabel, eCellIdx[c], DM_ADAPT_REFINE);
1090: PetscInfo3(sol, "\t\tPhase:%s: refine sphere e cell %D r=%g\n","adaptToleranceFEM",eCellIdx[c],eMinRad);
1091: }
1092: for (c = 0; c < iMaxIdx; c++) {
1093: DMLabelSetValue(adaptLabel, iCellIdx[c], DM_ADAPT_REFINE);
1094: PetscInfo3(sol, "\t\tPhase:%s: refine sphere i cell %D r=%g\n","adaptToleranceFEM",iCellIdx[c],iMinRad);
1095: }
1096: }
1097: PetscInfo4(sol, "Phase:%s: Adaptive refine origin cells %D,%D r=%g\n","adaptToleranceFEM",rCellIdx[0],rCellIdx[1],minRad);
1098: } else if (type==0 || type==1 || type==3) { /* refine along r=0 axis */
1099: PetscScalar *coef = NULL;
1100: Vec coords;
1101: PetscInt csize,Nv,d,nz;
1102: DM cdm;
1103: PetscSection cs;
1104: DMGetCoordinatesLocal(dm, &coords);
1105: DMGetCoordinateDM(dm, &cdm);
1106: DMGetLocalSection(cdm, &cs);
1107: for (c = cStart; c < cEnd; c++) {
1108: PetscInt doit = 0, outside = 0;
1109: DMPlexVecGetClosure(cdm, cs, coords, c, &csize, &coef);
1110: Nv = csize/dim;
1111: for (nz = d = 0; d < Nv; d++) {
1112: PetscReal z = PetscRealPart(coef[d*dim + (dim-1)]), x = PetscSqr(PetscRealPart(coef[d*dim + 0])) + ((dim==3) ? PetscSqr(PetscRealPart(coef[d*dim + 1])) : 0);
1113: x = PetscSqrtReal(x);
1114: if (x < PETSC_MACHINE_EPSILON*10. && PetscAbs(z)<PETSC_MACHINE_EPSILON*10.) doit = 1; /* refine origin */
1115: else if (type==0 && (z < -PETSC_MACHINE_EPSILON*10. || z > ctx->re_radius+PETSC_MACHINE_EPSILON*10.)) outside++; /* first pass don't refine bottom */
1116: else if (type==1 && (z > ctx->vperp0_radius1 || z < -ctx->vperp0_radius1)) outside++; /* don't refine outside electron refine radius */
1117: else if (type==3 && (z > ctx->vperp0_radius2 || z < -ctx->vperp0_radius2)) outside++; /* don't refine outside ion refine radius */
1118: if (x < PETSC_MACHINE_EPSILON*10.) nz++;
1119: }
1120: DMPlexVecRestoreClosure(cdm, cs, coords, c, &csize, &coef);
1121: if (doit || (outside<Nv && nz)) {
1122: DMLabelSetValue(adaptLabel, c, DM_ADAPT_REFINE);
1123: }
1124: }
1125: PetscInfo1(sol, "Phase:%s: RE refinement\n","adaptToleranceFEM");
1126: }
1127: /* VecDestroy(&locX); */
1128: DMDestroy(&plex);
1129: DMAdaptLabel(dm, adaptLabel, &adaptedDM);
1130: DMLabelDestroy(&adaptLabel);
1131: *newDM = adaptedDM;
1132: if (adaptedDM) {
1133: if (isForest) {
1134: DMForestSetAdaptivityForest(adaptedDM,NULL);
1135: }
1136: DMConvert(adaptedDM, DMPLEX, &plex);
1137: DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1138: PetscInfo2(sol, "\tPhase: adaptToleranceFEM: %D cells, %d total quadrature points\n",cEnd-cStart,Nq*(cEnd-cStart));
1139: DMDestroy(&plex);
1140: }
1141: return(0);
1142: }
1144: static PetscErrorCode adapt(DM *dm, LandauCtx *ctx, Vec *uu)
1145: {
1146: PetscErrorCode ierr;
1147: PetscInt type, limits[5] = {ctx->numRERefine,ctx->nZRefine1,ctx->maxRefIts,ctx->nZRefine2,ctx->postAMRRefine};
1148: PetscInt adaptIter;
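  /* refinement passes, in order (see adaptToleranceFEM and ProcessOptions):
     type 0: runaway region along v_perp=0, z>0  (-dm_landau_amr_re_levels)
     type 1: v_perp=0 axis, electron radius      (-dm_landau_amr_z_refine1)
     type 2: AMR around the origin               (-dm_landau_amr_levels_max)
     type 3: v_perp=0 axis, ion radius           (-dm_landau_amr_z_refine2)
     type 4: uniform post-AMR refinement         (-dm_landau_amr_post_refine) */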
1151: for (type=0;type<5;type++) {
1152: for (adaptIter = 0; adaptIter<limits[type];adaptIter++) {
1153: DM dmNew = NULL;
1154: adaptToleranceFEM(ctx->fe[0], *uu, ctx->refineTol, ctx->coarsenTol, type, ctx, &dmNew);
1155: if (!dmNew) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"should not happen");
1156: else {
1157: DMDestroy(dm);
1158: VecDestroy(uu);
1159: DMCreateGlobalVector(dmNew,uu);
1160: PetscObjectSetName((PetscObject) *uu, "u");
1161: LandauSetInitialCondition(dmNew, *uu, ctx);
1162: *dm = dmNew;
1163: }
1164: }
1165: }
1166: return(0);
1167: }
1169: static PetscErrorCode ProcessOptions(LandauCtx *ctx, const char prefix[])
1170: {
1171: PetscErrorCode ierr;
1172: PetscBool flg, sph_flg;
1173: PetscInt ii,nt,nm,nc;
1174: DM dummy;
1177: DMCreate(ctx->comm,&dummy);
1178: /* get options - initialize context */
1179: ctx->normJ = 0;
1180: ctx->verbose = 1;
1181: ctx->interpolate = PETSC_TRUE;
1182: ctx->gpu_assembly = PETSC_TRUE;
1183: ctx->sphere = PETSC_FALSE;
1184: ctx->inflate = PETSC_FALSE;
1185: ctx->electronShift = 0;
1186: ctx->errorIndicator = NULL;
1187: ctx->radius = 5.; /* electron thermal radius (velocity) */
1188: ctx->re_radius = 0.;
1189: ctx->vperp0_radius1 = 0;
1190: ctx->vperp0_radius2 = 0;
1191: ctx->e_radius = .1;
1192: ctx->i_radius = .01;
1193: ctx->maxRefIts = 5;
1194: ctx->postAMRRefine = 0;
1195: ctx->nZRefine1 = 0;
1196: ctx->nZRefine2 = 0;
1197: ctx->numRERefine = 0;
1198: ctx->aux_bool = PETSC_FALSE;
1199: ctx->num_sections = 3; /* 2, 3 or 4 */
1200: /* species - [0] electrons, [1] one ion species, e.g., deuterium, [2] heavy impurity ion, ... */
1201: ctx->charges[0] = -1; /* electron charge (MKS) */
1202: ctx->masses[0] = 1/1835.5; /* temporary value in proton mass */
1203: ctx->n[0] = 1;
1204: ctx->thermal_temps[0] = 1;
1205: /* constants, etc. */
1206: ctx->epsilon0 = 8.8542e-12; /* permittivity of free space (MKS) F/m */
1207: ctx->k = 1.38064852e-23; /* Boltzmann constant (MKS) J/K */
1208: ctx->lnLam = 10; /* cross section ratio large - small angle collisions */
1209: ctx->n_0 = 1.e20; /* typical plasma n, but could set it to 1 */
1210: ctx->Ez = 0;
1211: ctx->v_0 = 1; /* in electron thermal velocity */
1212: ctx->subThreadBlockSize = 1; /* for device and maybe OMP */
1213: ctx->numConcurrency = 1; /* for device */
1214: PetscOptionsBegin(ctx->comm, prefix, "Options for Fokker-Plank-Landau collision operator", "none");
1215: {
1216: char opstring[256];
1217: #if defined(PETSC_HAVE_KOKKOS)
1218: ctx->deviceType = LANDAU_KOKKOS;
1219: PetscStrcpy(opstring,"kokkos");
1220: #if defined(PETSC_HAVE_CUDA)
1221: ctx->subThreadBlockSize = 8;
1222: #endif
1223: #elif defined(PETSC_HAVE_CUDA)
1224: ctx->deviceType = LANDAU_CUDA;
1225: PetscStrcpy(opstring,"cuda");
1226: #else
1227: ctx->deviceType = LANDAU_CPU;
1228: PetscStrcpy(opstring,"cpu");
1229: ctx->subThreadBlockSize = 0;
1230: #endif
1231: PetscOptionsString("-dm_landau_device_type","Use kernels on 'cpu', 'cuda', or 'kokkos'","plexland.c",opstring,opstring,256,NULL);
1232: PetscStrcmp("cpu",opstring,&flg);
1233: if (flg) {
1234: ctx->deviceType = LANDAU_CPU;
1235: ctx->subThreadBlockSize = 0;
1236: } else {
1237: PetscStrcmp("cuda",opstring,&flg);
1238: if (flg) {
1239: ctx->deviceType = LANDAU_CUDA;
1240: ctx->subThreadBlockSize = 0;
1241: } else {
1242: PetscStrcmp("kokkos",opstring,&flg);
1243: if (flg) ctx->deviceType = LANDAU_KOKKOS;
1244: else SETERRQ1(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_device_type %s",opstring);
1245: }
1246: }
1247: }
1248: PetscOptionsBool("-dm_landau_gpu_assembly", "Assemble Jacobian on GPU", "plexland.c", ctx->gpu_assembly, &ctx->gpu_assembly, NULL);
1249: PetscOptionsReal("-dm_landau_electron_shift","Shift in thermal velocity of electrons","none",ctx->electronShift,&ctx->electronShift, NULL);
1250: PetscOptionsBool("-dm_landau_sphere", "use sphere/semi-circle domain instead of rectangle", "plexland.c", ctx->sphere, &ctx->sphere, &sph_flg);
1251: PetscOptionsBool("-dm_landau_inflate", "With sphere, inflate for curved edges (no AMR)", "plexland.c", ctx->inflate, &ctx->inflate, NULL);
1252: PetscOptionsInt("-dm_landau_amr_re_levels", "Number of levels to refine along v_perp=0, z>0", "plexland.c", ctx->numRERefine, &ctx->numRERefine, NULL);
1253: PetscOptionsInt("-dm_landau_amr_z_refine1", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine1, &ctx->nZRefine1, NULL);
1254: PetscOptionsInt("-dm_landau_amr_z_refine2", "Number of levels to refine along v_perp=0", "plexland.c", ctx->nZRefine2, &ctx->nZRefine2, NULL);
1255: PetscOptionsInt("-dm_landau_amr_levels_max", "Number of AMR levels of refinement around origin after r=0 refinements", "plexland.c", ctx->maxRefIts, &ctx->maxRefIts, NULL);
1256: PetscOptionsInt("-dm_landau_amr_post_refine", "Number of levels to uniformly refine after AMR", "plexland.c", ctx->postAMRRefine, &ctx->postAMRRefine, NULL);
1257: PetscOptionsInt("-dm_landau_verbose", "", "plexland.c", ctx->verbose, &ctx->verbose, NULL);
1258: PetscOptionsReal("-dm_landau_re_radius","velocity range to refine on positive (z>0) r=0 axis for runaways","plexland.c",ctx->re_radius,&ctx->re_radius, &flg);
1259: PetscOptionsReal("-dm_landau_z_radius1","velocity range to refine r=0 axis (for electrons)","plexland.c",ctx->vperp0_radius1,&ctx->vperp0_radius1, &flg);
1260: PetscOptionsReal("-dm_landau_z_radius2","velocity range to refine r=0 axis (for ions) after origin AMR","plexland.c",ctx->vperp0_radius2,&ctx->vperp0_radius2, &flg);
1261: PetscOptionsReal("-dm_landau_Ez","Initial parallel electric field in units of the Connor-Hastie critical field","plexland.c",ctx->Ez,&ctx->Ez, NULL);
1262: PetscOptionsReal("-dm_landau_n_0","Normalization constant for number density","plexland.c",ctx->n_0,&ctx->n_0, NULL);
1263: PetscOptionsReal("-dm_landau_ln_lambda","Cross section parameter","plexland.c",ctx->lnLam,&ctx->lnLam, NULL);
1264: PetscOptionsInt("-dm_landau_num_sections", "Number of tangential sections in (2D) grid: 2, 3, or 4", "plexland.c", ctx->num_sections, &ctx->num_sections, NULL);
1265: PetscOptionsInt("-dm_landau_num_thread_teams", "The number of other concurrent runs to make room for", "plexland.c", ctx->numConcurrency, &ctx->numConcurrency, NULL);
1267: /* get number of species from the thermal temperatures */
1268: {
1269: PetscReal arr[100];
1270: nt = 100;
1271: PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV", "plexland.c", arr, &nt, &flg);
1272: if (flg && nt > LANDAU_MAX_SPECIES) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,...: number of species %D > MAX %D",nt,LANDAU_MAX_SPECIES);
1273: }
1274: nt = LANDAU_MAX_SPECIES;
1275: for (ii=1;ii<LANDAU_MAX_SPECIES;ii++) {
1276: ctx->thermal_temps[ii] = 1.;
1277: ctx->charges[ii] = 1;
1278: ctx->masses[ii] = 1;
1279: ctx->n[ii] = (ii==1) ? 1 : 0;
1280: }
1281: PetscOptionsRealArray("-dm_landau_thermal_temps", "Temperature of each species [e,i_0,i_1,...] in keV (must be set to set number of species)", "plexland.c", ctx->thermal_temps, &nt, &flg);
1282: if (flg) {
1283: PetscInfo1(dummy, "num_species set to number of thermal temps provided (%D)\n",nt);
1284: ctx->num_species = nt;
1285: } else SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"-dm_landau_thermal_temps t1,t2,... must be provided to set the number of species");
1286: for (ii=0;ii<ctx->num_species;ii++) ctx->thermal_temps[ii] *= 1.1604525e7; /* convert to Kelvin */
1287: nm = LANDAU_MAX_SPECIES-1;
1288: PetscOptionsRealArray("-dm_landau_ion_masses", "Mass of each species in units of proton mass [i_0=2,i_1=40...]", "plexland.c", &ctx->masses[1], &nm, &flg);
1289: if (flg && nm != ctx->num_species-1) {
1290: SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"num ion masses %D != num species %D",nm,ctx->num_species-1);
1291: }
1292: nm = LANDAU_MAX_SPECIES;
1293: PetscOptionsRealArray("-dm_landau_n", "Normalized (by -n_0) number density of each species", "plexland.c", ctx->n, &nm, &flg);
1294: if (flg && nm != ctx->num_species) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"wrong num n: %D != num species %D",nm,ctx->num_species);
1295: ctx->n_0 *= ctx->n[0]; /* normalized number density */
1296: for (ii=1;ii<ctx->num_species;ii++) ctx->n[ii] = ctx->n[ii]/ctx->n[0];
1297: ctx->n[0] = 1;
1298: for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] *= 1.6720e-27; /* scale by proton mass kg */
1299: ctx->masses[0] = 9.10938356e-31; /* electron mass kg (should be about right already) */
1300: ctx->m_0 = ctx->masses[0]; /* arbitrary reference mass, electrons */
1301: PetscOptionsReal("-dm_landau_v_0","Velocity to normalize with in units of initial electrons thermal velocity (not recommended to change default)","plexland.c",ctx->v_0,&ctx->v_0, NULL);
1302: ctx->v_0 *= PetscSqrtReal(ctx->k*ctx->thermal_temps[0]/(ctx->masses[0])); /* electron mean velocity in 1D (need 3D form in computing T from FE integral) */
1303: nc = LANDAU_MAX_SPECIES-1;
1304: PetscOptionsRealArray("-dm_landau_ion_charges", "Charge of each species in units of proton charge [i_0=2,i_1=18,...]", "plexland.c", &ctx->charges[1], &nc, &flg);
1305: if (flg && nc != ctx->num_species-1) SETERRQ2(ctx->comm,PETSC_ERR_ARG_WRONG,"num charges %D != num species %D",nc,ctx->num_species-1);
1306: for (ii=0;ii<LANDAU_MAX_SPECIES;ii++) ctx->charges[ii] *= 1.6022e-19; /* electron/proton charge (MKS) */
1307: ctx->t_0 = 8*PETSC_PI*PetscSqr(ctx->epsilon0*ctx->m_0/PetscSqr(ctx->charges[0]))/ctx->lnLam/ctx->n_0*PetscPowReal(ctx->v_0,3); /* note, this t_0 makes nu[0,0]=1 */
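  /* in formula form (sketch): t_0 = 8 pi eps0^2 m_0^2 v_0^3 / (e^4 lnLambda n_0),
     an electron-electron collision time in these normalized units */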
1308: /* geometry */
1309: for (ii=0;ii<ctx->num_species;ii++) ctx->refineTol[ii] = PETSC_MAX_REAL;
1310: for (ii=0;ii<ctx->num_species;ii++) ctx->coarsenTol[ii] = 0.;
1311: ii = LANDAU_MAX_SPECIES;
1312: PetscOptionsRealArray("-dm_landau_refine_tol","tolerance for refining cells in AMR","plexland.c",ctx->refineTol, &ii, &flg);
1313: if (flg && ii != ctx->num_species) PetscInfo2(dummy, "Phase: Warning, #refine_tol %D != num_species %D\n",ii,ctx->num_species);
1314: ii = LANDAU_MAX_SPECIES;
1315: PetscOptionsRealArray("-dm_landau_coarsen_tol","tolerance for coarsening cells in AMR","plexland.c",ctx->coarsenTol, &ii, &flg);
1316: if (flg && ii != ctx->num_species) PetscInfo2(dummy, "Phase: Warning, #coarsen_tol %D != num_species %D\n",ii,ctx->num_species);
1317: PetscOptionsReal("-dm_landau_domain_radius","Phase space size in units of electron thermal velocity","plexland.c",ctx->radius,&ctx->radius, &flg);
1318: if (flg && ctx->radius <= 0) { /* negative is ratio of c */
1319: if (ctx->radius == 0) ctx->radius = 0.75;
1320: else ctx->radius = -ctx->radius;
1321: ctx->radius = ctx->radius*299792458.0/ctx->v_0;
1322: PetscInfo1(dummy, "Change domain radius to %e\n",ctx->radius);
1323: }
1324: PetscOptionsReal("-dm_landau_i_radius","Ion thermal velocity, used for circular meshes","plexland.c",ctx->i_radius,&ctx->i_radius, &flg);
1325: if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* you gave me an ion radius but did not set sphere, user error really */
1326: if (!flg) {
1327: ctx->i_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[1]/ctx->masses[1]/PETSC_PI)/ctx->v_0; /* normalized radius with thermal velocity of first ion */
1328: }
1329: PetscOptionsReal("-dm_landau_e_radius","Electron thermal velocity, used for circular meshes","plexland.c",ctx->e_radius,&ctx->e_radius, &flg);
1330: if (flg && !sph_flg) ctx->sphere = PETSC_TRUE; /* you gave me an e radius but did not set sphere, user error really */
1331: if (!flg) {
1332: ctx->e_radius = 1.5*PetscSqrtReal(8*ctx->k*ctx->thermal_temps[0]/ctx->masses[0]/PETSC_PI)/ctx->v_0; /* normalized radius with thermal velocity of electrons */
1333: }
1334: if (ctx->sphere && (ctx->e_radius <= ctx->i_radius || ctx->radius <= ctx->e_radius)) SETERRQ3(ctx->comm,PETSC_ERR_ARG_WRONG,"bad radii: %g < %g < %g",ctx->i_radius,ctx->e_radius,ctx->radius);
1335: PetscOptionsInt("-dm_landau_sub_thread_block_size", "Number of threads in Kokkos integration point subblock", "plexland.c", ctx->subThreadBlockSize, &ctx->subThreadBlockSize, NULL);
1336: PetscOptionsEnd();
1337: for (ii=ctx->num_species;ii<LANDAU_MAX_SPECIES;ii++) ctx->masses[ii] = ctx->thermal_temps[ii] = ctx->charges[ii] = 0;
1338: if (ctx->verbose > 0) {
1339: PetscPrintf(ctx->comm, "masses: e=%10.3e; ions in proton mass units: %10.3e %10.3e ...\n",ctx->masses[0],ctx->masses[1]/1.6720e-27,ctx->num_species>2 ? ctx->masses[2]/1.6720e-27 : 0);
1340: PetscPrintf(ctx->comm, "charges: e=%10.3e; charges in elementary units: %10.3e %10.3e\n", ctx->charges[0],-ctx->charges[1]/ctx->charges[0],ctx->num_species>2 ? -ctx->charges[2]/ctx->charges[0] : 0);
1341: PetscPrintf(ctx->comm, "thermal T (K): e=%10.3e i=%10.3e imp=%10.3e. v_0=%10.3e n_0=%10.3e t_0=%10.3e domain=%10.3e\n",ctx->thermal_temps[0],ctx->thermal_temps[1],ctx->num_species>2 ? ctx->thermal_temps[2] : 0,ctx->v_0,ctx->n_0,ctx->t_0,ctx->radius);
1342: }
1343: DMDestroy(&dummy);
1344: {
1345: PetscMPIInt rank;
1346: MPI_Comm_rank(ctx->comm, &rank);
1347: /* PetscLogStage setup_stage; */
1348: PetscLogEventRegister("Landau Jacobian", DM_CLASSID, &ctx->events[0]); /* 0 */
1349: PetscLogEventRegister(" Initialize", DM_CLASSID, &ctx->events[10]); /* 10 */
1350: PetscLogEventRegister(" IP Data-jac", DM_CLASSID, &ctx->events[7]); /* 7 */
1351: PetscLogEventRegister(" Kernel-init", DM_CLASSID, &ctx->events[3]); /* 3 */
1352: PetscLogEventRegister(" GPU Kernel", DM_CLASSID, &ctx->events[4]); /* 4 */
1353: PetscLogEventRegister(" Copy to CPU", DM_CLASSID, &ctx->events[5]); /* 5 */
1354: PetscLogEventRegister(" Jac-assemble", DM_CLASSID, &ctx->events[6]); /* 6 */
1355: PetscLogEventRegister(" Jac-f-df", DM_CLASSID, &ctx->events[8]); /* 8 */
1356: PetscLogEventRegister(" Jac asmbl setup", DM_CLASSID, &ctx->events[2]); /* 2 */
1357: PetscLogEventRegister("Mass Operator", DM_CLASSID, &ctx->events[9]); /* 9 */
1358: PetscLogEventRegister(" IP Data-mass", DM_CLASSID, &ctx->events[1]); /* 1 */
1360: if (rank) { /* turn off output stuff for duplicate runs - do we need to add the prefix to all this? */
1361: PetscOptionsClearValue(NULL,"-snes_converged_reason");
1362: PetscOptionsClearValue(NULL,"-ksp_converged_reason");
1363: PetscOptionsClearValue(NULL,"-snes_monitor");
1364: PetscOptionsClearValue(NULL,"-ksp_monitor");
1365: PetscOptionsClearValue(NULL,"-ts_monitor");
1366: PetscOptionsClearValue(NULL,"-ts_adapt_monitor");
1367: PetscOptionsClearValue(NULL,"-dm_landau_amr_dm_view");
1368: PetscOptionsClearValue(NULL,"-dm_landau_amr_vec_view");
1369: PetscOptionsClearValue(NULL,"-dm_landau_pre_dm_view");
1370: PetscOptionsClearValue(NULL,"-dm_landau_pre_vec_view");
1371: PetscOptionsClearValue(NULL,"-info");
1372: }
1373: }
1374: return(0);
1375: }
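/*
  Example runtime options for the geometry parameters processed above (an
  illustrative sketch only; the values are not from any particular test):

    -dm_landau_ion_charges 1,18 -dm_landau_domain_radius 5. \
    -dm_landau_e_radius 1.5 -dm_landau_i_radius 0.05 \
    -dm_landau_refine_tol 0.1,0.1,0.1 -dm_landau_coarsen_tol 0.01,0.01,0.01

  Radii are in units of the electron thermal velocity; a negative
  -dm_landau_domain_radius is interpreted as a fraction of the speed of light
  (and 0 defaults to 0.75c), as coded above.
*/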
1377: /*@C
1378: LandauCreateVelocitySpace - Create a DMPlex velocity space mesh
1380: Collective on comm
1382: Input Parameters:
1383: + comm - The MPI communicator
1384: . dim - velocity space dimension (2 for axisymmetric, 3 for full 3V)
1385: - prefix - prefix for options
1387: Output Parameters:
1388: + dm - The DM object representing the mesh
1389: . X - A vector (user destroys)
1390: - J - Optional matrix (destroyed with the DM)
1392: Level: beginner
1394: .keywords: mesh
1395: .seealso: DMPlexCreate(), LandauDestroyVelocitySpace()
1396: @*/
1397: PetscErrorCode LandauCreateVelocitySpace(MPI_Comm comm, PetscInt dim, const char prefix[], Vec *X, Mat *J, DM *dm)
1398: {
1400: LandauCtx *ctx;
1401: PetscBool prealloc_only,flg;
1404: if (dim!=2 && dim!=3) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "Only 2D and 3D supported");
1405: ctx = (LandauCtx*)malloc(sizeof(LandauCtx));
1406: ctx->comm = comm; /* used for diagnostics and global errors */
1407: /* process options */
1408: ProcessOptions(ctx,prefix);
1409: /* Create Mesh */
1410: LandauDMCreateVMesh(PETSC_COMM_SELF, dim, prefix, ctx, dm);
1411: prealloc_only = (*dm)->prealloc_only;
1412: DMViewFromOptions(*dm,NULL,"-dm_landau_pre_dm_view");
1413: DMSetApplicationContext(*dm, ctx);
1414: /* create FEM */
1415: SetupDS(*dm,dim,ctx);
1416: /* set initial state */
1417: DMCreateGlobalVector(*dm,X);
1418: PetscObjectSetName((PetscObject) *X, "u");
1419: /* initial static refinement, no solve */
1420: LandauSetInitialCondition(*dm, *X, ctx);
1421: VecViewFromOptions(*X, NULL, "-dm_landau_pre_vec_view");
1422: /* forest refinement */
1423: if (ctx->errorIndicator) {
1424: /* AMR */
1425: adapt(dm,ctx,X);
1426: if ((*dm)->prealloc_only != prealloc_only) SETERRQ(PetscObjectComm((PetscObject)*dm),PETSC_ERR_SUP,"(*dm)->prealloc_only != prealloc_only");
1427: DMViewFromOptions(*dm,NULL,"-dm_landau_amr_dm_view");
1428: VecViewFromOptions(*X, NULL, "-dm_landau_amr_vec_view");
1429: }
1430: DMSetApplicationContext(*dm, ctx);
1431: ctx->dmv = *dm;
1432: if (ctx->dmv->prealloc_only != prealloc_only) SETERRQ(PetscObjectComm((PetscObject)*dm),PETSC_ERR_SUP,"ctx->dmv->prealloc_only != prealloc_only");
1433: DMCreateMatrix(ctx->dmv, &ctx->J);
1434: MatSetOption(ctx->J, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
1435: MatSetOption(ctx->J, MAT_STRUCTURALLY_SYMMETRIC, PETSC_TRUE);
1436: if (J) *J = ctx->J;
1437: /* check for types that we need */
1438: #if defined(PETSC_HAVE_KOKKOS)
1439: if (ctx->deviceType == LANDAU_CPU) {
1440: PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"");
1441: //if (flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"with device=cpu must not use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos");
1442: }
1443: #elif defined(PETSC_HAVE_CUDA)
1444: if (ctx->deviceType == LANDAU_CPU) {
1445: PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"");
1446: //if (flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"with device=cpu must not use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda");
1447: }
1448: #endif
1449: if (ctx->gpu_assembly) { /* we need GPU object with GPU assembly */
1450: if (ctx->deviceType == LANDAU_CUDA) {
1451: PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJCUSPARSE,MATMPIAIJCUSPARSE,MATAIJCUSPARSE,"");
1452: if (!flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijcusparse -dm_vec_type cuda' for GPU assembly and Cuda");
1453: } else if (ctx->deviceType == LANDAU_KOKKOS) {
1454: PetscObjectTypeCompareAny((PetscObject)ctx->J,&flg,MATSEQAIJKOKKOS,MATMPIAIJKOKKOS,MATAIJKOKKOS,"");
1455: #if defined(PETSC_HAVE_KOKKOS_KERNELS)
1456: if (!flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"must use '-dm_mat_type aijkokkos -dm_vec_type kokkos' for GPU assembly and Kokkos");
1457: #else
1458: if (!flg) SETERRQ(ctx->comm,PETSC_ERR_ARG_WRONG,"must configure with '--download-kokkos-kernels=1' for GPU assembly and Kokkos");
1459: #endif
1460: }
1461: }
1462: return(0);
1463: }
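/*
  Minimal usage sketch (illustrative; error checking elided as in the rest of
  this listing). The caller destroys X; J is destroyed with the velocity space:

    DM  dm;
    Vec X;
    Mat J;
    LandauCreateVelocitySpace(PETSC_COMM_SELF, 2, "", &X, &J, &dm);
    ... advance X in time, e.g. with LandauIFunction/LandauIJacobian below ...
    VecDestroy(&X);
    LandauDestroyVelocitySpace(&dm);
*/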
1465: /*@
1466: LandauDestroyVelocitySpace - Destroy a DMPlex velocity space mesh
1468: Collective on dm
1470: Input/Output Parameters:
1471: . dm - the dm to destroy
1473: Level: beginner
1475: .keywords: mesh
1476: .seealso: LandauCreateVelocitySpace()
1477: @*/
1478: PetscErrorCode LandauDestroyVelocitySpace(DM *dm)
1479: {
1480: PetscErrorCode ierr; PetscInt ii;
1481: LandauCtx *ctx;
1482: PetscContainer container = NULL;
1484: DMGetApplicationContext(*dm, &ctx);
1485: PetscObjectQuery((PetscObject)ctx->J,"coloring", (PetscObject*)&container);
1486: if (container) {
1487: PetscContainerDestroy(&container);
1488: }
1489: MatDestroy(&ctx->M);
1490: MatDestroy(&ctx->J);
1491: for (ii=0;ii<ctx->num_species;ii++) {
1492: PetscFEDestroy(&ctx->fe[ii]);
1493: }
1494: free(ctx);
1495: DMDestroy(dm);
1496: return(0);
1497: }
1499: /* < v, u > */
1500: static void f0_s_den(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1501: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1502: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1503: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1504: {
1505: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1506: f0[0] = u[ii];
1507: }
1509: /* < v, x_j u > */
1510: static void f0_s_mom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1511: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1512: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1513: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1514: {
1515: PetscInt ii = (PetscInt)PetscRealPart(constants[0]), jj = (PetscInt)PetscRealPart(constants[1]);
1516: f0[0] = x[jj]*u[ii]; /* x momentum */
1517: }
1519: static void f0_s_v2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1520: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1521: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1522: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1523: {
1524: PetscInt i, ii = (PetscInt)PetscRealPart(constants[0]);
1525: PetscReal tmp1 = 0.;
1526: for (i = 0; i < dim; ++i) tmp1 += x[i]*x[i];
1527: f0[0] = tmp1*u[ii];
1528: }
1530: /* < v, 2 pi r u > */
1531: static void f0_s_rden(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1532: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1533: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1534: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1535: {
1536: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1537: f0[0] = 2.*PETSC_PI*x[0]*u[ii];
1538: }
1540: /* < v, 2 pi r z u > */
1541: static void f0_s_rmom(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1542: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1543: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1544: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1545: {
1546: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1547: f0[0] = 2.*PETSC_PI*x[0]*x[1]*u[ii];
1548: }
1550: static void f0_s_rv2(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1551: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1552: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1553: PetscReal t, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar *f0)
1554: {
1555: PetscInt ii = (PetscInt)PetscRealPart(constants[0]);
1556: f0[0] = 2.*PETSC_PI*x[0]*(x[0]*x[0] + x[1]*x[1])*u[ii];
1557: }
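/*
  The f0_s_* functionals above are not called directly; they are set as DS
  objectives and integrated over the mesh. A minimal sketch of the pattern
  used in LandauPrintNorms below (constants[0] selects the species field ii):

    PetscScalar user[2] = {(PetscScalar)ii, 0.}, tt[LANDAU_MAX_SPECIES];
    PetscDSSetConstants(prob, 2, user);
    PetscDSSetObjective(prob, 0, &f0_s_den);
    DMPlexComputeIntegralFEM(plex, X, tt, ctx);  // tt[0] now holds the density moment of species ii
*/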
1559: /*@
1560: LandauPrintNorms - collects moments and prints them
1562: Collective on dm
1564: Input Parameters:
1565: + X - the state
1566: - stepi - current step to print
1568: Level: beginner
1570: .keywords: mesh
1571: .seealso: LandauCreateVelocitySpace()
1572: @*/
1573: PetscErrorCode LandauPrintNorms(Vec X, PetscInt stepi)
1574: {
1576: LandauCtx *ctx;
1577: PetscDS prob;
1578: DM plex,dm;
1579: PetscInt cStart, cEnd, dim, ii;
1580: PetscScalar xmomentumtot=0, ymomentumtot=0, zmomentumtot=0, energytot=0, densitytot=0, tt[LANDAU_MAX_SPECIES];
1581: PetscScalar xmomentum[LANDAU_MAX_SPECIES], ymomentum[LANDAU_MAX_SPECIES], zmomentum[LANDAU_MAX_SPECIES], energy[LANDAU_MAX_SPECIES], density[LANDAU_MAX_SPECIES];
1584: VecGetDM(X, &dm);
1585: if (!dm) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no DM");
1586: DMGetDimension(dm, &dim);
1587: DMGetApplicationContext(dm, &ctx);
1588: if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
1589: DMConvert(ctx->dmv, DMPLEX, &plex);
1590: DMCreateDS(plex);
1591: DMGetDS(plex, &prob);
1592: /* print momentum and energy */
1593: for (ii=0;ii<ctx->num_species;ii++) {
1594: PetscScalar user[2] = { (PetscScalar)ii, (PetscScalar)ctx->charges[ii]};
1595: PetscDSSetConstants(prob, 2, user);
1596: if (dim==2) { /* 2V: axisymmetric (r,z) cylindrical coordinates */
1597: PetscDSSetObjective(prob, 0, &f0_s_rden);
1598: DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1599: density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
1600: PetscDSSetObjective(prob, 0, &f0_s_rmom);
1601: DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1602: zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1603: PetscDSSetObjective(prob, 0, &f0_s_rv2);
1604: DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1605: energy[ii] = tt[0]*0.5*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
1606: zmomentumtot += zmomentum[ii];
1607: energytot += energy[ii];
1608: densitytot += density[ii];
1609: PetscPrintf(ctx->comm, "%3D) species-%D: charge density= %20.13e z-momentum= %20.13e energy= %20.13e",stepi,ii,PetscRealPart(density[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));
1610: } else { /* 3V */
1611: PetscDSSetObjective(prob, 0, &f0_s_den);
1612: DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1613: density[ii] = tt[0]*ctx->n_0*ctx->charges[ii];
1614: PetscDSSetObjective(prob, 0, &f0_s_mom);
1615: user[1] = 0;
1616: DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1617: xmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1618: user[1] = 1;
1619: DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1620: ymomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1621: user[1] = 2;
1622: DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1623: zmomentum[ii] = tt[0]*ctx->n_0*ctx->v_0*ctx->masses[ii];
1624: PetscDSSetObjective(prob, 0, &f0_s_v2);
1625: DMPlexComputeIntegralFEM(plex,X,tt,ctx);
1626: energy[ii] = 0.5*tt[0]*ctx->n_0*ctx->v_0*ctx->v_0*ctx->masses[ii];
1627: PetscPrintf(ctx->comm, "%3D) species %D: density=%20.13e, x-momentum=%20.13e, y-momentum=%20.13e, z-momentum=%20.13e, energy=%21.13e",
1628: stepi,ii,PetscRealPart(density[ii]),PetscRealPart(xmomentum[ii]),PetscRealPart(ymomentum[ii]),PetscRealPart(zmomentum[ii]),PetscRealPart(energy[ii]));
1629: xmomentumtot += xmomentum[ii];
1630: ymomentumtot += ymomentum[ii];
1631: zmomentumtot += zmomentum[ii];
1632: energytot += energy[ii];
1633: densitytot += density[ii];
1634: }
1635: if (ctx->num_species>1) PetscPrintf(ctx->comm, "\n");
1636: }
1637: /* totals */
1638: DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1639: DMDestroy(&plex);
1640: if (ctx->num_species>1) {
1641: if (dim==2) {
1642: PetscPrintf(ctx->comm, "\t%3D) Total: charge density=%21.13e, momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %D cells)",
1643: stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);
1644: } else {
1645: PetscPrintf(ctx->comm, "\t%3D) Total: charge density=%21.13e, x-momentum=%21.13e, y-momentum=%21.13e, z-momentum=%21.13e, energy=%21.13e (m_i[0]/m_e = %g, %D cells)",
1646: stepi,(double)PetscRealPart(densitytot),(double)PetscRealPart(xmomentumtot),(double)PetscRealPart(ymomentumtot),(double)PetscRealPart(zmomentumtot),(double)PetscRealPart(energytot),(double)(ctx->masses[1]/ctx->masses[0]),cEnd-cStart);
1647: }
1648: } else {
1649: PetscPrintf(ctx->comm, " -- %D cells",cEnd-cStart);
1650: }
1651: if (ctx->verbose > 1) {PetscPrintf(ctx->comm,", %D sub (vector) threads\n",ctx->subThreadBlockSize);}
1652: else {PetscPrintf(ctx->comm,"\n");}
1653: return(0);
1654: }
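/*
  LandauPrintNorms is typically called from a TS monitor so conservation of
  density, momentum and energy can be tracked over the run; an illustrative
  sketch (the monitor name is hypothetical):

    static PetscErrorCode Monitor(TS ts, PetscInt stepi, PetscReal time, Vec X, void *actx)
    {
      return LandauPrintNorms(X, stepi);
    }
    ...
    TSMonitorSet(ts, Monitor, NULL, NULL);
*/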
1656: static PetscErrorCode destroy_coloring (void *is)
1657: {
1658: ISColoring tmp = (ISColoring)is;
1659: return ISColoringDestroy(&tmp);
1660: }
1662: /*@
1663: LandauCreateColoring - create a coloring and add to matrix (Landau context used just for 'print' flag, should be in DMPlex)
1665: Collective on JacP
1667: Input Parameters:
1668: + JacP - matrix to add coloring to
1669: - plex - The DM
1671: Output Parameter:
1672: . container - Container with coloring
1674: Level: beginner
1676: .keywords: mesh
1677: .seealso: LandauCreateVelocitySpace()
1678: @*/
1679: PetscErrorCode LandauCreateColoring(Mat JacP, DM plex, PetscContainer *container)
1680: {
1681: PetscErrorCode ierr;
1682: PetscInt dim,cell,i,ej,nc,Nv,totDim,numGCells,cStart,cEnd;
1683: ISColoring iscoloring = NULL;
1684: Mat G,Q;
1685: PetscScalar ones[128];
1686: MatColoring mc;
1687: IS *is;
1688: PetscInt csize,colour,j,k;
1689: const PetscInt *indices;
1690: PetscInt numComp[1];
1691: PetscInt numDof[4];
1692: PetscFE fe;
1693: DM colordm;
1694: PetscSection csection, section, globalSection;
1695: PetscDS prob;
1696: LandauCtx *ctx;
1699: DMGetApplicationContext(plex, &ctx);
1700: DMGetLocalSection(plex, &section);
1701: DMGetGlobalSection(plex, &globalSection);
1702: DMGetDimension(plex, &dim);
1703: DMGetDS(plex, &prob);
1704: PetscDSGetTotalDimension(prob, &totDim);
1705: DMPlexGetHeightStratum(plex,0,&cStart,&cEnd);
1706: numGCells = cEnd - cStart;
1707: /* create cell centered DM */
1708: DMClone(plex, &colordm);
1709: PetscFECreateDefault(PetscObjectComm((PetscObject) plex), dim, 1, PETSC_FALSE, "color_", PETSC_DECIDE, &fe);
1710: PetscObjectSetName((PetscObject) fe, "color");
1711: DMSetField(colordm, 0, NULL, (PetscObject)fe);
1712: PetscFEDestroy(&fe);
1713: for (i = 0; i < (dim+1); ++i) numDof[i] = 0;
1714: numDof[dim] = 1;
1715: numComp[0] = 1;
1716: DMPlexCreateSection(colordm, NULL, numComp, numDof, 0, NULL, NULL, NULL, NULL, &csection);
1717: PetscSectionSetFieldName(csection, 0, "color");
1718: DMSetLocalSection(colordm, csection);
1719: DMViewFromOptions(colordm,NULL,"-color_dm_view");
1720: /* get vertex to element map Q and coloring graph G */
1721: MatGetSize(JacP,NULL,&Nv);
1722: MatCreateAIJ(PETSC_COMM_SELF,PETSC_DECIDE,PETSC_DECIDE,numGCells,Nv,totDim,NULL,0,NULL,&Q);
1723: for (i=0;i<128;i++) ones[i] = 1.0;
1724: for (cell = cStart, ej = 0 ; cell < cEnd; ++cell, ++ej) {
1725: PetscInt numindices,*cindices; /* distinct name to avoid shadowing 'indices' above */
1726: DMPlexGetClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &cindices, NULL, NULL);
1727: if (numindices>128) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "too many indices. %D > 128",numindices);
1728: MatSetValues(Q,1,&ej,numindices,cindices,ones,ADD_VALUES);
1729: DMPlexRestoreClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &cindices, NULL, NULL);
1730: }
1731: MatAssemblyBegin(Q, MAT_FINAL_ASSEMBLY);
1732: MatAssemblyEnd(Q, MAT_FINAL_ASSEMBLY);
1733: MatMatTransposeMult(Q,Q,MAT_INITIAL_MATRIX,4.0,&G);
1734: PetscObjectSetName((PetscObject) Q, "Q");
1735: PetscObjectSetName((PetscObject) G, "coloring graph");
1736: MatViewFromOptions(G,NULL,"-coloring_mat_view");
1737: MatViewFromOptions(Q,NULL,"-coloring_mat_view");
1738: MatDestroy(&Q);
1739: /* coloring */
1740: MatColoringCreate(G,&mc);
1741: MatColoringSetDistance(mc,1);
1742: MatColoringSetType(mc,MATCOLORINGJP);
1743: MatColoringSetFromOptions(mc);
1744: MatColoringApply(mc,&iscoloring);
1745: MatColoringDestroy(&mc);
1746: /* view */
1747: ISColoringViewFromOptions(iscoloring,NULL,"-coloring_is_view");
1748: ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&nc,&is);
1749: if (ctx && ctx->verbose > 2) {
1750: PetscViewer viewer;
1751: Vec color_vec, eidx_vec;
1752: DMGetGlobalVector(colordm, &color_vec);
1753: DMGetGlobalVector(colordm, &eidx_vec);
1754: for (colour=0; colour<nc; colour++) {
1755: ISGetLocalSize(is[colour],&csize);
1756: ISGetIndices(is[colour],&indices);
1757: for (j=0; j<csize; j++) {
1758: PetscScalar v = (PetscScalar)colour;
1759: k = indices[j];
1760: VecSetValues(color_vec,1,&k,&v,INSERT_VALUES);
1761: v = (PetscScalar)k;
1762: VecSetValues(eidx_vec,1,&k,&v,INSERT_VALUES);
1763: }
1764: ISRestoreIndices(is[colour],&indices);
1765: }
1766: /* view */
1767: PetscViewerVTKOpen(ctx->comm, "color.vtu", FILE_MODE_WRITE, &viewer);
1768: PetscObjectSetName((PetscObject) color_vec, "color");
1769: VecView(color_vec, viewer);
1770: PetscViewerDestroy(&viewer);
1771: PetscViewerVTKOpen(ctx->comm, "eidx.vtu", FILE_MODE_WRITE, &viewer);
1772: PetscObjectSetName((PetscObject) eidx_vec, "element-idx");
1773: VecView(eidx_vec, viewer);
1774: PetscViewerDestroy(&viewer);
1775: DMRestoreGlobalVector(colordm, &color_vec);
1776: DMRestoreGlobalVector(colordm, &eidx_vec);
1777: }
1778: PetscSectionDestroy(&csection);
1779: DMDestroy(&colordm);
1780: ISColoringRestoreIS(iscoloring,PETSC_USE_POINTER,&is);
1781: MatDestroy(&G);
1782: /* stash coloring */
1783: PetscContainerCreate(PETSC_COMM_SELF, container);
1784: PetscContainerSetPointer(*container,(void*)iscoloring);
1785: PetscContainerSetUserDestroy(*container, destroy_coloring);
1786: PetscObjectCompose((PetscObject)JacP,"coloring",(PetscObject)*container);
1787: if (ctx && ctx->verbose > 0) {
1788: PetscPrintf(ctx->comm, "Made coloring with %D colors\n", nc);
1789: }
1790: return(0);
1791: }
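/*
  The coloring is stashed on the matrix, so later assembly passes can look it
  up instead of recomputing it; a sketch of the lookup a caller of
  LandauAssembleOpenMP below would do:

    PetscContainer container = NULL;
    PetscObjectQuery((PetscObject)JacP, "coloring", (PetscObject *)&container);
    if (!container) { LandauCreateColoring(JacP, plex, &container); }
*/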
1793: PetscErrorCode LandauAssembleOpenMP(PetscInt cStart, PetscInt cEnd, PetscInt totDim, DM plex, PetscSection section, PetscSection globalSection, Mat JacP, PetscScalar elemMats[], PetscContainer container)
1794: {
1795: PetscErrorCode ierr;
1796: IS *is;
1797: PetscInt nc,colour,j;
1798: const PetscInt *clr_idxs;
1799: ISColoring iscoloring;
1801: PetscContainerGetPointer(container,(void**)&iscoloring);
1802: ISColoringGetIS(iscoloring,PETSC_USE_POINTER,&nc,&is);
1803: for (colour=0; colour<nc; colour++) {
1804: PetscInt *idx_arr[1024]; /* need to make dynamic for general use */
1805: PetscScalar *new_el_mats[1024];
1806: PetscInt idx_size[1024],csize;
1807: ISGetLocalSize(is[colour],&csize);
1808: if (csize>1024) SETERRQ1(PETSC_COMM_SELF, PETSC_ERR_PLIB, "too many elements in color. %D > 1024",csize);
1809: ISGetIndices(is[colour],&clr_idxs);
1810: /* get indices and mats */
1811: for (j=0; j<csize; j++) {
1812: PetscInt cell = cStart + clr_idxs[j];
1813: PetscInt numindices,*indices;
1814: PetscScalar *elMat = &elemMats[clr_idxs[j]*totDim*totDim];
1815: PetscScalar *valuesOrig = elMat;
1816: DMPlexGetClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
1817: idx_size[j] = numindices;
1818: PetscMalloc2(numindices,&idx_arr[j],numindices*numindices,&new_el_mats[j]);
1819: PetscMemcpy(idx_arr[j],indices,numindices*sizeof(PetscInt));
1820: PetscMemcpy(new_el_mats[j],elMat,numindices*numindices*sizeof(PetscScalar));
1821: DMPlexRestoreClosureIndices(plex, section, globalSection, cell, PETSC_TRUE, &numindices, &indices, NULL, (PetscScalar **) &elMat);
1822: if (elMat != valuesOrig) {DMRestoreWorkArray(plex, numindices*numindices, MPIU_SCALAR, &elMat);}
1823: }
1824: /* assemble matrix - the OpenMP pragmas below broke CI and are disabled (see note after this function) */
1825: //#pragma omp parallel default(JacP,idx_size,idx_arr,new_el_mats,colour,clr_idxs) private(j)
1826: //#pragma omp parallel for private(j)
1827: for (j=0; j<csize; j++) {
1828: PetscInt numindices = idx_size[j], *indices = idx_arr[j];
1829: PetscScalar *elMat = new_el_mats[j];
1830: MatSetValues(JacP,numindices,indices,numindices,indices,elMat,ADD_VALUES);
1831: }
1832: /* free */
1833: ISRestoreIndices(is[colour],&clr_idxs);
1834: for (j=0; j<csize; j++) {
1835: PetscFree2(idx_arr[j],new_el_mats[j]);
1836: }
1837: }
1838: ISColoringRestoreIS(iscoloring,PETSC_USE_POINTER,&is);
1839: return(0);
1840: }
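/*
  Why the coloring makes concurrent assembly safe: G = Q * Q^T connects any
  two elements that share a matrix row, so a distance-1 coloring of G
  guarantees that elements of one color touch disjoint rows. The MatSetValues
  loop above is therefore conflict-free within a color and could, in
  principle, run as

    #pragma omp parallel for private(j)

  over j (a sketch; the pragmas are disabled above because they broke CI).
*/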
1842: /* < v, u > */
1843: static void g0_1(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1844: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1845: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1846: PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1847: {
1848: g0[0] = 1.;
1849: }
1851: /* < v, 2 pi r u > */
1852: static void g0_r(PetscInt dim, PetscInt Nf, PetscInt NfAux,
1853: const PetscInt uOff[], const PetscInt uOff_x[], const PetscScalar u[], const PetscScalar u_t[], const PetscScalar u_x[],
1854: const PetscInt aOff[], const PetscInt aOff_x[], const PetscScalar a[], const PetscScalar a_t[], const PetscScalar a_x[],
1855: PetscReal t, PetscReal u_tShift, const PetscReal x[], PetscInt numConstants, const PetscScalar constants[], PetscScalar g0[])
1856: {
1857: g0[0] = 2.*PETSC_PI*x[0];
1858: }
1860: /*@
1861: LandauCreateMassMatrix - Create mass matrix for Landau
1863: Collective on dm
1865: Input Parameters:
1866: . dm - the DM object
1868: Output Parameters:
1869: . Amat - The mass matrix (optional); the matrix is also cached in the Landau context
1871: Level: beginner
1873: .keywords: mesh
1874: .seealso: LandauCreateVelocitySpace()
1875: @*/
1876: PetscErrorCode LandauCreateMassMatrix(DM dm, Mat *Amat)
1877: {
1878: DM massDM;
1879: PetscDS prob;
1880: PetscInt ii,dim,N1=1,N2;
1882: LandauCtx *ctx;
1883: Mat M;
1888: DMGetApplicationContext(dm, &ctx);
1889: if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
1890: DMGetDimension(dm, &dim);
1891: DMClone(dm, &massDM);
1892: DMCopyFields(dm, massDM);
1893: DMCreateDS(massDM);
1894: DMGetDS(massDM, &prob);
1895: for (ii=0;ii<ctx->num_species;ii++) {
1896: if (dim==3) {PetscDSSetJacobian(prob, ii, ii, g0_1, NULL, NULL, NULL);}
1897: else {PetscDSSetJacobian(prob, ii, ii, g0_r, NULL, NULL, NULL);}
1898: }
1899: DMViewFromOptions(massDM,NULL,"-dm_landau_mass_dm_view");
1900: DMCreateMatrix(massDM, &M);
1901: MatSetOption(M, MAT_IGNORE_ZERO_ENTRIES, PETSC_TRUE);
1902: {
1903: Vec locX;
1904: DM plex;
1905: DMConvert(massDM, DMPLEX, &plex);
1906: DMGetLocalVector(massDM, &locX);
1907: /* Mass matrix is independent of the input, so no need to fill locX */
1908: if (plex->prealloc_only != dm->prealloc_only) SETERRQ2(PetscObjectComm((PetscObject) dm), PETSC_ERR_PLIB, "plex->prealloc_only %D != dm->prealloc_only %D",plex->prealloc_only,dm->prealloc_only);
1909: DMPlexSNESComputeJacobianFEM(plex, locX, M, M, ctx);
1910: DMRestoreLocalVector(massDM, &locX);
1911: DMDestroy(&plex);
1912: }
1913: DMDestroy(&massDM);
1914: MatGetSize(ctx->J, &N1, NULL);
1915: MatGetSize(M, &N2, NULL);
1916: if (N1 != N2) SETERRQ2(PetscObjectComm((PetscObject) dm), PETSC_ERR_PLIB, "Incorrect matrix sizes: |Jacobian| = %D, |Mass|=%D",N1,N2);
1917: PetscObjectSetName((PetscObject)M, "mass");
1918: MatViewFromOptions(M,NULL,"-dm_landau_mass_mat_view");
1919: ctx->M = M; /* this could be a noop, a = a */
1920: if (Amat) *Amat = M;
1921: return(0);
1922: }
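/*
  Minimal usage sketch (illustrative; error checking elided): build the mass
  matrix once after the velocity space exists; it is cached in the context and
  applied to X_t by LandauIFunction below.

    Mat M;
    LandauCreateMassMatrix(dm, &M);
*/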
1924: /*@
1925: LandauIFunction - TS residual calculation
1927: Collective on ts
1929: Input Parameters:
1930: + ts - The time stepping context
1931: . time_dummy - current time (not used)
1932: . X - Current state
1933: . X_t - Time derivative of current state
1934: - actx - Landau context
1936: Output Parameter:
1937: . F - The residual
1939: Level: beginner
1941: .keywords: mesh
1942: .seealso: LandauCreateVelocitySpace(), LandauIJacobian()
1943: @*/
1944: PetscErrorCode LandauIFunction(TS ts, PetscReal time_dummy, Vec X, Vec X_t, Vec F, void *actx)
1945: {
1947: LandauCtx *ctx=(LandauCtx*)actx;
1948: PetscInt dim;
1949: DM dm;
1952: TSGetDM(ts,&dm);
1953: DMGetApplicationContext(dm, &ctx);
1954: if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
1955: PetscLogEventBegin(ctx->events[0],0,0,0,0);
1956: DMGetDimension(ctx->dmv, &dim);
1957: PetscInfo3(ts, "Create Landau Jacobian t=%g X'=%p %s\n",(double)time_dummy,(void*)X_t,ctx->aux_bool ? " -- seems to be in line search" : "");
1958: LandauFormJacobian_Internal(X,ctx->J,dim,0.0,(void*)ctx);
1959: ctx->aux_bool = PETSC_TRUE;
1960: MatViewFromOptions(ctx->J,NULL,"-landau_jacobian_mat_view");
1961: /* mat vec for op */
1962: MatMult(ctx->J,X,F); /* C*f */
1963: /* add time term */
1964: if (X_t) {
1965: MatMultAdd(ctx->M,X_t,F,F);
1966: }
1967: PetscLogEventEnd(ctx->events[0],0,0,0,0);
1968: return(0);
1969: }
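/*
  In residual form, the function above computes F(t, f, f_t) = M f_t + C(f) f,
  where C(f) = ctx->J is the state-dependent Landau collision operator
  (rebuilt by LandauFormJacobian_Internal with shift=0) and M = ctx->M is the
  mass matrix; when X_t is NULL only the collision term C(f) f is returned.
*/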
1970: static PetscErrorCode MatrixNfDestroy(void *ptr)
1971: {
1972: PetscInt *nf = (PetscInt *)ptr;
1973: PetscErrorCode ierr;
1975: PetscFree(nf);
1976: return(0);
1977: }
1978: /*@
1979: LandauIJacobian - TS Jacobian construction
1981: Collective on ts
1983: Input Parameters:
1984: + ts - The time stepping context
1985: . time_dummy - current time (not used)
1986: . X - Current state
1987: . U_tdummy - Time derivative of current state (not used)
1988: . shift - shift for du/dt term
1989: - actx - Landau context
1991: Output Parameters:
1992: + Amat - Jacobian
1993: - Pmat - same as Amat
1995: Level: beginner
1997: .keywords: mesh
1998: .seealso: LandauCreateVelocitySpace(), LandauIFunction()
1999: @*/
2000: PetscErrorCode LandauIJacobian(TS ts, PetscReal time_dummy, Vec X, Vec U_tdummy, PetscReal shift, Mat Amat, Mat Pmat, void *actx)
2001: {
2003: LandauCtx *ctx=(LandauCtx*)actx;
2004: PetscInt dim;
2005: DM dm;
2006: PetscContainer container;
2008: TSGetDM(ts,&dm);
2009: DMGetApplicationContext(dm, &ctx);
2010: if (!ctx) SETERRQ(PETSC_COMM_SELF, PETSC_ERR_PLIB, "no context");
2011: if (Amat!=Pmat || Amat!=ctx->J) SETERRQ(ctx->comm, PETSC_ERR_PLIB, "Amat!=Pmat || Amat!=ctx->J");
2012: DMGetDimension(ctx->dmv, &dim);
2013: /* get collision Jacobian into A */
2014: PetscLogEventBegin(ctx->events[9],0,0,0,0);
2015: PetscInfo2(ts, "Adding mass to Jacobian t=%g, shift=%g\n",(double)time_dummy,(double)shift);
2016: if (shift==0.0) SETERRQ(ctx->comm, PETSC_ERR_PLIB, "zero shift");
2017: if (!ctx->aux_bool) SETERRQ(ctx->comm, PETSC_ERR_PLIB, "wrong state");
2018: LandauFormJacobian_Internal(X,ctx->J,dim,shift,(void*)ctx);
2019: ctx->aux_bool = PETSC_FALSE;
2020: MatViewFromOptions(Pmat,NULL,"-landau_mat_view");
2021: PetscLogEventEnd(ctx->events[9],0,0,0,0);
2022: /* set number species in Jacobian */
2023: PetscObjectQuery((PetscObject) ctx->J, "Nf", (PetscObject *) &container);
2024: if (!container) {
2025: PetscInt *pNf;
2026: PetscContainerCreate(PETSC_COMM_SELF, &container);
2027: PetscMalloc(sizeof(PetscInt), &pNf);
2028: *pNf = ctx->num_species + 1000*ctx->numConcurrency;
2029: PetscContainerSetPointer(container, (void *)pNf);
2030: PetscContainerSetUserDestroy(container, MatrixNfDestroy);
2031: PetscObjectCompose((PetscObject)ctx->J, "Nf", (PetscObject) container);
2032: PetscContainerDestroy(&container);
2033: }
2035: return(0);
2036: }
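/*
  Putting it together -- a minimal implicit time-stepping sketch (illustrative;
  assumes dm, X and J were obtained from LandauCreateVelocitySpace and the mass
  matrix was created with LandauCreateMassMatrix; error checking elided):

    TS ts;
    TSCreate(PETSC_COMM_SELF, &ts);
    TSSetDM(ts, dm);
    TSSetIFunction(ts, NULL, LandauIFunction, NULL);
    TSSetIJacobian(ts, J, J, LandauIJacobian, NULL);
    TSSetFromOptions(ts);
    TSSolve(ts, X);
    TSDestroy(&ts);

  Note that LandauIJacobian requires Amat == Pmat == ctx->J, which passing J
  for both matrix arguments satisfies.
*/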