Actual source code: fnexp.c
slepc-3.17.0 2022-03-31
1: /*
2: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
3: SLEPc - Scalable Library for Eigenvalue Problem Computations
4: Copyright (c) 2002-, Universitat Politecnica de Valencia, Spain
6: This file is part of SLEPc.
7: SLEPc is distributed under a 2-clause BSD license (see LICENSE).
8: - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
9: */
10: /*
11: Exponential function exp(x)
12: */
14: #include <slepc/private/fnimpl.h>
15: #include <slepcblaslapack.h>
17: PetscErrorCode FNEvaluateFunction_Exp(FN fn,PetscScalar x,PetscScalar *y)
18: {
19: *y = PetscExpScalar(x);
20: PetscFunctionReturn(0);
21: }
23: PetscErrorCode FNEvaluateDerivative_Exp(FN fn,PetscScalar x,PetscScalar *y)
24: {
25: *y = PetscExpScalar(x);
26: PetscFunctionReturn(0);
27: }
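/*
   A minimal usage sketch (not part of this file, error checking omitted):
   the two callbacks above are reached through the generic FN interface once
   the FNEXP type is selected.

     FN          fn;
     PetscScalar y,yp;
     FNCreate(PETSC_COMM_SELF,&fn);
     FNSetType(fn,FNEXP);
     FNEvaluateFunction(fn,1.0,&y);     // y  = exp(1.0), dispatched to FNEvaluateFunction_Exp
     FNEvaluateDerivative(fn,1.0,&yp);  // yp = exp(1.0), dispatched to FNEvaluateDerivative_Exp
     FNDestroy(&fn);
*/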
29: #define MAX_PADE 6
30: #define SWAP(a,b,t) {t=a;a=b;b=t;}
32: PetscErrorCode FNEvaluateFunctionMat_Exp_Pade(FN fn,Mat A,Mat B)
33: {
34: PetscBLASInt n=0,ld,ld2,*ipiv,info,inc=1;
35: PetscInt m,j,k,sexp;
36: PetscBool odd;
37: const PetscInt p=MAX_PADE;
38: PetscReal c[MAX_PADE+1],s,*rwork;
39: PetscScalar scale,mone=-1.0,one=1.0,two=2.0,zero=0.0;
40: PetscScalar *Ba,*As,*A2,*Q,*P,*W,*aux;
41: const PetscScalar *Aa;
43: MatDenseGetArrayRead(A,&Aa);
44: MatDenseGetArray(B,&Ba);
45: MatGetSize(A,&m,NULL);
46: PetscBLASIntCast(m,&n);
47: ld = n;
48: ld2 = ld*ld;
49: P = Ba;
50: PetscMalloc6(m*m,&Q,m*m,&W,m*m,&As,m*m,&A2,ld,&rwork,ld,&ipiv);
51: PetscArraycpy(As,Aa,ld2);
53: /* Pade' coefficients */
54: c[0] = 1.0;
55: for (k=1;k<=p;k++) c[k] = c[k-1]*(p+1-k)/(k*(2*p+1-k));
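/*
   The recurrence above generates the coefficients of the diagonal [p/p] Pade
   approximant to exp(x) (here p = MAX_PADE = 6):

     c_0 = 1,   c_k = c_{k-1}*(p+1-k)/(k*(2p+1-k)) = p!(2p-k)! / ((2p)! k! (p-k)!),

   so that exp(x) is approximated by N(x)/N(-x) with N(x) = sum_k c_k x^k.
   Below, Q accumulates the even part of N and P the odd part, both evaluated
   at the scaled matrix As.
*/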
57: /* Scaling */
58: s = LAPACKlange_("I",&n,&n,As,&ld,rwork);
59: PetscLogFlops(1.0*n*n);
60: if (s>0.5) {
61: sexp = PetscMax(0,(int)(PetscLogReal(s)/PetscLogReal(2.0))+2);
62: scale = PetscPowRealInt(2.0,-sexp);
63: PetscStackCallBLAS("BLASscal",BLASscal_(&ld2,&scale,As,&inc));
64: PetscLogFlops(1.0*n*n);
65: } else sexp = 0;
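/*
   Scaling and squaring: when ||As||_inf > 1/2 the matrix is divided by 2^sexp
   so that the Pade approximant is evaluated at a matrix of modest norm, and
   the result is recovered at the end from

     exp(A) = ( exp(A/2^sexp) )^(2^sexp),

   which is what the squaring loop over k=1..sexp after the gesv solve does.
*/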
67: /* Horner evaluation */
68: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n,&n,&n,&one,As,&ld,As,&ld,&zero,A2,&ld));
69: PetscLogFlops(2.0*n*n*n);
70: PetscArrayzero(Q,ld2);
71: PetscArrayzero(P,ld2);
72: for (j=0;j<n;j++) {
73: Q[j+j*ld] = c[p];
74: P[j+j*ld] = c[p-1];
75: }
77: odd = PETSC_TRUE;
78: for (k=p-1;k>0;k--) {
79: if (odd) {
80: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n,&n,&n,&one,Q,&ld,A2,&ld,&zero,W,&ld));
81: SWAP(Q,W,aux);
82: for (j=0;j<n;j++) Q[j+j*ld] += c[k-1];
83: odd = PETSC_FALSE;
84: } else {
85: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n,&n,&n,&one,P,&ld,A2,&ld,&zero,W,&ld));
86: SWAP(P,W,aux);
87: for (j=0;j<n;j++) P[j+j*ld] += c[k-1];
88: odd = PETSC_TRUE;
89: }
90: PetscLogFlops(2.0*n*n*n);
91: }
92: /*if (odd) {
93: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n,&n,&n,&one,Q,&ld,As,&ld,&zero,W,&ld));
94: SWAP(Q,W,aux);
95: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&ld2,&mone,P,&inc,Q,&inc));
96: PetscStackCallBLAS("LAPACKgesv",LAPACKgesv_(&n,&n,Q,&ld,ipiv,P,&ld,&info));
97: SlepcCheckLapackInfo("gesv",info);
98: PetscStackCallBLAS("BLASscal",BLASscal_(&ld2,&two,P,&inc));
99: for (j=0;j<n;j++) P[j+j*ld] += 1.0;
100: PetscStackCallBLAS("BLASscal",BLASscal_(&ld2,&mone,P,&inc));
101: } else {*/
102: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n,&n,&n,&one,P,&ld,As,&ld,&zero,W,&ld));
103: SWAP(P,W,aux);
104: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&ld2,&mone,P,&inc,Q,&inc));
105: PetscStackCallBLAS("LAPACKgesv",LAPACKgesv_(&n,&n,Q,&ld,ipiv,P,&ld,&info));
106: SlepcCheckLapackInfo("gesv",info);
107: PetscStackCallBLAS("BLASscal",BLASscal_(&ld2,&two,P,&inc));
108: for (j=0;j<n;j++) P[j+j*ld] += 1.0;
109: /*}*/
110: PetscLogFlops(2.0*n*n*n+2.0*n*n*n/3.0+4.0*n*n);
112: for (k=1;k<=sexp;k++) {
113: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n,&n,&n,&one,P,&ld,P,&ld,&zero,W,&ld));
114: PetscArraycpy(P,W,ld2);
115: }
116: if (P!=Ba) PetscArraycpy(Ba,P,ld2);
117: PetscLogFlops(2.0*n*n*n*sexp);
119: PetscFree6(Q,W,As,A2,rwork,ipiv);
120: MatDenseRestoreArrayRead(A,&Aa);
121: MatDenseRestoreArray(B,&Ba);
122: PetscFunctionReturn(0);
123: }
125: /*
126: * Set scaling factor (s) and Pade degree (k,m)
127: */
128: static PetscErrorCode sexpm_params(PetscReal nrm,PetscInt *s,PetscInt *k,PetscInt *m)
129: {
130: if (nrm>1) {
131: if (nrm<200) {*s = 4; *k = 5; *m = *k-1;}
132: else if (nrm<1e4) {*s = 4; *k = 4; *m = *k+1;}
133: else if (nrm<1e6) {*s = 4; *k = 3; *m = *k+1;}
134: else if (nrm<1e9) {*s = 3; *k = 3; *m = *k+1;}
135: else if (nrm<1e11) {*s = 2; *k = 3; *m = *k+1;}
136: else if (nrm<1e12) {*s = 2; *k = 2; *m = *k+1;}
137: else if (nrm<1e14) {*s = 2; *k = 1; *m = *k+1;}
138: else {*s = 1; *k = 1; *m = *k+1;}
139: } else { /* nrm<1 */
140: if (nrm>0.5) {*s = 4; *k = 4; *m = *k-1;}
141: else if (nrm>0.3) {*s = 3; *k = 4; *m = *k-1;}
142: else if (nrm>0.15) {*s = 2; *k = 4; *m = *k-1;}
143: else if (nrm>0.07) {*s = 1; *k = 4; *m = *k-1;}
144: else if (nrm>0.01) {*s = 0; *k = 4; *m = *k-1;}
145: else if (nrm>3e-4) {*s = 0; *k = 3; *m = *k-1;}
146: else if (nrm>1e-5) {*s = 0; *k = 3; *m = 0;}
147: else if (nrm>1e-8) {*s = 0; *k = 2; *m = 0;}
148: else {*s = 0; *k = 1; *m = 0;}
149: }
150: PetscFunctionReturn(0);
151: }
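/*
   Two rows of the table above, read as worked examples: for ||A|| ~ 10 the
   routine returns s=4, k=5, m=4, i.e. a subdiagonal (5,4) Pade approximant
   applied to A/2^4 followed by 4 squarings; for ||A|| ~ 1e-6 it returns
   s=0, k=2, m=0, which the callers evaluate as the plain polynomial
   I + A + A^2/2 with no squaring.
*/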
153: #if defined(PETSC_HAVE_COMPLEX)
154: /*
155: * Partial fraction form coefficients.
156:  * If query is true, only the sizes needed to store the coefficients are returned.
157: */
158: static PetscErrorCode getcoeffs(PetscInt k,PetscInt m,PetscComplex *r,PetscComplex *q,PetscComplex *remain,PetscBool query)
159: {
160: PetscInt i;
161: const PetscComplex /* m == k+1 */
162: p1r4[5] = {-1.582680186458572e+01 - 2.412564578224361e+01*PETSC_i,
163: -1.582680186458572e+01 + 2.412564578224361e+01*PETSC_i,
164: 1.499984465975511e+02 + 6.804227952202417e+01*PETSC_i,
165: 1.499984465975511e+02 - 6.804227952202417e+01*PETSC_i,
166: -2.733432894659307e+02 },
167: p1q4[5] = { 3.655694325463550e+00 + 6.543736899360086e+00*PETSC_i,
168: 3.655694325463550e+00 - 6.543736899360086e+00*PETSC_i,
169: 5.700953298671832e+00 + 3.210265600308496e+00*PETSC_i,
170: 5.700953298671832e+00 - 3.210265600308496e+00*PETSC_i,
171: 6.286704751729261e+00 },
172: p1r3[4] = {-1.130153999597152e+01 + 1.247167585025031e+01*PETSC_i,
173: -1.130153999597152e+01 - 1.247167585025031e+01*PETSC_i,
174: 1.330153999597152e+01 - 6.007173273704750e+01*PETSC_i,
175: 1.330153999597152e+01 + 6.007173273704750e+01*PETSC_i},
176: p1q3[4] = { 3.212806896871536e+00 + 4.773087433276636e+00*PETSC_i,
177: 3.212806896871536e+00 - 4.773087433276636e+00*PETSC_i,
178: 4.787193103128464e+00 + 1.567476416895212e+00*PETSC_i,
179: 4.787193103128464e+00 - 1.567476416895212e+00*PETSC_i},
180: p1r2[3] = { 7.648749087422928e+00 + 4.171640244747463e+00*PETSC_i,
181: 7.648749087422928e+00 - 4.171640244747463e+00*PETSC_i,
182: -1.829749817484586e+01 },
183: p1q2[3] = { 2.681082873627756e+00 + 3.050430199247411e+00*PETSC_i,
184: 2.681082873627756e+00 - 3.050430199247411e+00*PETSC_i,
185: 3.637834252744491e+00 },
186: p1r1[2] = { 1.000000000000000e+00 - 3.535533905932738e+00*PETSC_i,
187: 1.000000000000000e+00 + 3.535533905932738e+00*PETSC_i},
188: p1q1[2] = { 2.000000000000000e+00 + 1.414213562373095e+00*PETSC_i,
189: 2.000000000000000e+00 - 1.414213562373095e+00*PETSC_i};
190: const PetscComplex /* m == k-1 */
191: m1r5[4] = {-1.423367961376821e+02 - 1.385465094833037e+01*PETSC_i,
192: -1.423367961376821e+02 + 1.385465094833037e+01*PETSC_i,
193: 2.647367961376822e+02 - 4.814394493714596e+02*PETSC_i,
194: 2.647367961376822e+02 + 4.814394493714596e+02*PETSC_i},
195: m1q5[4] = { 5.203941240131764e+00 + 5.805856841805367e+00*PETSC_i,
196: 5.203941240131764e+00 - 5.805856841805367e+00*PETSC_i,
197: 6.796058759868242e+00 + 1.886649260140217e+00*PETSC_i,
198: 6.796058759868242e+00 - 1.886649260140217e+00*PETSC_i},
199: m1r4[3] = { 2.484269593165883e+01 + 7.460342395992306e+01*PETSC_i,
200: 2.484269593165883e+01 - 7.460342395992306e+01*PETSC_i,
201: -1.734353918633177e+02 },
202: m1q4[3] = { 4.675757014491557e+00 + 3.913489560603711e+00*PETSC_i,
203: 4.675757014491557e+00 - 3.913489560603711e+00*PETSC_i,
204: 5.648485971016893e+00 },
205: m1r3[2] = { 2.533333333333333e+01 - 2.733333333333333e+01*PETSC_i,
206: 2.533333333333333e+01 + 2.733333333333333e+01*PETSC_i},
207: m1q3[2] = { 4.000000000000000e+00 + 2.000000000000000e+00*PETSC_i,
208: 4.000000000000000e+00 - 2.000000000000000e+00*PETSC_i};
209: const PetscScalar /* m == k-1 */
210: m1remain5[2] = { 2.000000000000000e-01, 9.800000000000000e+00},
211: m1remain4[2] = {-2.500000000000000e-01, -7.750000000000000e+00},
212: m1remain3[2] = { 3.333333333333333e-01, 5.666666666666667e+00},
213: m1remain2[2] = {-0.5, -3.5},
214: remain3[4] = {1.0/6.0, 1.0/2.0, 1, 1},
215: remain2[3] = {1.0/2.0, 1, 1};
217: if (query) { /* query about buffer's size */
218: if (m==k+1) {
219: *remain = 0;
220: *r = *q = k+1;
221: PetscFunctionReturn(0); /* quick return */
222: }
223: if (m==k-1) {
224: *remain = 2;
225: if (k==5) *r = *q = 4;
226: else if (k==4) *r = *q = 3;
227: else if (k==3) *r = *q = 2;
228: else if (k==2) *r = *q = 1;
229: }
230: if (m==0) {
231: *r = *q = 0;
232: *remain = k+1;
233: }
234: } else {
235: if (m==k+1) {
236: if (k==4) {
237: for (i=0;i<5;i++) { r[i] = p1r4[i]; q[i] = p1q4[i]; }
238: } else if (k==3) {
239: for (i=0;i<4;i++) { r[i] = p1r3[i]; q[i] = p1q3[i]; }
240: } else if (k==2) {
241: for (i=0;i<3;i++) { r[i] = p1r2[i]; q[i] = p1q2[i]; }
242: } else if (k==1) {
243: for (i=0;i<2;i++) { r[i] = p1r1[i]; q[i] = p1q1[i]; }
244: }
245: PetscFunctionReturn(0); /* quick return */
246: }
247: if (m==k-1) {
248: if (k==5) {
249: for (i=0;i<4;i++) { r[i] = m1r5[i]; q[i] = m1q5[i]; }
250: for (i=0;i<2;i++) remain[i] = m1remain5[i];
251: } else if (k==4) {
252: for (i=0;i<3;i++) { r[i] = m1r4[i]; q[i] = m1q4[i]; }
253: for (i=0;i<2;i++) remain[i] = m1remain4[i];
254: } else if (k==3) {
255: for (i=0;i<2;i++) { r[i] = m1r3[i]; q[i] = m1q3[i]; remain[i] = m1remain3[i]; }
256: } else if (k==2) {
257: r[0] = -13.5; q[0] = 3;
258: for (i=0;i<2;i++) remain[i] = m1remain2[i];
259: }
260: }
261: if (m==0) {
262: r = q = 0;
263: if (k==3) {
264: for (i=0;i<4;i++) remain[i] = remain3[i];
265: } else if (k==2) {
266: for (i=0;i<3;i++) remain[i] = remain2[i];
267: }
268: }
269: }
270: PetscFunctionReturn(0);
271: }
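/*
   With these tables the (k,m) Pade approximant is evaluated in partial
   fraction form,

     r(x) = remain(x) + sum_i r_i/(x - q_i),

   so that r(As) requires one linear solve (As - q_i I) X_i = r_i I per pole
   (the gesv calls in the caller); since the poles come in complex-conjugate
   pairs, only about half of the solves are needed when the matrix data are real.
*/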
273: /*
274: * Product form coefficients.
275:  * If query is true, only the sizes needed to store the coefficients are returned.
276: */
277: static PetscErrorCode getcoeffsproduct(PetscInt k,PetscInt m,PetscComplex *p,PetscComplex *q,PetscComplex *mult,PetscBool query)
278: {
279: PetscInt i;
280: const PetscComplex /* m == k+1 */
281: p1p4[4] = {-5.203941240131764e+00 + 5.805856841805367e+00*PETSC_i,
282: -5.203941240131764e+00 - 5.805856841805367e+00*PETSC_i,
283: -6.796058759868242e+00 + 1.886649260140217e+00*PETSC_i,
284: -6.796058759868242e+00 - 1.886649260140217e+00*PETSC_i},
285: p1q4[5] = { 3.655694325463550e+00 + 6.543736899360086e+00*PETSC_i,
286: 3.655694325463550e+00 - 6.543736899360086e+00*PETSC_i,
287: 6.286704751729261e+00 ,
288: 5.700953298671832e+00 + 3.210265600308496e+00*PETSC_i,
289: 5.700953298671832e+00 - 3.210265600308496e+00*PETSC_i},
290: p1p3[3] = {-4.675757014491557e+00 + 3.913489560603711e+00*PETSC_i,
291: -4.675757014491557e+00 - 3.913489560603711e+00*PETSC_i,
292: -5.648485971016893e+00 },
293: p1q3[4] = { 3.212806896871536e+00 + 4.773087433276636e+00*PETSC_i,
294: 3.212806896871536e+00 - 4.773087433276636e+00*PETSC_i,
295: 4.787193103128464e+00 + 1.567476416895212e+00*PETSC_i,
296: 4.787193103128464e+00 - 1.567476416895212e+00*PETSC_i},
297: p1p2[2] = {-4.00000000000000e+00 + 2.000000000000000e+00*PETSC_i,
298: -4.00000000000000e+00 - 2.000000000000000e+00*PETSC_i},
299: p1q2[3] = { 2.681082873627756e+00 + 3.050430199247411e+00*PETSC_i,
300: 2.681082873627756e+00 - 3.050430199247411e+00*PETSC_i,
301: 3.637834252744491e+00 },
302: p1q1[2] = { 2.000000000000000e+00 + 1.414213562373095e+00*PETSC_i,
303: 2.000000000000000e+00 - 1.414213562373095e+00*PETSC_i};
304: const PetscComplex /* m == k-1 */
305: m1p5[5] = {-3.655694325463550e+00 + 6.543736899360086e+00*PETSC_i,
306: -3.655694325463550e+00 - 6.543736899360086e+00*PETSC_i,
307: -6.286704751729261e+00 ,
308: -5.700953298671832e+00 + 3.210265600308496e+00*PETSC_i,
309: -5.700953298671832e+00 - 3.210265600308496e+00*PETSC_i},
310: m1q5[4] = { 5.203941240131764e+00 + 5.805856841805367e+00*PETSC_i,
311: 5.203941240131764e+00 - 5.805856841805367e+00*PETSC_i,
312: 6.796058759868242e+00 + 1.886649260140217e+00*PETSC_i,
313: 6.796058759868242e+00 - 1.886649260140217e+00*PETSC_i},
314: m1p4[4] = {-3.212806896871536e+00 + 4.773087433276636e+00*PETSC_i,
315: -3.212806896871536e+00 - 4.773087433276636e+00*PETSC_i,
316: -4.787193103128464e+00 + 1.567476416895212e+00*PETSC_i,
317: -4.787193103128464e+00 - 1.567476416895212e+00*PETSC_i},
318: m1q4[3] = { 4.675757014491557e+00 + 3.913489560603711e+00*PETSC_i,
319: 4.675757014491557e+00 - 3.913489560603711e+00*PETSC_i,
320: 5.648485971016893e+00 },
321: m1p3[3] = {-2.681082873627756e+00 + 3.050430199247411e+00*PETSC_i,
322: -2.681082873627756e+00 - 3.050430199247411e+00*PETSC_i,
323: -3.637834252744491e+00 },
324: m1q3[2] = { 4.000000000000000e+00 + 2.000000000000000e+00*PETSC_i,
325:                4.000000000000000e+00 - 2.000000000000000e+00*PETSC_i},
326: m1p2[2] = {-2.000000000000000e+00 + 1.414213562373095e+00*PETSC_i,
327: -2.000000000000000e+00 - 1.414213562373095e+00*PETSC_i};
329: if (query) {
330: if (m == k+1) {
331: *mult = 1;
332: *p = k;
333: *q = k+1;
334: PetscFunctionReturn(0);
335: }
336: if (m==k-1) {
337: *mult = 1;
338: *p = k;
339: *q = k-1;
340: }
341: } else {
342: if (m == k+1) {
343: *mult = PetscPowInt(-1,m);
344: *mult *= m;
345: if (k==4) {
346: for (i=0;i<4;i++) { p[i] = p1p4[i]; q[i] = p1q4[i]; }
347: q[4] = p1q4[4];
348: } else if (k==3) {
349: for (i=0;i<3;i++) { p[i] = p1p3[i]; q[i] = p1q3[i]; }
350: q[3] = p1q3[3];
351: } else if (k==2) {
352: for (i=0;i<2;i++) { p[i] = p1p2[i]; q[i] = p1q2[i]; }
353: q[2] = p1q2[2];
354: } else if (k==1) {
355: p[0] = -3;
356: for (i=0;i<2;i++) q[i] = p1q1[i];
357: }
358: PetscFunctionReturn(0);
359: }
360: if (m==k-1) {
361: *mult = PetscPowInt(-1,m);
362: *mult /= k;
363: if (k==5) {
364: for (i=0;i<4;i++) { p[i] = m1p5[i]; q[i] = m1q5[i]; }
365: p[4] = m1p5[4];
366: } else if (k==4) {
367: for (i=0;i<3;i++) { p[i] = m1p4[i]; q[i] = m1q4[i]; }
368: p[3] = m1p4[3];
369: } else if (k==3) {
370: for (i=0;i<2;i++) { p[i] = m1p3[i]; q[i] = m1q3[i]; }
371: p[2] = m1p3[2];
372: } else if (k==2) {
373: for (i=0;i<2;i++) p[i] = m1p2[i];
374: q[0] = 3;
375: }
376: }
377: }
378: PetscFunctionReturn(0);
379: }
380: #endif /* PETSC_HAVE_COMPLEX */
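/*
   In product form the same (k,m) Pade approximant reads

     r(x) = mult * prod_i (x - p_i) / prod_j (x - q_j),

   and r(As) is built by alternating a multiplication by (As - p_i I) with a
   linear solve against (As - q_j I), i.e. the gemm/gesv pairs in the caller;
   leftover numerator or denominator roots are applied afterwards when k and m
   differ.
*/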
382: #if defined(PETSC_USE_COMPLEX)
383: static PetscErrorCode getisreal(PetscInt n,PetscComplex *a,PetscBool *result)
384: {
385: PetscInt i;
387: *result=PETSC_TRUE;
388: for (i=0;i<n&&*result;i++) {
389: if (PetscImaginaryPartComplex(a[i])) *result=PETSC_FALSE;
390: }
391: PetscFunctionReturn(0);
392: }
393: #endif
395: /*
396: * Matrix exponential implementation based on algorithm and matlab code by Stefan Guettel
397: * and Yuji Nakatsukasa
398: *
399: * Stefan Guettel and Yuji Nakatsukasa, "Scaled and Squared Subdiagonal Pade
400: * Approximation for the Matrix Exponential",
401: * SIAM J. Matrix Anal. Appl. 37(1):145-170, 2016.
402: * https://doi.org/10.1137/15M1027553
403: */
404: PetscErrorCode FNEvaluateFunctionMat_Exp_GuettelNakatsukasa(FN fn,Mat A,Mat B)
405: {
406: #if !defined(PETSC_HAVE_COMPLEX)
407: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP,"This function requires C99 or C++ complex support");
408: #else
409: PetscInt i,j,n_,s,k,m,mod;
410: PetscBLASInt n=0,n2=0,irsize=0,rsizediv2,ipsize=0,iremainsize=0,info,*piv,minlen,lwork=0,one=1;
411: PetscReal nrm,shift=0.0;
412: #if defined(PETSC_USE_COMPLEX)
413: PetscReal *rwork=NULL;
414: #endif
415: PetscComplex *As,*RR,*RR2,*expmA,*expmA2,*Maux,*Maux2,rsize,*r,psize,*p,remainsize,*remainterm,*rootp,*rootq,mult=0.0,scale,cone=1.0,czero=0.0,*aux;
416: PetscScalar *Ba,*Ba2,*sMaux,*wr,*wi,expshift,sone=1.0,szero=0.0,*saux;
417: const PetscScalar *Aa;
418: PetscBool isreal,flg;
419: PetscBLASInt query=-1;
420: PetscScalar work1,*work;
422: MatGetSize(A,&n_,NULL);
423: PetscBLASIntCast(n_,&n);
424: MatDenseGetArrayRead(A,&Aa);
425: MatDenseGetArray(B,&Ba);
426: Ba2 = Ba;
427: PetscBLASIntCast(n*n,&n2);
429: PetscMalloc2(n2,&sMaux,n2,&Maux);
430: Maux2 = Maux;
431: PetscOptionsGetReal(NULL,NULL,"-fn_expm_estimated_eig",&shift,&flg);
432: if (!flg) {
433: PetscMalloc2(n,&wr,n,&wi);
434: PetscArraycpy(sMaux,Aa,n2);
435: /* estimate rightmost eigenvalue and shift A with it */
436: #if !defined(PETSC_USE_COMPLEX)
437: PetscStackCallBLAS("LAPACKgeev",LAPACKgeev_("N","N",&n,sMaux,&n,wr,wi,NULL,&n,NULL,&n,&work1,&query,&info));
438: SlepcCheckLapackInfo("geev",info);
439: PetscBLASIntCast((PetscInt)work1,&lwork);
440: PetscMalloc1(lwork,&work);
441: PetscStackCallBLAS("LAPACKgeev",LAPACKgeev_("N","N",&n,sMaux,&n,wr,wi,NULL,&n,NULL,&n,work,&lwork,&info));
442: PetscFree(work);
443: #else
444: PetscArraycpy(Maux,Aa,n2);
445: PetscStackCallBLAS("LAPACKgeev",LAPACKgeev_("N","N",&n,Maux,&n,wr,NULL,&n,NULL,&n,&work1,&query,rwork,&info));
446: SlepcCheckLapackInfo("geev",info);
447: PetscBLASIntCast((PetscInt)PetscRealPart(work1),&lwork);
448: PetscMalloc2(2*n,&rwork,lwork,&work);
449: PetscStackCallBLAS("LAPACKgeev",LAPACKgeev_("N","N",&n,Maux,&n,wr,NULL,&n,NULL,&n,work,&lwork,rwork,&info));
450: PetscFree2(rwork,work);
451: #endif
452: SlepcCheckLapackInfo("geev",info);
453: PetscLogFlops(25.0*n*n*n+(n*n*n)/3.0+1.0*n*n*n);
455: shift = PetscRealPart(wr[0]);
456: for (i=1;i<n;i++) {
457: if (PetscRealPart(wr[i]) > shift) shift = PetscRealPart(wr[i]);
458: }
459: PetscFree2(wr,wi);
460: }
461: /* shift so that largest real part is (about) 0 */
462: PetscArraycpy(sMaux,Aa,n2);
463: if (shift) {
464: for (i=0;i<n;i++) sMaux[i+i*n] -= shift;
465: PetscLogFlops(1.0*n);
466: }
467: #if defined(PETSC_USE_COMPLEX)
468: PetscArraycpy(Maux,Aa,n2);
469: if (shift) {
470: for (i=0;i<n;i++) Maux[i+i*n] -= shift;
471: PetscLogFlops(1.0*n);
472: }
473: #endif
475: /* estimate norm(A) and select the scaling factor */
476: nrm = LAPACKlange_("O",&n,&n,sMaux,&n,NULL);
477: PetscLogFlops(1.0*n*n);
478: sexpm_params(nrm,&s,&k,&m);
479: if (s==0 && k==1 && m==0) { /* exp(A) = I+A to eps! */
480: if (shift) expshift = PetscExpReal(shift);
481: for (i=0;i<n;i++) sMaux[i+i*n] += 1.0;
482: if (shift) {
483: PetscStackCallBLAS("BLASscal",BLASscal_(&n2,&expshift,sMaux,&one));
484: PetscLogFlops(1.0*(n+n2));
485: } else PetscLogFlops(1.0*n);
486: PetscArraycpy(Ba,sMaux,n2);
487: PetscFree2(sMaux,Maux);
488: MatDenseRestoreArrayRead(A,&Aa);
489: MatDenseRestoreArray(B,&Ba);
490: PetscFunctionReturn(0); /* quick return */
491: }
493: PetscMalloc4(n2,&expmA,n2,&As,n2,&RR,n,&piv);
494: expmA2 = expmA; RR2 = RR;
495: /* scale matrix */
496: #if !defined(PETSC_USE_COMPLEX)
497: for (i=0;i<n2;i++) {
498: As[i] = sMaux[i];
499: }
500: #else
501: PetscArraycpy(As,sMaux,n2);
502: #endif
503: scale = 1.0/PetscPowRealInt(2.0,s);
504: PetscStackCallBLAS("BLASCOMPLEXscal",BLASCOMPLEXscal_(&n2,&scale,As,&one));
505: SlepcLogFlopsComplex(1.0*n2);
507: /* evaluate Pade approximant (partial fraction or product form) */
508: if (fn->method==3 || !m) { /* partial fraction */
509: getcoeffs(k,m,&rsize,&psize,&remainsize,PETSC_TRUE);
510: PetscBLASIntCast((PetscInt)PetscRealPartComplex(rsize),&irsize);
511: PetscBLASIntCast((PetscInt)PetscRealPartComplex(psize),&ipsize);
512: PetscBLASIntCast((PetscInt)PetscRealPartComplex(remainsize),&iremainsize);
513: PetscMalloc3(irsize,&r,ipsize,&p,iremainsize,&remainterm);
514: getcoeffs(k,m,r,p,remainterm,PETSC_FALSE);
516: PetscArrayzero(expmA,n2);
517: #if !defined(PETSC_USE_COMPLEX)
518: isreal = PETSC_TRUE;
519: #else
520: getisreal(n2,Maux,&isreal);
521: #endif
522: if (isreal) {
523: rsizediv2 = irsize/2;
524: for (i=0;i<rsizediv2;i++) { /* use partial fraction to get R(As) */
525: PetscArraycpy(Maux,As,n2);
526: PetscArrayzero(RR,n2);
527: for (j=0;j<n;j++) {
528: Maux[j+j*n] -= p[2*i];
529: RR[j+j*n] = r[2*i];
530: }
531: PetscStackCallBLAS("LAPACKCOMPLEXgesv",LAPACKCOMPLEXgesv_(&n,&n,Maux,&n,piv,RR,&n,&info));
532: SlepcCheckLapackInfo("gesv",info);
533: for (j=0;j<n2;j++) {
534: expmA[j] += RR[j] + PetscConj(RR[j]);
535: }
536: /* loop(n) + gesv + loop(n2) */
537: SlepcLogFlopsComplex(1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n)+2.0*n2);
538: }
540: mod = ipsize % 2;
541: if (mod) {
542: PetscArraycpy(Maux,As,n2);
543: PetscArrayzero(RR,n2);
544: for (j=0;j<n;j++) {
545: Maux[j+j*n] -= p[ipsize-1];
546: RR[j+j*n] = r[irsize-1];
547: }
548: PetscStackCallBLAS("LAPACKCOMPLEXgesv",LAPACKCOMPLEXgesv_(&n,&n,Maux,&n,piv,RR,&n,&info));
549: SlepcCheckLapackInfo("gesv",info);
550: for (j=0;j<n2;j++) {
551: expmA[j] += RR[j];
552: }
553: SlepcLogFlopsComplex(1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n)+1.0*n2);
554: }
555: } else { /* complex */
556: for (i=0;i<irsize;i++) { /* use partial fraction to get R(As) */
557: PetscArraycpy(Maux,As,n2);
558: PetscArrayzero(RR,n2);
559: for (j=0;j<n;j++) {
560: Maux[j+j*n] -= p[i];
561: RR[j+j*n] = r[i];
562: }
563: PetscStackCallBLAS("LAPACKCOMPLEXgesv",LAPACKCOMPLEXgesv_(&n,&n,Maux,&n,piv,RR,&n,&info));
564: SlepcCheckLapackInfo("gesv",info);
565: for (j=0;j<n2;j++) {
566: expmA[j] += RR[j];
567: }
568: SlepcLogFlopsComplex(1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n)+1.0*n2);
569: }
570: }
571: for (i=0;i<iremainsize;i++) {
572: if (!i) {
573: PetscArrayzero(RR,n2);
574: for (j=0;j<n;j++) {
575: RR[j+j*n] = remainterm[iremainsize-1];
576: }
577: } else {
578: PetscArraycpy(RR,As,n2);
579: for (j=1;j<i;j++) {
580: PetscStackCallBLAS("BLASCOMPLEXgemm",BLASCOMPLEXgemm_("N","N",&n,&n,&n,&cone,RR,&n,RR,&n,&czero,Maux,&n));
581: SWAP(RR,Maux,aux);
582: SlepcLogFlopsComplex(2.0*n*n*n);
583: }
584: PetscStackCallBLAS("BLASCOMPLEXscal",BLASCOMPLEXscal_(&n2,&remainterm[iremainsize-1-i],RR,&one));
585: SlepcLogFlopsComplex(1.0*n2);
586: }
587: for (j=0;j<n2;j++) {
588: expmA[j] += RR[j];
589: }
590: SlepcLogFlopsComplex(1.0*n2);
591: }
592: PetscFree3(r,p,remainterm);
593: } else { /* product form, default */
594: getcoeffsproduct(k,m,&rsize,&psize,&mult,PETSC_TRUE);
595: PetscBLASIntCast((PetscInt)PetscRealPartComplex(rsize),&irsize);
596: PetscBLASIntCast((PetscInt)PetscRealPartComplex(psize),&ipsize);
597: PetscMalloc2(irsize,&rootp,ipsize,&rootq);
598: getcoeffsproduct(k,m,rootp,rootq,&mult,PETSC_FALSE);
600: PetscArrayzero(expmA,n2);
601: for (i=0;i<n;i++) { /* initialize */
602: expmA[i+i*n] = 1.0;
603: }
604: minlen = PetscMin(irsize,ipsize);
605: for (i=0;i<minlen;i++) {
606: PetscArraycpy(RR,As,n2);
607: for (j=0;j<n;j++) {
608: RR[j+j*n] -= rootp[i];
609: }
610: PetscStackCallBLAS("BLASCOMPLEXgemm",BLASCOMPLEXgemm_("N","N",&n,&n,&n,&cone,RR,&n,expmA,&n,&czero,Maux,&n));
611: SWAP(expmA,Maux,aux);
612: PetscArraycpy(RR,As,n2);
613: for (j=0;j<n;j++) {
614: RR[j+j*n] -= rootq[i];
615: }
616: PetscStackCallBLAS("LAPACKCOMPLEXgesv",LAPACKCOMPLEXgesv_(&n,&n,RR,&n,piv,expmA,&n,&info));
617: SlepcCheckLapackInfo("gesv",info);
618: /* loop(n) + gemm + loop(n) + gesv */
619: SlepcLogFlopsComplex(1.0*n+(2.0*n*n*n)+1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n));
620: }
621: /* extra numerator */
622: for (i=minlen;i<irsize;i++) {
623: PetscArraycpy(RR,As,n2);
624: for (j=0;j<n;j++) {
625: RR[j+j*n] -= rootp[i];
626: }
627: PetscStackCallBLAS("BLASCOMPLEXgemm",BLASCOMPLEXgemm_("N","N",&n,&n,&n,&cone,RR,&n,expmA,&n,&czero,Maux,&n));
628: SWAP(expmA,Maux,aux);
629: SlepcLogFlopsComplex(1.0*n+2.0*n*n*n);
630: }
631: /* extra denominator */
632: for (i=minlen;i<ipsize;i++) {
633: PetscArraycpy(RR,As,n2);
634: for (j=0;j<n;j++) RR[j+j*n] -= rootq[i];
635: PetscStackCallBLAS("LAPACKCOMPLEXgesv",LAPACKCOMPLEXgesv_(&n,&n,RR,&n,piv,expmA,&n,&info));
636: SlepcCheckLapackInfo("gesv",info);
637: SlepcLogFlopsComplex(1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n));
638: }
639: PetscStackCallBLAS("BLASCOMPLEXscal",BLASCOMPLEXscal_(&n2,&mult,expmA,&one));
640: SlepcLogFlopsComplex(1.0*n2);
641: PetscFree2(rootp,rootq);
642: }
644: #if !defined(PETSC_USE_COMPLEX)
645: for (i=0;i<n2;i++) {
646: Ba2[i] = PetscRealPartComplex(expmA[i]);
647: }
648: #else
649: PetscArraycpy(Ba2,expmA,n2);
650: #endif
652: /* perform repeated squaring */
653: for (i=0;i<s;i++) { /* final squaring */
654: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n,&n,&n,&sone,Ba2,&n,Ba2,&n,&szero,sMaux,&n));
655: SWAP(Ba2,sMaux,saux);
656: PetscLogFlops(2.0*n*n*n);
657: }
658: if (Ba2!=Ba) {
659: PetscArraycpy(Ba,Ba2,n2);
660: sMaux = Ba2;
661: }
662: if (shift) {
663: expshift = PetscExpReal(shift);
664: PetscStackCallBLAS("BLASscal",BLASscal_(&n2,&expshift,Ba,&one));
665: PetscLogFlops(1.0*n2);
666: }
668: /* restore pointers */
669: Maux = Maux2; expmA = expmA2; RR = RR2;
670: PetscFree2(sMaux,Maux);
671: PetscFree4(expmA,As,RR,piv);
672: MatDenseRestoreArrayRead(A,&Aa);
673: MatDenseRestoreArray(B,&Ba);
674: PetscFunctionReturn(0);
675: #endif
676: }
678: #define SMALLN 100
680: /*
681: * Function needed to compute optimal parameters (required workspace is 3*n*n)
682: */
683: static PetscInt ell(PetscBLASInt n,PetscScalar *A,PetscReal coeff,PetscInt m,PetscScalar *work,PetscRandom rand)
684: {
685: PetscScalar *Ascaled=work;
686: PetscReal nrm,alpha,beta,rwork[1];
687: PetscInt t;
688: PetscBLASInt i,j;
690: beta = PetscPowReal(coeff,1.0/(2*m+1));
691: for (i=0;i<n;i++)
692: for (j=0;j<n;j++)
693: Ascaled[i+j*n] = beta*PetscAbsScalar(A[i+j*n]);
694: nrm = LAPACKlange_("O",&n,&n,A,&n,rwork);
695: PetscLogFlops(2.0*n*n);
696: SlepcNormAm(n,Ascaled,2*m+1,work+n*n,rand,&alpha);
697: alpha /= nrm;
698: t = PetscMax((PetscInt)PetscCeilReal(PetscLogReal(2.0*alpha/PETSC_MACHINE_EPSILON)/PetscLogReal(2.0)/(2*m)),0);
699: PetscFunctionReturn(t);
700: }
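/*
   In the notation of the code above, ell() returns

     t = max( ceil( log2(2*alpha/eps)/(2m) ), 0 ),  with
     alpha = || (beta*|A|)^(2m+1) ||_1 / ||A||_1,   beta = coeff^(1/(2m+1)),

   where the power norm is estimated by SlepcNormAm on the entrywise absolute
   value of A; expm_params() accepts an order m only if this value is zero
   and, for m=13, adds it to the scaling parameter s.
*/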
702: /*
703: * Compute scaling parameter (s) and order of Pade approximant (m) (required workspace is 4*n*n)
704: */
705: static PetscErrorCode expm_params(PetscInt n,PetscScalar **Apowers,PetscInt *s,PetscInt *m,PetscScalar *work)
706: {
707: PetscScalar sfactor,sone=1.0,szero=0.0,*A=Apowers[0],*Ascaled;
708: PetscReal d4,d6,d8,d10,eta1,eta3,eta4,eta5,rwork[1];
709: PetscBLASInt n_=0,n2,one=1;
710: PetscRandom rand;
711: const PetscReal coeff[5] = { 9.92063492063492e-06, 9.94131285136576e-11, /* backward error function */
712: 2.22819456055356e-16, 1.69079293431187e-22, 8.82996160201868e-36 };
713: const PetscReal theta[5] = { 1.495585217958292e-002, /* m = 3 */
714: 2.539398330063230e-001, /* m = 5 */
715: 9.504178996162932e-001, /* m = 7 */
716: 2.097847961257068e+000, /* m = 9 */
717: 5.371920351148152e+000 }; /* m = 13 */
719: *s = 0;
720: *m = 13;
721: PetscBLASIntCast(n,&n_);
722: PetscRandomCreate(PETSC_COMM_SELF,&rand);
723: d4 = PetscPowReal(LAPACKlange_("O",&n_,&n_,Apowers[2],&n_,rwork),1.0/4.0);
724: if (d4==0.0) { /* safeguard for the case A = 0 */
725: *m = 3;
726: goto done;
727: }
728: d6 = PetscPowReal(LAPACKlange_("O",&n_,&n_,Apowers[3],&n_,rwork),1.0/6.0);
729: PetscLogFlops(2.0*n*n);
730: eta1 = PetscMax(d4,d6);
731: if (eta1<=theta[0] && !ell(n_,A,coeff[0],3,work,rand)) {
732: *m = 3;
733: goto done;
734: }
735: if (eta1<=theta[1] && !ell(n_,A,coeff[1],5,work,rand)) {
736: *m = 5;
737: goto done;
738: }
739: if (n<SMALLN) {
740: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[2],&n_,Apowers[2],&n_,&szero,work,&n_));
741: d8 = PetscPowReal(LAPACKlange_("O",&n_,&n_,work,&n_,rwork),1.0/8.0);
742: PetscLogFlops(2.0*n*n*n+1.0*n*n);
743: } else {
744: SlepcNormAm(n_,Apowers[2],2,work,rand,&d8);
745: d8 = PetscPowReal(d8,1.0/8.0);
746: }
747: eta3 = PetscMax(d6,d8);
748: if (eta3<=theta[2] && !ell(n_,A,coeff[2],7,work,rand)) {
749: *m = 7;
750: goto done;
751: }
752: if (eta3<=theta[3] && !ell(n_,A,coeff[3],9,work,rand)) {
753: *m = 9;
754: goto done;
755: }
756: if (n<SMALLN) {
757: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[2],&n_,Apowers[3],&n_,&szero,work,&n_));
758: d10 = PetscPowReal(LAPACKlange_("O",&n_,&n_,work,&n_,rwork),1.0/10.0);
759: PetscLogFlops(2.0*n*n*n+1.0*n*n);
760: } else {
761: SlepcNormAm(n_,Apowers[1],5,work,rand,&d10);
762: d10 = PetscPowReal(d10,1.0/10.0);
763: }
764: eta4 = PetscMax(d8,d10);
765: eta5 = PetscMin(eta3,eta4);
766: *s = PetscMax((PetscInt)PetscCeilReal(PetscLogReal(eta5/theta[4])/PetscLogReal(2.0)),0);
767: if (*s) {
768: Ascaled = work+3*n*n;
769: n2 = n_*n_;
770: PetscStackCallBLAS("BLAScopy",BLAScopy_(&n2,A,&one,Ascaled,&one));
771: sfactor = PetscPowRealInt(2.0,-(*s));
772: PetscStackCallBLAS("BLASscal",BLASscal_(&n2,&sfactor,Ascaled,&one));
773: PetscLogFlops(1.0*n*n);
774: } else Ascaled = A;
775: *s += ell(n_,Ascaled,coeff[4],13,work,rand);
776: done:
777: PetscRandomDestroy(&rand);
778: PetscFunctionReturn(0);
779: }
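/*
   Summary of the selection above: with eta values built from the estimates
   d_{2k} = ||A^(2k)||_1^(1/(2k)), the smallest order m in {3,5,7,9} whose
   threshold theta[m] bounds the corresponding eta (and whose ell() test
   returns 0) is chosen with s=0; otherwise m=13 is used with scaling
   s = max( ceil( log2(eta5/theta_13) ), 0 ), possibly increased by ell().
*/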
781: /*
782: * Matrix exponential implementation based on algorithm and matlab code by N. Higham and co-authors
783: *
784: * N. J. Higham, "The scaling and squaring method for the matrix exponential
785: * revisited", SIAM J. Matrix Anal. Appl. 26(4):1179-1193, 2005.
786: */
787: PetscErrorCode FNEvaluateFunctionMat_Exp_Higham(FN fn,Mat A,Mat B)
788: {
789: PetscBLASInt n_=0,n2,*ipiv,info,one=1;
790: PetscInt n,m,j,s;
791: PetscScalar scale,smone=-1.0,sone=1.0,stwo=2.0,szero=0.0;
792: PetscScalar *Ba,*Apowers[5],*Q,*P,*W,*work,*aux;
793: const PetscScalar *Aa,*c;
794: const PetscScalar c3[4] = { 120, 60, 12, 1 };
795: const PetscScalar c5[6] = { 30240, 15120, 3360, 420, 30, 1 };
796: const PetscScalar c7[8] = { 17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1 };
797: const PetscScalar c9[10] = { 17643225600.0, 8821612800.0, 2075673600, 302702400, 30270240,
798: 2162160, 110880, 3960, 90, 1 };
799: const PetscScalar c13[14] = { 64764752532480000.0, 32382376266240000.0, 7771770303897600.0,
800: 1187353796428800.0, 129060195264000.0, 10559470521600.0,
801: 670442572800.0, 33522128640.0, 1323241920.0,
802: 40840800, 960960, 16380, 182, 1 };
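  /* The c arrays above are the numerator coefficients of the [m/m] Pade
     approximant p_m(x)/q_m(x) to exp(x); since q_m(x) = p_m(-x), the same
     table fills both P (odd powers, completed by a final multiplication
     by A) and Q (even powers) below, and the approximant q_m(A)^{-1} p_m(A)
     is then recovered with a single gesv solve. */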
804: MatDenseGetArrayRead(A,&Aa);
805: MatDenseGetArray(B,&Ba);
806: MatGetSize(A,&n,NULL);
807: PetscBLASIntCast(n,&n_);
808: n2 = n_*n_;
809: PetscMalloc2(8*n*n,&work,n,&ipiv);
811: /* Matrix powers */
812: Apowers[0] = work; /* Apowers[0] = A */
813: Apowers[1] = Apowers[0] + n*n; /* Apowers[1] = A^2 */
814: Apowers[2] = Apowers[1] + n*n; /* Apowers[2] = A^4 */
815: Apowers[3] = Apowers[2] + n*n; /* Apowers[3] = A^6 */
816: Apowers[4] = Apowers[3] + n*n; /* Apowers[4] = A^8 */
818: PetscArraycpy(Apowers[0],Aa,n2);
819: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[0],&n_,Apowers[0],&n_,&szero,Apowers[1],&n_));
820: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[1],&n_,Apowers[1],&n_,&szero,Apowers[2],&n_));
821: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[1],&n_,Apowers[2],&n_,&szero,Apowers[3],&n_));
822: PetscLogFlops(6.0*n*n*n);
824: /* Compute scaling parameter and order of Pade approximant */
825: expm_params(n,Apowers,&s,&m,Apowers[4]);
827: if (s) { /* rescale */
828: for (j=0;j<4;j++) {
829: scale = PetscPowRealInt(2.0,-PetscMax(2*j,1)*s);
830: PetscStackCallBLAS("BLASscal",BLASscal_(&n2,&scale,Apowers[j],&one));
831: }
832: PetscLogFlops(4.0*n*n);
833: }
835: /* Evaluate the Pade approximant */
836: switch (m) {
837: case 3: c = c3; break;
838: case 5: c = c5; break;
839: case 7: c = c7; break;
840: case 9: c = c9; break;
841: case 13: c = c13; break;
842: default: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong value of m %" PetscInt_FMT,m);
843: }
844: P = Ba;
845: Q = Apowers[4] + n*n;
846: W = Q + n*n;
847: switch (m) {
848: case 3:
849: case 5:
850: case 7:
851: case 9:
852: if (m==9) PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[1],&n_,Apowers[3],&n_,&szero,Apowers[4],&n_));
853: PetscArrayzero(P,n2);
854: PetscArrayzero(Q,n2);
855: for (j=0;j<n;j++) {
856: P[j+j*n] = c[1];
857: Q[j+j*n] = c[0];
858: }
859: for (j=m;j>=3;j-=2) {
860: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[j],Apowers[(j+1)/2-1],&one,P,&one));
861: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[j-1],Apowers[(j+1)/2-1],&one,Q,&one));
862: PetscLogFlops(4.0*n*n);
863: }
864: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[0],&n_,P,&n_,&szero,W,&n_));
865: PetscLogFlops(2.0*n*n*n);
866: SWAP(P,W,aux);
867: break;
868: case 13:
869: /* P = A*(Apowers[3]*(c[13]*Apowers[3] + c[11]*Apowers[2] + c[9]*Apowers[1])
870: + c[7]*Apowers[3] + c[5]*Apowers[2] + c[3]*Apowers[1] + c[1]*I) */
871: PetscStackCallBLAS("BLAScopy",BLAScopy_(&n2,Apowers[3],&one,P,&one));
872: PetscStackCallBLAS("BLASscal",BLASscal_(&n2,&c[13],P,&one));
873: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[11],Apowers[2],&one,P,&one));
874: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[9],Apowers[1],&one,P,&one));
875: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[3],&n_,P,&n_,&szero,W,&n_));
876: PetscLogFlops(5.0*n*n+2.0*n*n*n);
877: PetscArrayzero(P,n2);
878: for (j=0;j<n;j++) P[j+j*n] = c[1];
879: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[7],Apowers[3],&one,P,&one));
880: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[5],Apowers[2],&one,P,&one));
881: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[3],Apowers[1],&one,P,&one));
882: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&sone,P,&one,W,&one));
883: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[0],&n_,W,&n_,&szero,P,&n_));
884: PetscLogFlops(7.0*n*n+2.0*n*n*n);
885: /* Q = Apowers[3]*(c[12]*Apowers[3] + c[10]*Apowers[2] + c[8]*Apowers[1])
886: + c[6]*Apowers[3] + c[4]*Apowers[2] + c[2]*Apowers[1] + c[0]*I */
887: PetscStackCallBLAS("BLAScopy",BLAScopy_(&n2,Apowers[3],&one,Q,&one));
888: PetscStackCallBLAS("BLASscal",BLASscal_(&n2,&c[12],Q,&one));
889: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[10],Apowers[2],&one,Q,&one));
890: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[8],Apowers[1],&one,Q,&one));
891: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,Apowers[3],&n_,Q,&n_,&szero,W,&n_));
892: PetscLogFlops(5.0*n*n+2.0*n*n*n);
893: PetscArrayzero(Q,n2);
894: for (j=0;j<n;j++) Q[j+j*n] = c[0];
895: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[6],Apowers[3],&one,Q,&one));
896: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[4],Apowers[2],&one,Q,&one));
897: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&c[2],Apowers[1],&one,Q,&one));
898: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&sone,W,&one,Q,&one));
899: PetscLogFlops(7.0*n*n);
900: break;
901: default: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong value of m %" PetscInt_FMT,m);
902: }
903: PetscStackCallBLAS("BLASaxpy",BLASaxpy_(&n2,&smone,P,&one,Q,&one));
904: PetscStackCallBLAS("LAPACKgesv",LAPACKgesv_(&n_,&n_,Q,&n_,ipiv,P,&n_,&info));
905: SlepcCheckLapackInfo("gesv",info);
906: PetscStackCallBLAS("BLASscal",BLASscal_(&n2,&stwo,P,&one));
907: for (j=0;j<n;j++) P[j+j*n] += 1.0;
908: PetscLogFlops(2.0*n*n*n/3.0+4.0*n*n);
910: /* Squaring */
911: for (j=1;j<=s;j++) {
912: PetscStackCallBLAS("BLASgemm",BLASgemm_("N","N",&n_,&n_,&n_,&sone,P,&n_,P,&n_,&szero,W,&n_));
913: SWAP(P,W,aux);
914: }
915: if (P!=Ba) PetscArraycpy(Ba,P,n2);
916: PetscLogFlops(2.0*n*n*n*s);
918: PetscFree2(work,ipiv);
919: MatDenseRestoreArrayRead(A,&Aa);
920: MatDenseRestoreArray(B,&Ba);
921: PetscFunctionReturn(0);
922: }
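/*
   Which of the routines in this file backs FNEvaluateFunctionMat() for FNEXP
   is selected at runtime with FNSetMethod() or the -fn_method option; the
   numbering is presumably registered in FNCreate_Exp(), beyond the portion of
   the file shown here.  For instance, the test fn->method==3 in
   FNEvaluateFunctionMat_Exp_GuettelNakatsukasa() selects the partial fraction
   variant of that algorithm.
*/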
924: #if defined(PETSC_HAVE_CUDA)
925: #include "../src/sys/classes/fn/impls/cuda/fnutilcuda.h"
926: #include <slepccublas.h>
928: PetscErrorCode FNEvaluateFunctionMat_Exp_Pade_CUDA(FN fn,Mat A,Mat B)
929: {
930: PetscBLASInt n=0,ld,ld2,*d_ipiv,*d_info,info,one=1,zero=0;
931: PetscInt m,k,sexp;
932: PetscBool odd;
933: const PetscInt p=MAX_PADE;
934: PetscReal c[MAX_PADE+1],s;
935: PetscScalar scale,smone=-1.0,sone=1.0,stwo=2.0,szero=0.0;
936: PetscScalar *Aa,*Ba;
937: PetscScalar *d_Ba,*d_As,*d_A2,*d_Q,*d_P,*d_W,*aux,**ppP,**d_ppP,**ppQ,**d_ppQ;
938: cublasHandle_t cublasv2handle;
940: PetscDeviceInitialize(PETSC_DEVICE_CUDA); /* For CUDA event timers */
941: PetscCUBLASGetHandle(&cublasv2handle);
942: MatDenseGetArray(A,&Aa);
943: MatDenseGetArray(B,&Ba);
944: MatGetSize(A,&m,NULL);
945: PetscBLASIntCast(m,&n);
946: ld = n;
947: ld2 = ld*ld;
949: cudaMalloc((void **)&d_Ba,sizeof(PetscScalar)*m*m);
950: cudaMalloc((void **)&d_Q,sizeof(PetscScalar)*m*m);
951: cudaMalloc((void **)&d_W,sizeof(PetscScalar)*m*m);
952: cudaMalloc((void **)&d_As,sizeof(PetscScalar)*m*m);
953: cudaMalloc((void **)&d_A2,sizeof(PetscScalar)*m*m);
954: cudaMalloc((void **)&d_ipiv,sizeof(PetscBLASInt)*ld);
955: cudaMalloc((void **)&d_info,sizeof(PetscBLASInt));
956: cudaMalloc((void **)&d_ppP,sizeof(PetscScalar*));
957: cudaMalloc((void **)&d_ppQ,sizeof(PetscScalar*));
959: PetscMalloc1(1,&ppP);
960: PetscMalloc1(1,&ppQ);
962: cudaMemcpy(d_As,Aa,sizeof(PetscScalar)*ld2,cudaMemcpyHostToDevice);
963: d_P = d_Ba;
964: PetscLogGpuTimeBegin();
966: /* Pade' coefficients */
967: c[0] = 1.0;
968: for (k=1;k<=p;k++) c[k] = c[k-1]*(p+1-k)/(k*(2*p+1-k));
970: /* Scaling */
971: cublasXnrm2(cublasv2handle,ld2,d_As,one,&s);
972: if (s>0.5) {
973: sexp = PetscMax(0,(int)(PetscLogReal(s)/PetscLogReal(2.0))+2);
974: scale = PetscPowRealInt(2.0,-sexp);
975: cublasXscal(cublasv2handle,ld2,&scale,d_As,one);
976: PetscLogGpuFlops(1.0*n*n);
977: } else sexp = 0;
979: /* Horner evaluation */
980: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_As,ld,d_As,ld,&szero,d_A2,ld);
981: PetscLogGpuFlops(2.0*n*n*n);
982: cudaMemset(d_Q,zero,sizeof(PetscScalar)*ld2);
983: cudaMemset(d_P,zero,sizeof(PetscScalar)*ld2);
984: set_diagonal(n,d_Q,ld,c[p]);
985: set_diagonal(n,d_P,ld,c[p-1]);
987: odd = PETSC_TRUE;
988: for (k=p-1;k>0;k--) {
989: if (odd) {
990: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_Q,ld,d_A2,ld,&szero,d_W,ld);
991: SWAP(d_Q,d_W,aux);
992: shift_diagonal(n,d_Q,ld,c[k-1]);
993: odd = PETSC_FALSE;
994: } else {
995: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_P,ld,d_A2,ld,&szero,d_W,ld);
996: SWAP(d_P,d_W,aux);
997: shift_diagonal(n,d_P,ld,c[k-1]);
998: odd = PETSC_TRUE;
999: }
1000: PetscLogGpuFlops(2.0*n*n*n);
1001: }
1002: if (odd) {
1003: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_Q,ld,d_As,ld,&szero,d_W,ld);
1004: SWAP(d_Q,d_W,aux);
1005: cublasXaxpy(cublasv2handle,ld2,&smone,d_P,one,d_Q,one);
1007: ppQ[0] = d_Q;
1008: ppP[0] = d_P;
1009: cudaMemcpy(d_ppQ,ppQ,sizeof(PetscScalar*),cudaMemcpyHostToDevice);
1010: cudaMemcpy(d_ppP,ppP,sizeof(PetscScalar*),cudaMemcpyHostToDevice);
1012: cublasXgetrfBatched(cublasv2handle,n,d_ppQ,ld,d_ipiv,d_info,one);
1013: cudaMemcpy(&info,d_info,sizeof(PetscBLASInt),cudaMemcpyDeviceToHost);
1016: cublasXgetrsBatched(cublasv2handle,CUBLAS_OP_N,n,n,(const PetscScalar **)d_ppQ,ld,d_ipiv,d_ppP,ld,&info,one);
1019: cublasXscal(cublasv2handle,ld2,&stwo,d_P,one);
1020: shift_diagonal(n,d_P,ld,sone);
1021: cublasXscal(cublasv2handle,ld2,&smone,d_P,one);
1022: } else {
1023: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_P,ld,d_As,ld,&szero,d_W,ld);
1024: SWAP(d_P,d_W,aux);
1025: cublasXaxpy(cublasv2handle,ld2,&smone,d_P,one,d_Q,one);
1027: ppQ[0] = d_Q;
1028: ppP[0] = d_P;
1029: cudaMemcpy(d_ppQ,ppQ,sizeof(PetscScalar*),cudaMemcpyHostToDevice);
1030: cudaMemcpy(d_ppP,ppP,sizeof(PetscScalar*),cudaMemcpyHostToDevice);
1032: cublasXgetrfBatched(cublasv2handle,n,d_ppQ,ld,d_ipiv,d_info,one);
1033: cudaMemcpy(&info,d_info,sizeof(PetscBLASInt),cudaMemcpyDeviceToHost);
1036: cublasXgetrsBatched(cublasv2handle,CUBLAS_OP_N,n,n,(const PetscScalar **)d_ppQ,ld,d_ipiv,d_ppP,ld,&info,one);
1039: cublasXscal(cublasv2handle,ld2,&stwo,d_P,one);
1040: shift_diagonal(n,d_P,ld,sone);
1041: }
1042: PetscLogGpuFlops(2.0*n*n*n+2.0*n*n*n/3.0+4.0*n*n);
1044: for (k=1;k<=sexp;k++) {
1045: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_P,ld,d_P,ld,&szero,d_W,ld);
1046: cudaMemcpy(d_P,d_W,sizeof(PetscScalar)*ld2,cudaMemcpyDeviceToDevice);
1047: }
1048: if (d_P!=d_Ba) cudaMemcpy(Ba,d_P,sizeof(PetscScalar)*ld2,cudaMemcpyDeviceToHost);
1049: else cudaMemcpy(Ba,d_Ba,sizeof(PetscScalar)*ld2,cudaMemcpyDeviceToHost);
1050: PetscLogGpuFlops(2.0*n*n*n*sexp);
1052: PetscLogGpuTimeEnd();
1053: cudaFree(d_Ba);
1054: cudaFree(d_Q);
1055: cudaFree(d_W);
1056: cudaFree(d_As);
1057: cudaFree(d_A2);
1058: cudaFree(d_ipiv);
1059: cudaFree(d_info);
1060: cudaFree(d_ppP);
1061: cudaFree(d_ppQ);
1063: PetscFree(ppP);
1064: PetscFree(ppQ);
1066: MatDenseRestoreArray(A,&Aa);
1067: MatDenseRestoreArray(B,&Ba);
1068: PetscFunctionReturn(0);
1069: }
1071: #if defined(PETSC_HAVE_MAGMA)
1072: #include <slepcmagma.h>
1074: PetscErrorCode FNEvaluateFunctionMat_Exp_Pade_CUDAm(FN fn,Mat A,Mat B)
1075: {
1076: PetscBLASInt n=0,ld,ld2,*piv,one=1,zero=0;
1077: PetscInt m,k,sexp;
1078: PetscBool odd;
1079: const PetscInt p=MAX_PADE;
1080: PetscReal c[MAX_PADE+1],s;
1081: PetscScalar scale,smone=-1.0,sone=1.0,stwo=2.0,szero=0.0;
1082: PetscScalar *Aa,*Ba;
1083: PetscScalar *d_Ba,*d_As,*d_A2,*d_Q,*d_P,*d_W,*aux;
1084: cublasHandle_t cublasv2handle;
1086: PetscDeviceInitialize(PETSC_DEVICE_CUDA); /* For CUDA event timers */
1087: PetscCUBLASGetHandle(&cublasv2handle);
1088: magma_init();
1089: MatDenseGetArray(A,&Aa);
1090: MatDenseGetArray(B,&Ba);
1091: MatGetSize(A,&m,NULL);
1092: PetscBLASIntCast(m,&n);
1093: ld = n;
1094: ld2 = ld*ld;
1096: cudaMalloc((void **)&d_Ba,sizeof(PetscScalar)*m*m);
1097: cudaMalloc((void **)&d_Q,sizeof(PetscScalar)*m*m);
1098: cudaMalloc((void **)&d_W,sizeof(PetscScalar)*m*m);
1099: cudaMalloc((void **)&d_As,sizeof(PetscScalar)*m*m);
1100: cudaMalloc((void **)&d_A2,sizeof(PetscScalar)*m*m);
1102: PetscMalloc1(n,&piv);
1104: cudaMemcpy(d_As,Aa,sizeof(PetscScalar)*ld2,cudaMemcpyHostToDevice);
1105: d_P = d_Ba;
1106: PetscLogGpuTimeBegin();
1108: /* Pade' coefficients */
1109: c[0] = 1.0;
1110: for (k=1;k<=p;k++) c[k] = c[k-1]*(p+1-k)/(k*(2*p+1-k));
1112: /* Scaling */
1113: cublasXnrm2(cublasv2handle,ld2,d_As,one,&s);
1114: PetscLogGpuFlops(1.0*n*n);
1116: if (s>0.5) {
1117: sexp = PetscMax(0,(int)(PetscLogReal(s)/PetscLogReal(2.0))+2);
1118: scale = PetscPowRealInt(2.0,-sexp);
1119: cublasXscal(cublasv2handle,ld2,&scale,d_As,one);
1120: PetscLogGpuFlops(1.0*n*n);
1121: } else sexp = 0;
1123: /* Horner evaluation */
1124: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_As,ld,d_As,ld,&szero,d_A2,ld);
1125: PetscLogGpuFlops(2.0*n*n*n);
1126: cudaMemset(d_Q,zero,sizeof(PetscScalar)*ld2);
1127: cudaMemset(d_P,zero,sizeof(PetscScalar)*ld2);
1128: set_diagonal(n,d_Q,ld,c[p]);
1129: set_diagonal(n,d_P,ld,c[p-1]);
1131: odd = PETSC_TRUE;
1132: for (k=p-1;k>0;k--) {
1133: if (odd) {
1134: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_Q,ld,d_A2,ld,&szero,d_W,ld);
1135: SWAP(d_Q,d_W,aux);
1136: shift_diagonal(n,d_Q,ld,c[k-1]);
1137: odd = PETSC_FALSE;
1138: } else {
1139: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_P,ld,d_A2,ld,&szero,d_W,ld);
1140: SWAP(d_P,d_W,aux);
1141: shift_diagonal(n,d_P,ld,c[k-1]);
1142: odd = PETSC_TRUE;
1143: }
1144: PetscLogGpuFlops(2.0*n*n*n);
1145: }
1146: if (odd) {
1147: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_Q,ld,d_As,ld,&szero,d_W,ld);
1148: SWAP(d_Q,d_W,aux);
1149: cublasXaxpy(cublasv2handle,ld2,&smone,d_P,one,d_Q,one);
1150:     PetscCallMAGMA(magma_xgesv_gpu,n,n,d_Q,ld,piv,d_P,ld);
1151: cublasXscal(cublasv2handle,ld2,&stwo,d_P,one);
1152: shift_diagonal(n,d_P,ld,sone);
1153: cublasXscal(cublasv2handle,ld2,&smone,d_P,one);
1154: } else {
1155: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_P,ld,d_As,ld,&szero,d_W,ld);
1156: SWAP(d_P,d_W,aux);
1157: cublasXaxpy(cublasv2handle,ld2,&smone,d_P,one,d_Q,one);
1158:     PetscCallMAGMA(magma_xgesv_gpu,n,n,d_Q,ld,piv,d_P,ld);
1159: cublasXscal(cublasv2handle,ld2,&stwo,d_P,one);
1160: shift_diagonal(n,d_P,ld,sone);
1161: }
1162: PetscLogGpuFlops(2.0*n*n*n+2.0*n*n*n/3.0+4.0*n*n);
1164: for (k=1;k<=sexp;k++) {
1165: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_P,ld,d_P,ld,&szero,d_W,ld);
1166: cudaMemcpy(d_P,d_W,sizeof(PetscScalar)*ld2,cudaMemcpyDeviceToDevice);
1167: }
1168: if (d_P!=d_Ba) cudaMemcpy(Ba,d_P,sizeof(PetscScalar)*ld2,cudaMemcpyDeviceToHost);
1169: else cudaMemcpy(Ba,d_Ba,sizeof(PetscScalar)*ld2,cudaMemcpyDeviceToHost);
1170: PetscLogGpuFlops(2.0*n*n*n*sexp);
1172: PetscLogGpuTimeEnd();
1173: cudaFree(d_Ba);
1174: cudaFree(d_Q);
1175: cudaFree(d_W);
1176: cudaFree(d_As);
1177: cudaFree(d_A2);
1178: PetscFree(piv);
1180: MatDenseRestoreArray(A,&Aa);
1181: MatDenseRestoreArray(B,&Ba);
1182: magma_finalize();
1183: PetscFunctionReturn(0);
1184: }
1186: /*
1187: * Matrix exponential implementation based on algorithm and matlab code by N. Higham and co-authors
1188: *
1189: * N. J. Higham, "The scaling and squaring method for the matrix exponential
1190: * revisited", SIAM J. Matrix Anal. Appl. 26(4):1179-1193, 2005.
1191: */
1192: PetscErrorCode FNEvaluateFunctionMat_Exp_Higham_CUDAm(FN fn,Mat A,Mat B)
1193: {
1194: PetscBLASInt n_=0,n2,*ipiv,one=1;
1195: PetscInt n,m,j,s,zero=0;
1196: PetscScalar scale,smone=-1.0,sone=1.0,stwo=2.0,szero=0.0;
1197: PetscScalar *Aa,*Ba,*d_Ba,*Apowers[5],*d_Apowers[5],*d_Q,*d_P,*d_W,*work,*d_work,*aux;
1198: const PetscScalar *c;
1199: const PetscScalar c3[4] = { 120, 60, 12, 1 };
1200: const PetscScalar c5[6] = { 30240, 15120, 3360, 420, 30, 1 };
1201: const PetscScalar c7[8] = { 17297280, 8648640, 1995840, 277200, 25200, 1512, 56, 1 };
1202: const PetscScalar c9[10] = { 17643225600, 8821612800, 2075673600, 302702400, 30270240,
1203: 2162160, 110880, 3960, 90, 1 };
1204: const PetscScalar c13[14] = { 64764752532480000, 32382376266240000, 7771770303897600,
1205: 1187353796428800, 129060195264000, 10559470521600,
1206: 670442572800, 33522128640, 1323241920,
1207: 40840800, 960960, 16380, 182, 1 };
1208: cublasHandle_t cublasv2handle;
1210: PetscDeviceInitialize(PETSC_DEVICE_CUDA); /* For CUDA event timers */
1211: PetscCUBLASGetHandle(&cublasv2handle);
1212: magma_init();
1213: MatDenseGetArray(A,&Aa);
1214: MatDenseGetArray(B,&Ba);
1215: MatGetSize(A,&n,NULL);
1216: PetscBLASIntCast(n,&n_);
1217: n2 = n_*n_;
1218: PetscMalloc2(8*n*n,&work,n,&ipiv);
1219: cudaMalloc((void**)&d_work,8*n*n*sizeof(PetscScalar));
1220: cudaMalloc((void **)&d_Ba,sizeof(PetscScalar)*n*n);
1222: PetscLogGpuTimeBegin();
1224: /* Matrix powers */
1225: Apowers[0] = work; /* Apowers[0] = A */
1226: Apowers[1] = Apowers[0] + n*n; /* Apowers[1] = A^2 */
1227: Apowers[2] = Apowers[1] + n*n; /* Apowers[2] = A^4 */
1228: Apowers[3] = Apowers[2] + n*n; /* Apowers[3] = A^6 */
1229: Apowers[4] = Apowers[3] + n*n; /* Apowers[4] = A^8 */
1230: /* Matrix powers on device */
1231: d_Apowers[0] = d_work; /* d_Apowers[0] = A */
1232: d_Apowers[1] = d_Apowers[0] + n*n; /* d_Apowers[1] = A^2 */
1233: d_Apowers[2] = d_Apowers[1] + n*n; /* d_Apowers[2] = A^4 */
1234: d_Apowers[3] = d_Apowers[2] + n*n; /* d_Apowers[3] = A^6 */
1235: d_Apowers[4] = d_Apowers[3] + n*n; /* d_Apowers[4] = A^8 */
1237: cudaMemcpy(d_Apowers[0],Aa,n2*sizeof(PetscScalar),cudaMemcpyHostToDevice);
1238: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n_,n_,n_,&sone,d_Apowers[0],n_,d_Apowers[0],n_,&szero,d_Apowers[1],n_);
1239: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n_,n_,n_,&sone,d_Apowers[1],n_,d_Apowers[1],n_,&szero,d_Apowers[2],n_);
1240: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n_,n_,n_,&sone,d_Apowers[1],n_,d_Apowers[2],n_,&szero,d_Apowers[3],n_);
1241: PetscLogGpuFlops(6.0*n*n*n);
1243: cudaMemcpy(Apowers[0],d_Apowers[0],4*n2*sizeof(PetscScalar),cudaMemcpyDeviceToHost);
1244: /* Compute scaling parameter and order of Pade approximant */
1245: expm_params(n,Apowers,&s,&m,Apowers[4]);
1247: if (s) { /* rescale */
1248: for (j=0;j<4;j++) {
1249: scale = PetscPowRealInt(2.0,-PetscMax(2*j,1)*s);
1250: cublasXscal(cublasv2handle,n2,&scale,d_Apowers[j],one);
1251: }
1252: PetscLogGpuFlops(4.0*n*n);
1253: }
1255: /* Evaluate the Pade approximant */
1256: switch (m) {
1257: case 3: c = c3; break;
1258: case 5: c = c5; break;
1259: case 7: c = c7; break;
1260: case 9: c = c9; break;
1261: case 13: c = c13; break;
1262: default: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong value of m %" PetscInt_FMT,m);
1263: }
1264: d_P = d_Ba;
1265: d_Q = d_Apowers[4] + n*n;
1266: d_W = d_Q + n*n;
1267: switch (m) {
1268: case 3:
1269: case 5:
1270: case 7:
1271: case 9:
1272: if (m==9) cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n_,n_,n_,&sone,d_Apowers[1],n_,d_Apowers[3],n_,&szero,d_Apowers[4],n_);
1273: cudaMemset(d_P,zero,sizeof(PetscScalar)*n2);
1274: cudaMemset(d_Q,zero,sizeof(PetscScalar)*n2);
1275: set_diagonal(n,d_P,n,c[1]);
1276: set_diagonal(n,d_Q,n,c[0]);
1277: for (j=m;j>=3;j-=2) {
1278: cublasXaxpy(cublasv2handle,n2,&c[j],d_Apowers[(j+1)/2-1],one,d_P,one);
1279: cublasXaxpy(cublasv2handle,n2,&c[j-1],d_Apowers[(j+1)/2-1],one,d_Q,one);
1280: PetscLogGpuFlops(4.0*n*n);
1281: }
1282: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n_,n_,n_,&sone,d_Apowers[0],n_,d_P,n_,&szero,d_W,n_);
1283: PetscLogGpuFlops(2.0*n*n*n);
1284: SWAP(d_P,d_W,aux);
1285: break;
1286: case 13:
1287: /* P = A*(Apowers[3]*(c[13]*Apowers[3] + c[11]*Apowers[2] + c[9]*Apowers[1])
1288: + c[7]*Apowers[3] + c[5]*Apowers[2] + c[3]*Apowers[1] + c[1]*I) */
1289: cudaMemcpy(d_P,d_Apowers[3],n2*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);
1290: cublasXscal(cublasv2handle,n2,&c[13],d_P,one);
1291: cublasXaxpy(cublasv2handle,n2,&c[11],d_Apowers[2],one,d_P,one);
1292: cublasXaxpy(cublasv2handle,n2,&c[9],d_Apowers[1],one,d_P,one);
1293: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n_,n_,n_,&sone,d_Apowers[3],n_,d_P,n_,&szero,d_W,n_);
1294: PetscLogGpuFlops(5.0*n*n+2.0*n*n*n);
1296: cudaMemset(d_P,zero,sizeof(PetscScalar)*n2);
1297: set_diagonal(n,d_P,n,c[1]);
1298: cublasXaxpy(cublasv2handle,n2,&c[7],d_Apowers[3],one,d_P,one);
1299: cublasXaxpy(cublasv2handle,n2,&c[5],d_Apowers[2],one,d_P,one);
1300: cublasXaxpy(cublasv2handle,n2,&c[3],d_Apowers[1],one,d_P,one);
1301: cublasXaxpy(cublasv2handle,n2,&sone,d_P,one,d_W,one);
1302: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n_,n_,n_,&sone,d_Apowers[0],n_,d_W,n_,&szero,d_P,n_);
1303: PetscLogGpuFlops(7.0*n*n+2.0*n*n*n);
1304: /* Q = Apowers[3]*(c[12]*Apowers[3] + c[10]*Apowers[2] + c[8]*Apowers[1])
1305: + c[6]*Apowers[3] + c[4]*Apowers[2] + c[2]*Apowers[1] + c[0]*I */
1306: cudaMemcpy(d_Q,d_Apowers[3],n2*sizeof(PetscScalar),cudaMemcpyDeviceToDevice);
1307: cublasXscal(cublasv2handle,n2,&c[12],d_Q,one);
1308: cublasXaxpy(cublasv2handle,n2,&c[10],d_Apowers[2],one,d_Q,one);
1309: cublasXaxpy(cublasv2handle,n2,&c[8],d_Apowers[1],one,d_Q,one);
1310: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n_,n_,n_,&sone,d_Apowers[3],n_,d_Q,n_,&szero,d_W,n_);
1311: PetscLogGpuFlops(5.0*n*n+2.0*n*n*n);
1312: cudaMemset(d_Q,zero,sizeof(PetscScalar)*n2);
1313: set_diagonal(n,d_Q,n,c[0]);
1314: cublasXaxpy(cublasv2handle,n2,&c[6],d_Apowers[3],one,d_Q,one);
1315: cublasXaxpy(cublasv2handle,n2,&c[4],d_Apowers[2],one,d_Q,one);
1316: cublasXaxpy(cublasv2handle,n2,&c[2],d_Apowers[1],one,d_Q,one);
1317: cublasXaxpy(cublasv2handle,n2,&sone,d_W,one,d_Q,one);
1318: PetscLogGpuFlops(7.0*n*n);
1319: break;
1320: default: SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Wrong value of m %" PetscInt_FMT,m);
1321: }
1322: cublasXaxpy(cublasv2handle,n2,&smone,d_P,one,d_Q,one);
1324:   PetscCallMAGMA(magma_xgesv_gpu,n_,n_,d_Q,n_,ipiv,d_P,n_);
1326: cublasXscal(cublasv2handle,n2,&stwo,d_P,one);
1327: shift_diagonal(n,d_P,n,sone);
1328: PetscLogGpuFlops(2.0*n*n*n/3.0+4.0*n*n);
1330: /* Squaring */
1331: for (j=1;j<=s;j++) {
1332: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n_,n_,n_,&sone,d_P,n_,d_P,n_,&szero,d_W,n_);
1333: SWAP(d_P,d_W,aux);
1334: }
1335: if (d_P!=d_Ba) cudaMemcpy(Ba,d_P,n2*sizeof(PetscScalar),cudaMemcpyDeviceToHost);
1336: else cudaMemcpy(Ba,d_Ba,n2*sizeof(PetscScalar),cudaMemcpyDeviceToHost);
1337: PetscLogGpuFlops(2.0*n*n*n*s);
1338: PetscLogGpuTimeEnd();
1340: PetscFree2(work,ipiv);
1341: MatDenseRestoreArray(A,&Aa);
1342: MatDenseRestoreArray(B,&Ba);
1343: cudaFree(d_Ba);
1344: cudaFree(d_work);
1345: magma_finalize();
1346: PetscFunctionReturn(0);
1347: }
1349: /*
1350: * Matrix exponential implementation based on algorithm and matlab code by Stefan Guettel
1351: * and Yuji Nakatsukasa
1352: *
1353: * Stefan Guettel and Yuji Nakatsukasa, "Scaled and Squared Subdiagonal Pade'
1354: * Approximation for the Matrix Exponential",
1355: * SIAM J. Matrix Anal. Appl. 37(1):145-170, 2016.
1356: * https://doi.org/10.1137/15M1027553
1357: */
1358: PetscErrorCode FNEvaluateFunctionMat_Exp_GuettelNakatsukasa_CUDAm(FN fn,Mat A,Mat B)
1359: {
1360: PetscInt i,j,n_,s,k,m,zero=0,mod;
1361: PetscBLASInt n=0,n2=0,irsize=0,rsizediv2,ipsize=0,iremainsize=0,query=-1,*piv,minlen,lwork=0,one=1;
1362: PetscReal nrm,shift=0.0,rone=1.0,rzero=0.0;
1363: #if defined(PETSC_USE_COMPLEX)
1364: PetscReal *rwork=NULL;
1365: #endif
1366: PetscComplex *d_As,*d_RR,*d_RR2,*d_expmA,*d_expmA2,*d_Maux,*d_Maux2,rsize,*r,psize,*p,remainsize,*remainterm,*rootp,*rootq,mult=0.0,scale,cone=1.0,czero=0.0,*aux;
1367: PetscScalar *Aa,*Ba,*d_Ba,*d_Ba2,*Maux,*sMaux,*d_sMaux,*wr,*wi,expshift,sone=1.0,szero=0.0,*work,work1,*saux;
1368: PetscBool isreal,*d_isreal,flg;
1369: cublasHandle_t cublasv2handle;
1371: PetscDeviceInitialize(PETSC_DEVICE_CUDA); /* For CUDA event timers */
1372: PetscCUBLASGetHandle(&cublasv2handle);
1373: magma_init();
1374: MatGetSize(A,&n_,NULL);
1375: PetscBLASIntCast(n_,&n);
1376: MatDenseGetArray(A,&Aa);
1377: MatDenseGetArray(B,&Ba);
1378: PetscBLASIntCast(n*n,&n2);
1380: cudaMalloc((void **)&d_Ba,sizeof(PetscScalar)*n2);
1381: PetscLogGpuTimeBegin();
1382: d_Ba2 = d_Ba;
1384: PetscMalloc2(n2,&sMaux,n2,&Maux);
1385: cudaMalloc((void **)&d_isreal,sizeof(PetscBool));
1386: cudaMalloc((void **)&d_sMaux,sizeof(PetscScalar)*n2);
1387: cudaMalloc((void **)&d_Maux,sizeof(PetscComplex)*n2);
1389: cudaMemcpy(d_sMaux,Aa,sizeof(PetscScalar)*n2,cudaMemcpyHostToDevice);
1390: d_Maux2 = d_Maux;
1391: PetscOptionsGetReal(NULL,NULL,"-fn_expm_estimated_eig",&shift,&flg);
1392: if (!flg) {
1393: PetscMalloc2(n,&wr,n,&wi);
1394: PetscArraycpy(sMaux,Aa,n2);
1395: /* estimate rightmost eigenvalue and shift A with it */
1396: #if !defined(PETSC_USE_COMPLEX)
1397:     PetscCallMAGMA(magma_xgeev,MagmaNoVec,MagmaNoVec,n,sMaux,n,wr,wi,NULL,n,NULL,n,&work1,query);
1398: PetscBLASIntCast((PetscInt)PetscRealPart(work1),&lwork);
1399: PetscMalloc1(lwork,&work);
1400:     PetscCallMAGMA(magma_xgeev,MagmaNoVec,MagmaNoVec,n,sMaux,n,wr,wi,NULL,n,NULL,n,work,lwork);
1401: PetscFree(work);
1402: #else
1403: PetscArraycpy(Maux,Aa,n2);
1404:     PetscCallMAGMA(magma_xgeev,MagmaNoVec,MagmaNoVec,n,Maux,n,wr,NULL,n,NULL,n,&work1,query,rwork);
1405: PetscBLASIntCast((PetscInt)PetscRealPart(work1),&lwork);
1406: PetscMalloc2(2*n,&rwork,lwork,&work);
1407:     PetscCallMAGMA(magma_xgeev,MagmaNoVec,MagmaNoVec,n,Maux,n,wr,NULL,n,NULL,n,work,lwork,rwork);
1408: PetscFree2(rwork,work);
1409: #endif
1410: PetscLogGpuFlops(25.0*n*n*n+(n*n*n)/3.0+1.0*n*n*n);
1412: shift = PetscRealPart(wr[0]);
1413: for (i=1;i<n;i++) {
1414: if (PetscRealPart(wr[i]) > shift) shift = PetscRealPart(wr[i]);
1415: }
1416: PetscFree2(wr,wi);
1417: }
1418: /* shift so that largest real part is (about) 0 */
1419: cudaMemcpy(d_sMaux,Aa,sizeof(PetscScalar)*n2,cudaMemcpyHostToDevice);
1420: if (shift) {
1421: shift_diagonal(n,d_sMaux,n,-shift);
1422: PetscLogGpuFlops(1.0*n);
1423: }
1424: #if defined(PETSC_USE_COMPLEX)
1425: cudaMemcpy(d_Maux,Aa,sizeof(PetscScalar)*n2,cudaMemcpyHostToDevice);
1426: if (shift) {
1427: shift_diagonal(n,d_Maux,n,-shift);
1428: PetscLogGpuFlops(1.0*n);
1429: }
1430: #endif
1432: /* estimate norm(A) and select the scaling factor */
1433: cublasXnrm2(cublasv2handle,n2,d_sMaux,one,&nrm);
1434: PetscLogGpuFlops(2.0*n*n);
1435: sexpm_params(nrm,&s,&k,&m);
1436: if (s==0 && k==1 && m==0) { /* exp(A) = I+A to eps! */
1437: if (shift) expshift = PetscExpReal(shift);
1438: shift_Cdiagonal(n,d_Maux,n,rone,rzero);
1439: if (shift) {
1440: cublasXscal(cublasv2handle,n2,&expshift,d_sMaux,one);
1441: PetscLogGpuFlops(1.0*(n+n2));
1442: } else PetscLogGpuFlops(1.0*n);
1443: cudaMemcpy(Ba,d_sMaux,sizeof(PetscScalar)*n2,cudaMemcpyDeviceToHost);
1444: cudaFree(d_Ba);
1445: cudaFree(d_isreal);
1446: cudaFree(d_sMaux);
1447: cudaFree(d_Maux);
1448: MatDenseRestoreArray(A,&Aa);
1449: MatDenseRestoreArray(B,&Ba);
1450: PetscFunctionReturn(0); /* quick return */
1451: }
1453: cudaMalloc((void **)&d_expmA,sizeof(PetscComplex)*n2);
1454: cudaMalloc((void **)&d_As,sizeof(PetscComplex)*n2);
1455: cudaMalloc((void **)&d_RR,sizeof(PetscComplex)*n2);
1456: d_expmA2 = d_expmA; d_RR2 = d_RR;
1457: PetscMalloc1(n,&piv);
1458: /* scale matrix */
1459: #if !defined(PETSC_USE_COMPLEX)
1460: copy_array2D_S2C(n,n,d_As,n,d_sMaux,n);
1461: #else
1462: cudaMemcpy(d_As,d_sMaux,sizeof(PetscScalar)*n2,cudaMemcpyDeviceToDevice);
1463: #endif
1464: scale = 1.0/PetscPowRealInt(2.0,s);
1465: cublasXCscal(cublasv2handle,n2,(const cuComplex *)&scale,(cuComplex *)d_As,one);
1466: SlepcLogGpuFlopsComplex(1.0*n2);
1468: /* evaluate Pade approximant (partial fraction or product form) */
1469: if (fn->method==8 || !m) { /* partial fraction */
1470: getcoeffs(k,m,&rsize,&psize,&remainsize,PETSC_TRUE);
1471: PetscBLASIntCast((PetscInt)PetscRealPartComplex(rsize),&irsize);
1472: PetscBLASIntCast((PetscInt)PetscRealPartComplex(psize),&ipsize);
1473: PetscBLASIntCast((PetscInt)PetscRealPartComplex(remainsize),&iremainsize);
1474: PetscMalloc3(irsize,&r,ipsize,&p,iremainsize,&remainterm);
1475: getcoeffs(k,m,r,p,remainterm,PETSC_FALSE);
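/* Annotation (added): in this branch the subdiagonal Pade approximant is evaluated in
   partial fraction form,
       r_km(As) = sum_i r[i]*(As - p[i]*I)^{-1} + (polynomial remainder terms),
   so each term costs one shifted linear solve (gesv) instead of forming numerator and
   denominator explicitly. */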
1477: cudaMemset(d_expmA,zero,sizeof(PetscComplex)*n2);
1478: #if !defined(PETSC_USE_COMPLEX)
1479: isreal = PETSC_TRUE;
1480: #else
1481: getisreal_array2D(n,n,d_Maux,n,d_isreal);
1482: cudaMemcpy(&isreal,d_isreal,sizeof(PetscBool),cudaMemcpyDeviceToHost);
1483: #endif
1484: if (isreal) {
1485: rsizediv2 = irsize/2;
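/* Annotation (added): when the data are real the poles p[i] come in complex conjugate pairs,
   so only one solve per pair is performed and its conjugate contribution is added afterwards
   (add_array2D_Conj), roughly halving the number of linear solves. */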
1486: for (i=0;i<rsizediv2;i++) { /* use partial fraction to get R(As) */
1487: cudaMemcpy(d_Maux,d_As,sizeof(PetscComplex)*n2,cudaMemcpyDeviceToDevice);
1488: cudaMemset(d_RR,zero,sizeof(PetscComplex)*n2);
1489: shift_Cdiagonal(n,d_Maux,n,-PetscRealPartComplex(p[2*i]),-PetscImaginaryPartComplex(p[2*i]));
1490: set_Cdiagonal(n,d_RR,n,PetscRealPartComplex(r[2*i]),PetscImaginaryPartComplex(r[2*i]));
1491: PetscCallMAGMA(magma_Cgesv_gpu,n,n,d_Maux,n,piv,d_RR,n);
1492: add_array2D_Conj(n,n,d_RR,n);
1493: cublasXCaxpy(cublasv2handle,n2,&cone,d_RR,one,d_expmA,one);
1494: /* shift(n) + gesv + axpy(n2) */
1495: SlepcLogGpuFlopsComplex(1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n)+2.0*n2);
1496: }
1498: mod = ipsize % 2;
1499: if (mod) {
1500: cudaMemcpy(d_Maux,d_As,sizeof(PetscComplex)*n2,cudaMemcpyDeviceToDevice);
1501: cudaMemset(d_RR,zero,sizeof(PetscComplex)*n2);
1502: shift_Cdiagonal(n,d_Maux,n,-PetscRealPartComplex(p[ipsize-1]),-PetscImaginaryPartComplex(p[ipsize-1]));
1503: set_Cdiagonal(n,d_RR,n,PetscRealPartComplex(r[irsize-1]),PetscImaginaryPartComplex(r[irsize-1]));
1504: PetscCallMAGMA(magma_Cgesv_gpu,n,n,d_Maux,n,piv,d_RR,n);
1505: cublasXCaxpy(cublasv2handle,n2,&cone,d_RR,one,d_expmA,one);
1506: SlepcLogGpuFlopsComplex(1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n)+1.0*n2);
1507: }
1508: } else { /* complex */
1509: for (i=0;i<irsize;i++) { /* use partial fraction to get R(As) */
1510: cudaMemcpy(d_Maux,d_As,sizeof(PetscComplex)*n2,cudaMemcpyDeviceToDevice);
1511: cudaMemset(d_RR,zero,sizeof(PetscComplex)*n2);
1512: shift_Cdiagonal(n,d_Maux,n,-PetscRealPartComplex(p[i]),-PetscImaginaryPartComplex(p[i]));
1513: set_Cdiagonal(n,d_RR,n,PetscRealPartComplex(r[i]),PetscImaginaryPartComplex(r[i]));
1514: PetscCallMAGMA(magma_Cgesv_gpu,n,n,d_Maux,n,piv,d_RR,n);
1515: cublasXCaxpy(cublasv2handle,n2,&cone,d_RR,one,d_expmA,one);
1516: SlepcLogGpuFlopsComplex(1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n)+1.0*n2);
1517: }
1518: }
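/* Annotation (added): the following loop adds the polynomial part of the partial fraction
   expansion: remainterm[iremainsize-1] contributes a multiple of the identity, and each
   further term scales a power of the scaled matrix As accumulated in d_RR. */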
1519: for (i=0;i<iremainsize;i++) {
1520: if (!i) {
1521: cudaMemset(d_RR,zero,sizeof(PetscComplex)*n2);
1522: set_Cdiagonal(n,d_RR,n,PetscRealPartComplex(remainterm[iremainsize-1]),PetscImaginaryPartComplex(remainterm[iremainsize-1]));
1523: } else {
1524: cudaMemcpy(d_RR,d_As,sizeof(PetscComplex)*n2,cudaMemcpyDeviceToDevice);
1525: for (j=1;j<i;j++) {
1526: cublasXCgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&cone,d_RR,n,d_RR,n,&czero,d_Maux,n);
1527: SWAP(d_RR,d_Maux,aux);
1528: SlepcLogGpuFlopsComplex(2.0*n*n*n);
1529: }
1530: cublasXCscal(cublasv2handle,n2,&remainterm[iremainsize-1-i],d_RR,one);
1531: SlepcLogGpuFlopsComplex(1.0*n2);
1532: }
1533: cublasXCaxpy(cublasv2handle,n2,&cone,d_RR,one,d_expmA,one);
1534: SlepcLogGpuFlopsComplex(1.0*n2);
1535: }
1536: PetscFree3(r,p,remainterm);
1537: } else { /* product form, default */
1538: getcoeffsproduct(k,m,&rsize,&psize,&mult,PETSC_TRUE);
1539: PetscBLASIntCast((PetscInt)PetscRealPartComplex(rsize),&irsize);
1540: PetscBLASIntCast((PetscInt)PetscRealPartComplex(psize),&ipsize);
1541: PetscMalloc2(irsize,&rootp,ipsize,&rootq);
1542: getcoeffsproduct(k,m,rootp,rootq,&mult,PETSC_FALSE);
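/* Annotation (added): in the (default) product form the approximant is applied as
       r_km(As) = mult * prod_i (As - rootp[i]*I) * prod_i (As - rootq[i]*I)^{-1},
   alternating one matrix multiply per numerator root with one linear solve per denominator
   root, starting from the identity matrix. */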
1544: cudaMemset(d_expmA,zero,sizeof(PetscComplex)*n2);
1545: set_Cdiagonal(n,d_expmA,n,rone,rzero); /* initialize */
1546: minlen = PetscMin(irsize,ipsize);
1547: for (i=0;i<minlen;i++) {
1548: cudaMemcpy(d_RR,d_As,sizeof(PetscComplex)*n2,cudaMemcpyDeviceToDevice);
1549: shift_Cdiagonal(n,d_RR,n,-PetscRealPartComplex(rootp[i]),-PetscImaginaryPartComplex(rootp[i]));
1550: cublasXCgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&cone,d_RR,n,d_expmA,n,&czero,d_Maux,n);
1551: SWAP(d_expmA,d_Maux,aux);
1552: cudaMemcpy(d_RR,d_As,sizeof(PetscComplex)*n2,cudaMemcpyDeviceToDevice);
1553: shift_Cdiagonal(n,d_RR,n,-PetscRealPartComplex(rootq[i]),-PetscImaginaryPartComplex(rootq[i]));
1554: PetscCallMAGMA(magma_Cgesv_gpu,n,n,d_RR,n,piv,d_expmA,n);
1555: /* shift(n) + gemm + shift(n) + gesv */
1556: SlepcLogGpuFlopsComplex(1.0*n+(2.0*n*n*n)+1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n));
1557: }
1558: /* extra numerator factors */
1559: for (i=minlen;i<irsize;i++) {
1560: cudaMemcpy(d_RR,d_As,sizeof(PetscComplex)*n2,cudaMemcpyDeviceToDevice);
1561: shift_Cdiagonal(n,d_RR,n,-PetscRealPartComplex(rootp[i]),-PetscImaginaryPartComplex(rootp[i]));
1562: cublasXCgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&cone,d_RR,n,d_expmA,n,&czero,d_Maux,n);
1563: SWAP(d_expmA,d_Maux,aux);
1564: SlepcLogGpuFlopsComplex(1.0*n+2.0*n*n*n);
1565: }
1566: /* extra denominator */
1567: for (i=minlen;i<ipsize;i++) {
1568: cudaMemcpy(d_RR,d_As,sizeof(PetscComplex)*n2,cudaMemcpyDeviceToDevice);
1569: shift_Cdiagonal(n,d_RR,n,-PetscRealPartComplex(rootq[i]),-PetscImaginaryPartComplex(rootq[i]));
1570: PetscCallMAGMA(magma_Cgesv_gpu,n,n,d_RR,n,piv,d_expmA,n);
1571: SlepcLogGpuFlopsComplex(1.0*n+(2.0*n*n*n/3.0+2.0*n*n*n));
1572: }
1573: cublasXCscal(cublasv2handle,n2,&mult,d_expmA,one);
1574: SlepcLogGpuFlopsComplex(1.0*n2);
1575: PetscFree2(rootp,rootq);
1576: }
1578: #if !defined(PETSC_USE_COMPLEX)
1579: copy_array2D_C2S(n,n,d_Ba2,n,d_expmA,n);
1580: #else
1581: cudaMemcpy(d_Ba2,d_expmA,sizeof(PetscScalar)*n2,cudaMemcpyDeviceToDevice);
1582: #endif
1584: /* perform repeated squaring */
1585: for (i=0;i<s;i++) { /* final squaring */
1586: cublasXgemm(cublasv2handle,CUBLAS_OP_N,CUBLAS_OP_N,n,n,n,&sone,d_Ba2,n,d_Ba2,n,&szero,d_sMaux,n);
1587: SWAP(d_Ba2,d_sMaux,saux);
1588: PetscLogGpuFlops(2.0*n*n*n);
1589: }
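/* Annotation (added): after the s squarings d_Ba2 approximates exp(A - shift*I); the scaling
   by expshift = e^shift below recovers exp(A), since exp(A) = e^shift * exp(A - shift*I). */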
1590: if (d_Ba2!=d_Ba) {
1591: cudaMemcpy(d_Ba,d_Ba2,sizeof(PetscScalar)*n2,cudaMemcpyDeviceToDevice);
1592: d_sMaux = d_Ba2;
1593: }
1594: if (shift) {
1595: expshift = PetscExpReal(shift);
1596: cublasXscal(cublasv2handle,n2,&expshift,d_Ba,one);
1597: PetscLogGpuFlops(1.0*n2);
1598: }
1600: cudaMemcpy(Ba,d_Ba,sizeof(PetscScalar)*n2,cudaMemcpyDeviceToHost);
1601: PetscLogGpuTimeEnd();
1603: /* restore pointers */
1604: d_Maux = d_Maux2; d_expmA = d_expmA2; d_RR = d_RR2;
1605: cudaFree(d_Ba);
1606: cudaFree(d_isreal);
1607: cudaFree(d_sMaux);
1608: cudaFree(d_Maux);
1609: cudaFree(d_expmA);
1610: cudaFree(d_As);
1611: cudaFree(d_RR);
1612: PetscFree(piv);
1613: PetscFree2(sMaux,Maux);
1614: MatDenseRestoreArray(A,&Aa);
1615: MatDenseRestoreArray(B,&Ba);
1616: magma_finalize();
1617: PetscFunctionReturn(0);
1618: }
1619: #endif /* PETSC_HAVE_MAGMA */
1620: #endif /* PETSC_HAVE_CUDA */
1622: PetscErrorCode FNView_Exp(FN fn,PetscViewer viewer)
1623: {
1624: PetscBool isascii;
1625: char str[50];
1626: const char *methodname[] = {
1627: "scaling & squaring, [m/m] Pade approximant (Higham)",
1628: "scaling & squaring, [6/6] Pade approximant",
1629: "scaling & squaring, subdiagonal Pade approximant (product form)",
1630: "scaling & squaring, subdiagonal Pade approximant (partial fraction)"
1631: #if defined(PETSC_HAVE_CUDA)
1632: ,"scaling & squaring, [6/6] Pade approximant CUDA"
1633: #if defined(PETSC_HAVE_MAGMA)
1634: ,"scaling & squaring, [m/m] Pade approximant (Higham) CUDA/MAGMA",
1635: "scaling & squaring, [6/6] Pade approximant CUDA/MAGMA",
1636: "scaling & squaring, subdiagonal Pade approximant (product form) CUDA/MAGMA",
1637: "scaling & squaring, subdiagonal Pade approximant (partial fraction) CUDA/MAGMA",
1638: #endif
1639: #endif
1640: };
1641: const int nmeth=sizeof(methodname)/sizeof(methodname[0]);
1643: PetscObjectTypeCompare((PetscObject)viewer,PETSCVIEWERASCII,&isascii);
1644: if (isascii) {
1645: if (fn->beta==(PetscScalar)1.0) {
1646: if (fn->alpha==(PetscScalar)1.0) PetscViewerASCIIPrintf(viewer," Exponential: exp(x)\n");
1647: else {
1648: SlepcSNPrintfScalar(str,sizeof(str),fn->alpha,PETSC_TRUE);
1649: PetscViewerASCIIPrintf(viewer," Exponential: exp(%s*x)\n",str);
1650: }
1651: } else {
1652: SlepcSNPrintfScalar(str,sizeof(str),fn->beta,PETSC_TRUE);
1653: if (fn->alpha==(PetscScalar)1.0) PetscViewerASCIIPrintf(viewer," Exponential: %s*exp(x)\n",str);
1654: else {
1655: PetscViewerASCIIPrintf(viewer," Exponential: %s",str);
1656: PetscViewerASCIIUseTabs(viewer,PETSC_FALSE);
1657: SlepcSNPrintfScalar(str,sizeof(str),fn->alpha,PETSC_TRUE);
1658: PetscViewerASCIIPrintf(viewer,"*exp(%s*x)\n",str);
1659: PetscViewerASCIIUseTabs(viewer,PETSC_TRUE);
1660: }
1661: }
1662: if (fn->method<nmeth) PetscViewerASCIIPrintf(viewer," computing matrix functions with: %s\n",methodname[fn->method]);
1663: }
1664: PetscFunctionReturn(0);
1665: }
1667: SLEPC_EXTERN PetscErrorCode FNCreate_Exp(FN fn)
1668: {
1669: fn->ops->evaluatefunction = FNEvaluateFunction_Exp;
1670: fn->ops->evaluatederivative = FNEvaluateDerivative_Exp;
1671: fn->ops->evaluatefunctionmat[0] = FNEvaluateFunctionMat_Exp_Higham;
1672: fn->ops->evaluatefunctionmat[1] = FNEvaluateFunctionMat_Exp_Pade;
1673: fn->ops->evaluatefunctionmat[2] = FNEvaluateFunctionMat_Exp_GuettelNakatsukasa; /* product form */
1674: fn->ops->evaluatefunctionmat[3] = FNEvaluateFunctionMat_Exp_GuettelNakatsukasa; /* partial fraction */
1675: #if defined(PETSC_HAVE_CUDA)
1676: fn->ops->evaluatefunctionmat[4] = FNEvaluateFunctionMat_Exp_Pade_CUDA;
1677: #if defined(PETSC_HAVE_MAGMA)
1678: fn->ops->evaluatefunctionmat[5] = FNEvaluateFunctionMat_Exp_Higham_CUDAm;
1679: fn->ops->evaluatefunctionmat[6] = FNEvaluateFunctionMat_Exp_Pade_CUDAm;
1680: fn->ops->evaluatefunctionmat[7] = FNEvaluateFunctionMat_Exp_GuettelNakatsukasa_CUDAm; /* product form */
1681: fn->ops->evaluatefunctionmat[8] = FNEvaluateFunctionMat_Exp_GuettelNakatsukasa_CUDAm; /* partial fraction */
1682: #endif
1683: #endif
1684: fn->ops->view = FNView_Exp;
1685: PetscFunctionReturn(0);
1686: }
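/*
   Annotation (added, illustrative only): the slot index in fn->ops->evaluatefunctionmat[]
   corresponds to the value given to FNSetMethod() (option -fn_method), in the same order as
   the descriptions printed by FNView_Exp() above. A minimal usage sketch (error checking
   omitted, A assumed to be a square dense matrix):

     FN  fn;
     Mat F;                              // dense matrix of the same size as A, holds the result
     FNCreate(PETSC_COMM_WORLD,&fn);
     FNSetType(fn,FNEXP);                // beta*exp(alpha*x), with alpha=beta=1 by default
     FNSetMethod(fn,1);                  // e.g. select the [6/6] Pade routine
     FNEvaluateFunctionMat(fn,A,F);      // F = exp(A)
     FNDestroy(&fn);
*/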