package integrators;

import main.*;

/**
 * The SemiExplicitAPCIntegrator implements a custom-modified fourth-order
 * Adams-Bashforth-Moulton predictor-corrector method, with a special way of writing the
 * corrector formula that, dependent on the parameters of the underlying equations, allows
 * the fixed-point iteration to converge for larger values of the partial derivatives of
 * the system than would be possible for the straight Adams method.
 *
 * In a nutshell, all those terms of the derivative at y(t+h) that happen to be linear in
 * y sub i are moved to the left hand side of the formula; then the formula is re-solved
 * for y(t+h) to yield a partially-explicit formula. The advantage of this is that the
 * coefficients of terms linear in y sub i now appear in the denominator of the right hand
 * side, thus improving the likelihood that fixed-point iteration will converge for a
 * large time step.
 *
 * In practice, this Integrator is typically able to take much larger time steps than the
 * straight Adams method (even as implemented in AdamsIntegrator where convergence is
 * assisted with a mild trick). On the other hand, if the coefficients of linear terms are
 * not large, the Cash-Karp integration method generally takes larger time steps (although
 * this method remains competitive because it uses fewer derivative evaluations).
 *
 * Like the AdamsIntegrator, this Integrator uses fourth-order Runge-Kutta steps to start
 * off the integration or to restart it internally. See the documentation for the
 * AdamsIntegrator for a description of the basic Adams-Bashforth-Moulton method.
 */
public class SemiExplicitAPCIntegrator extends Integrator {

    final String type = "SemiExplicitAPC";

    // Coefficients for the Adams-Bashforth predictor (Ps) and Adams-Moulton (Cs) corrector,
    // also the coefficients for interpolations to halve step size, and estimating the error,
    // as given in Melvin Maron, Numerical Analysis: A Practical Approach c. 1982, p. 354-7.
    final float P1 = -9f/24f, P2 = 37f/24f, P3 = -59f/24f, P4 = 55f/24f;
    final float C1 = 1f/24f, C2 = -5f/24f, C3 = 19f/24f, C4 = 9f/24f;
    // A and B are 5-point interpolation weights (applied to f[Tminus4]..f[Tzero], in that
    // order) used by HalveStep() to synthesize derivative history on the half-spaced grid.
    // A's weights match Lagrange interpolation at the t-h/2 midpoint; B's node is as given
    // in Maron -- not re-derived here.
    final float[] A = {-5f/128f, 28f/128f, -70f/128f, 140f/128f, 35f/128f};
    final float[] B = {3f/64f, -16f/64f, 54f/64f, 24f/64f, -1f/64f};
    final float D = 19f/270f;            // Milne-style error-estimate coefficient applied to ||yC - yP||
    final float GAMMA = 1f/2f;           // safety factor in the ideal-step-size formula
    final int MAXIT = 4;                 // maximum corrector (fixed-point) iterations per step
    final float INIT_STEP = 0.02f;       // fixed step size used by the RK4 starter
    private float maxTimeStep=10f;       // wishful thinking... (NOTE(review): never read in this class)
    private boolean started=false;       // false until StartIntegrationRun() has primed f[][]
    private int countdown=0, countup=0;  // steps remaining before halving (countdown) / doubling (countup) is allowed again
    NodeValues[] yT;                     // current state
    NodeValues[] yP, yC, yErr;           // prediction and corrected estimate
    NodeValues[][] f;                    // derivatives at t-6 through t
    NodeValues[] fhat;                   // f without terms linear in y sub i
    NodeValues[] dfdyi;                  // partial of f w.r.t. y sub i; coefficients of terms linear in y sub i
    NodeValues[][] m;                    // intermediates in Runge-Kutta steps
    NodeValues[] yStable;                // used to store the most recent stable estimate of y(t+1), i.e. if restarting fails

    // named indices into f
    final int Tminus6 = 0, Tminus5 = 1, Tminus4 = 2, Tminus3 = 3, Tminus2 = 4, Tminus1 = 5, Tzero = 6;

    public SemiExplicitAPCIntegrator() {
    }

    /**
     * Allocates all state, history and scratch arrays for the given model. Every
     * NodeValues entry takes its numSides from the corresponding node of cell 0 --
     * i.e. all cells are assumed to share cell 0's node layout (note the
     * model.cells[0] lookup inside the per-cell loop).
     */
    public void init(Model model) {
        super.init(model);
        errorTolerance = 0.0001f;
        yT = new NodeValues[num_cells * num_nodes];
        yP = new NodeValues[num_cells * num_nodes];
        yC = new NodeValues[num_cells * num_nodes];
        yErr = new NodeValues[num_cells * num_nodes];
        f = new NodeValues[7][num_cells * num_nodes];
        fhat = new NodeValues[num_cells * num_nodes];
        dfdyi = new NodeValues[num_cells * num_nodes];
        m = new NodeValues[4][num_cells * num_nodes];
        yStable = new NodeValues[num_cells * num_nodes];
        for(int i = 0; i < num_cells; i++) {
            for(int j = 0; j < num_nodes; j++) {
                int k = (i * num_nodes) + j;
                yT[k] = new NodeValues();
                yT[k].numSides = model.cells[0].nodes[j].numSides;
                yP[k] = new NodeValues();
                yP[k].numSides = model.cells[0].nodes[j].numSides;
                yC[k] = new NodeValues();
                yC[k].numSides = model.cells[0].nodes[j].numSides;
                yErr[k] = new NodeValues();
                yErr[k].numSides = model.cells[0].nodes[j].numSides;
                for(int l = 0; l < 7; l++) {
                    f[l][k] = new NodeValues();
                    f[l][k].numSides = model.cells[0].nodes[j].numSides;
                }
                fhat[k] = new NodeValues();
                fhat[k].numSides = model.cells[0].nodes[j].numSides;
                dfdyi[k] = new NodeValues();
                dfdyi[k].numSides = model.cells[0].nodes[j].numSides;
                for(int l = 0; l < 4; l++) {
                    m[l][k] = new NodeValues();
                    m[l][k].numSides = model.cells[0].nodes[j].numSides;
                }
                yStable[k] = new NodeValues();
                yStable[k].numSides = model.cells[0].nodes[j].numSides;
            }
        }
    }

    /** Forgets the derivative history (forcing an RK4 restart) and resets the step size. */
    public void reset() {
        started = false;
        eEst = 0;
        timestep = defaultStepsize;
    }

    /**
     * Advances the integration by one (possibly adaptive) step and returns the total
     * simulated time actually crossed. May cross more than one internal step's worth of
     * time: the RK4 starter takes 6 sub-steps, a rejected step recurses after halving,
     * and Restart() takes 6 RK4 steps.
     *
     * Outline: explicit Adams-Bashforth prediction (yP), then the semi-explicit
     * Adams-Moulton corrector (yC) -- the fhat/dfdyi split puts the linear-in-y_i
     * coefficient into the denominator -- iterated up to MAXIT times until the
     * correction norm falls within tolerance; finally the step is accepted, accepted
     * with a step-size change, or rejected and retried at a smaller step.
     */
    public float IntegrateOneStep() {
        float delta = 0f, idealstep = 0f;
        float stepTaken = 0;
        int i,j,k,l;
        boolean done = true;
        int iterCount = 0;
        if(countdown > 0) countdown--;    // this tells whether we can halve the step size yet;
                                          // decrement it now, because if the step size actually
                                          // changes during this call, countdown is reset.
        if(!started) {
            stepTaken += StartIntegrationRun();    // load up f to start the method off...
        }
        // Adams-Bashforth formula computes explicit prediction of y at t+1....
        getCurrentValues(yT);
        getDerivatives(f[Tzero]);
        for(i = 0; i < num_cells; i++) {
            for(j = 0; j < num_nodes; j++) {
                k = (i*num_nodes)+j;
                for(l = 0; l < yP[k].numSides; l++) {
                    yP[k].values[l] = yT[k].values[l] + timestep * (
                            P1 * f[Tminus3][k].values[l] +
                            P2 * f[Tminus2][k].values[l] +
                            P3 * f[Tminus1][k].values[l] +
                            P4 * f[Tzero][k].values[l] );
                }
            }
        }
        setIntegrationValues(yP);
        // implicit Adams-Moulton formula computes corrected estimate of y at t+1;
        // the C4 term is split into fhat (nonlinear part, in the numerator) and
        // dfdyi (linear-in-y_i coefficient, moved into the denominator).
        getFhatAndDfdyi(fhat, dfdyi);
        for(i = 0; i < num_cells; i++) {
            for(j = 0; j < num_nodes; j++) {
                k = (i*num_nodes)+j;
                for(l = 0; l < yP[k].numSides; l++) {
                    yC[k].values[l] = ((yT[k].values[l] + timestep * (
                            C1 * f[Tminus2][k].values[l] +
                            C2 * f[Tminus1][k].values[l] +
                            C3 * f[Tzero][k].values[l] +
                            C4 * fhat[k].values[l] ))
                            / (1f - C4 * timestep * dfdyi[k].values[l])) ;
                    yErr[k].values[l] = Math.abs(yC[k].values[l] - yP[k].values[l]);
                    delta += yErr[k].values[l] * yErr[k].values[l];
                }
            }
        }
        iterCount++;
        delta = D * (float)Math.pow((double)delta,0.5);   // D * 2-norm of (yC - yP): per-step error estimate
        eEst = delta;
        if(eEst/errorTolerance > 1f) {
            done = false;
            do {
                setIntegrationValues(yC);
                float eta = 0;
                float yCold = 0;
                // another Adams-Moulton step to improve estimate of y at t+1
                getFhatAndDfdyi(fhat, dfdyi);
                for(i = 0; i < num_cells; i++) {
                    for(j = 0; j < num_nodes; j++) {
                        k = (i*num_nodes)+j;
                        for(l = 0; l < yP[k].numSides; l++) {
                            yCold = yC[k].values[l];
                            yC[k].values[l] = ((yT[k].values[l] + timestep * (
                                    C1 * f[Tminus2][k].values[l] +
                                    C2 * f[Tminus1][k].values[l] +
                                    C3 * f[Tzero][k].values[l] +
                                    C4 * fhat[k].values[l] ))
                                    / (1f - C4 * timestep * dfdyi[k].values[l]));
                            yErr[k].values[l] = Math.abs(yC[k].values[l] - yCold);
                            eta += yErr[k].values[l] * yErr[k].values[l];
                        }
                    }
                }
                iterCount++;
                eta = (float)Math.pow((double)eta,0.5);   // norm of the latest correction, not of (yC - yP)
                if(eta <= errorTolerance) done = true;
            } while(iterCount < MAXIT && !done);
        }
        if(!done) {
            // Fixed-point iteration failed to converge within MAXIT passes.
            setIntegrationValues(yT);
            HalveStep();    // this doesn't check the countdown, just recursively halve the step until we get into a comfortable zone
            stepTaken += IntegrateOneStep();
        } else {
            // Re-derive the predictor-corrector error estimate from the final yC.
            delta = 0;
            for(i = 0; i < num_cells; i++) {
                for(j = 0; j < num_nodes; j++) {
                    k = (i*num_nodes)+j;
                    for(l = 0; l < yP[k].numSides; l++) {
                        yErr[k].values[l] = Math.abs(yC[k].values[l] - yP[k].values[l]);
                        delta += yErr[k].values[l] * yErr[k].values[l];
                    }
                }
            }
            delta = D * (float)Math.pow((double)delta,0.5);
            // Standard step-size controller for a 4th-order method: (tolerance/error)^(1/5).
            idealstep = timestep * (float)Math.pow((double)((GAMMA * (errorTolerance))/delta),(1.0/5.0));
            if(countup == 0 && delta <= errorTolerance/4f && idealstep > 2f*timestep && iterCount <= MAXIT/2) {
                // accept the step but try a larger step the easy way next time.
                // No RotateF() here: DoubleStep() remaps the un-rotated history itself
                // (on the doubled grid, old t is not a grid point, so f[Tzero] is
                // deliberately allowed to be overwritten on the next call).
                stepTaken += timestep;
                DoubleStep();
                setFinalValues(yC);
                setIntegrationValues(yC);
            } else if(delta <= errorTolerance && iterCount > 1+MAXIT/2) {
                // accepted, but the corrector worked hard; shrink the step for next time.
                stepTaken += timestep;
                setFinalValues(yC);
                setIntegrationValues(yC);
                HalveStep();    // reduce for next time
                RotateF();
                // NOTE(review): HalveStep() interpolates on the half-grid anchored at the
                // OLD t, then RotateF() shifts the labels; after advancing to t+h the
                // history entries appear offset by h/2 from the new half-grid. Looks like
                // an accepted approximation (countdown=4 forces an RK4 restart if it
                // misbehaves) -- TODO confirm against Maron's halving procedure.
            } else if(delta > errorTolerance) {
                // we'll reject this step and make a correction
                setIntegrationValues(yT);    // reset the model to y(t)...
                if(countdown == 0 && idealstep > timestep/2f) {
                    // we're free to simply halve the time step and try that
                    HalveStep();
                    stepTaken += IntegrateOneStep();
                } else {
                    // oh well, we halved the timestep too recently, or the ideal step is too small, have to Restart()
                    stepTaken += Restart(Math.min(timestep/10f,idealstep));
                }
            } else {
                // accept this step and leave the timestep alone
                stepTaken += timestep;
                RotateF();
                setFinalValues(yC);
                setIntegrationValues(yC);
                if(countup > 0) countup--;    // one less step before we can try to double again...
            }
        }
        return stepTaken;
    }

    /**
     * Doubles the time step, remapping the derivative history onto the coarser grid:
     * with the new step 2h, the usable points are f(t-h)=f[Tminus1] (unchanged),
     * f(t-3h)=old f[Tminus3], and f(t-5h)=old f[Tminus5]. The older slots are refilled
     * with freshly allocated (zero-valued) placeholders until real values rotate in.
     * Called before any rotation for the current step, so f[Tzero] still holds f(t).
     */
    private void DoubleStep() {
        int i,j,k;
        // f[Tminus1] remains the same;
        f[Tminus2] = f[Tminus3];
        f[Tminus3] = f[Tminus5];
        f[Tminus4] = new NodeValues[num_cells * num_nodes];
        f[Tminus5] = new NodeValues[num_cells * num_nodes];
        f[Tminus6] = new NodeValues[num_cells * num_nodes];
        for(i = 0; i < num_cells; i++) {
            for(j = 0; j < num_nodes; j++) {
                k = (i*num_nodes)+j;
                f[Tminus4][k] = new NodeValues();
                f[Tminus4][k].numSides = yT[k].numSides;
                f[Tminus5][k] = new NodeValues();
                f[Tminus5][k].numSides = yT[k].numSides;
                f[Tminus6][k] = new NodeValues();
                f[Tminus6][k].numSides = yT[k].numSides;
            }
        }
        timestep *= 2f;
        countdown = 1;    // since f(t-3) to f(t) all have real values, we only need 1 pass before we can halve the timestep...
        countup = 4;      // ...but take at least 4 steps before doubling again
    }

    /**
     * Halves the time step, interpolating the derivative history onto the finer grid
     * anchored at the current t: a approximates f(t-h/2) and b approximates f(t-3h/2)
     * (weights A and B over f[Tminus4]..f[Tzero]). The remap leaves f[Tzero]=f(t),
     * f[Tminus1]=a, f[Tminus2]=old f(t-h), f[Tminus3]=b, f[Tminus4]=old f(t-2h);
     * slots older than that keep stale values, which is why countdown blocks another
     * halving until the history has refilled.
     */
    private void HalveStep() {
        int i,j,k,l;
        NodeValues[] a = new NodeValues[num_cells * num_nodes];
        NodeValues[] b = new NodeValues[num_cells * num_nodes];
        for(i = 0; i < num_cells; i++) {
            for(j = 0; j < num_nodes; j++) {
                k = (i*num_nodes)+j;
                a[k] = new NodeValues();
                a[k].numSides = yT[k].numSides;
                b[k] = new NodeValues();
                b[k].numSides = yT[k].numSides;
            }
        }
        for(i = 0; i < num_cells; i++) {
            for(j = 0; j < num_nodes; j++) {
                k = (i*num_nodes)+j;
                for(l = 0; l < a[k].numSides; l++) {
                    a[k].values[l] = A[0] * f[Tminus4][k].values[l] +
                                     A[1] * f[Tminus3][k].values[l] +
                                     A[2] * f[Tminus2][k].values[l] +
                                     A[3] * f[Tminus1][k].values[l] +
                                     A[4] * f[Tzero][k].values[l];
                    b[k].values[l] = B[0] * f[Tminus4][k].values[l] +
                                     B[1] * f[Tminus3][k].values[l] +
                                     B[2] * f[Tminus2][k].values[l] +
                                     B[3] * f[Tminus1][k].values[l] +
                                     B[4] * f[Tzero][k].values[l];
                }
            }
        }
        f[Tminus4] = f[Tminus2];
        f[Tminus3] = b;
        f[Tminus2] = f[Tminus1];
        f[Tminus1] = a;
        timestep /= 2f;
        countup = 4;      // take at least 4 steps before trying to DOUBLE; IntegrateOneStep
        countdown = 4;    // will restart using RK4 if the error is still too high.
    }

    /**
     * Shifts the derivative history one slot toward the past after an accepted step.
     * The oldest array is recycled into the f[Tzero] slot, where it will be overwritten
     * by getDerivatives() at the start of the next IntegrateOneStep() call.
     */
    private void RotateF() {
        NodeValues[] temp;
        temp = f[Tminus6];
        f[Tminus6] = f[Tminus5];
        f[Tminus5] = f[Tminus4];
        f[Tminus4] = f[Tminus3];
        f[Tminus3] = f[Tminus2];
        f[Tminus2] = f[Tminus1];
        f[Tminus1] = f[Tzero];
        f[Tzero] = temp;
    }

    /**
     * Primes the derivative history with 6 fixed-size RK4 steps from the current model
     * state, filling f[Tminus6]..f[Tzero]. Sets started and returns the simulated time
     * crossed (6 * INIT_STEP).
     */
    private float StartIntegrationRun() {
        float starterStep = 0;        // NOTE(review): never used below
        float intervalCrossed = 0;
        this.timestep = INIT_STEP;
        getCurrentValues(yT);              // ...now get our first real position...
        getDerivatives(m[0]);              // ...and the derivative...
        copyNVarray(m[0],f[Tminus6]);      // ...copy it into f[0]...
        for(int i = 1; i <= Tzero; i++) {
            TakeRK4Step(timestep);         // ...take each RK4 step...
            copyNVarray(m[0],f[i]);        // ...and copy the result into the next f[] position.
            intervalCrossed += timestep;
        }
        countdown = 0;
        countup = 0;
        started = true;
        return intervalCrossed;
    }

    // This restarts the method using the fourth-order Runge-Kutta method... takes 6 fixed
    // steps starting from yT and stores the derivatives in the f[][] array.
    private float Restart(float ts) {
        copyNVarray(yT,yStable);           // Store y(t) so we can go back if the Restart fails...
        copyNVarray(f[Tzero],m[0]);        // ...re-use the already-computed derivative at y(t)...
        copyNVarray(f[Tzero],f[Tminus6]);  // ...also copy it into f(t-6)...
        for(int i = 1; i <= Tzero; i++) {
            TakeRK4Step(ts);               // ...take each RK4 step...
            copyNVarray(m[0],f[i]);        // ...and copy the derivative into the next f[] position.
        }
        countdown = 0;    // If we need to the timestep can be cut immediately,...
        countup = 4;      // ...but take a breather before trying a double.
        timestep = ts;
        return 6f*ts;
    }

    /**
     * Takes one classical fourth-order Runge-Kutta step of size ts from yT, commits the
     * result to the model, and leaves yT and m[0] holding the new state and derivative
     * ready for the next call.
     */
    private void TakeRK4Step(float ts) {
        int i,j,k,l;
        // If this is the first RK4 step, m[0] is a copy made in Restart(float) of f[Tzero],
        // which was already evaluated in IntegrateOneStep(). Otherwise, if this is the next
        // in a series of RK4 steps, m[0] will have been set at the end of the previous call
        // to TakeRK4Step(float).
        // First we use yP to store intermediate estimates as we compute m[1-3].
        for(i = 0; i < num_cells; i++) {
            for(j = 0; j < num_nodes; j++) {
                k = (i*num_nodes)+j;
                for(l = 0; l < yP[k].numSides; l++) {
                    // k2 stage point: half step along k1 = m[0]
                    yP[k].values[l] = yT[k].values[l] + ((ts/2f) * m[0][k].values[l]);
                }
            }
        }
        setIntegrationValues(yP);
        getDerivatives(m[1]);
        for(i = 0; i < num_cells; i++) {
            for(j = 0; j < num_nodes; j++) {
                k = (i*num_nodes)+j;
                for(l = 0; l < yP[k].numSides; l++) {
                    // k3 stage point: half step along k2 = m[1]
                    yP[k].values[l] = yT[k].values[l] + ((ts/2f) * m[1][k].values[l]);
                }
            }
        }
        setIntegrationValues(yP);
        getDerivatives(m[2]);
        for(i = 0; i < num_cells; i++) {
            for(j = 0; j < num_nodes; j++) {
                k = (i*num_nodes)+j;
                for(l = 0; l < yP[k].numSides; l++) {
                    // k4 stage point: full step along k3 = m[2]
                    yP[k].values[l] = yT[k].values[l] + (ts * m[2][k].values[l]);
                }
            }
        }
        setIntegrationValues(yP);
        getDerivatives(m[3]);
        // Next use yP to store the new estimate of the endpoint:
        // y(t+ts) = y(t) + ts/6 * (k1 + 2*k2 + 2*k3 + k4)
        for(i = 0; i < num_cells; i++) {
            for(j = 0; j < num_nodes; j++) {
                k = (i*num_nodes)+j;
                for(l = 0; l < yP[k].numSides; l++) {
                    yP[k].values[l] = yT[k].values[l] + ((ts/6f) * (
                            m[0][k].values[l] +
                            2f*(m[1][k].values[l] + m[2][k].values[l]) +
                            m[3][k].values[l] ));
                }
            }
        }
        // Set our result...
        setFinalValues(yP);
        setIntegrationValues(yP);
        // ...and restock for next time
        getCurrentValues(yT);
        getDerivatives(m[0]);
    }

    /**
     * Fills fhat_vals and dfdyi_vals from each node's getSeparatedDeriv(): the derivative
     * with its linear-in-y_i terms removed, and the coefficient of those linear terms,
     * respectively. Both are evaluated at whatever state was last pushed to the model
     * via setIntegrationValues().
     */
    protected void getFhatAndDfdyi(NodeValues[] fhat_vals, NodeValues[] dfdyi_vals) {
        for(int i = 0; i < num_cells; i++) {
            for(int j = 0; j < num_nodes; j++) {
                model.cells[i].nodes[j].getSeparatedDeriv(fhat_vals[(i*num_nodes)+j].values,
                                                          dfdyi_vals[(i*num_nodes)+j].values);
            }
        }
    }
}