//+------------------------------------------------------------------+
//|                                               MLP_cannonball.mq4 |
//|                      Copyright © 2008, MetaQuotes Software Corp. |
//|                                        http://www.metaquotes.net |
//+------------------------------------------------------------------+
#property copyright "Copyright © 2008, MetaQuotes Software Corp."
#property link "http://www.metaquotes.net"
/* This program implements a simple Multi-Layer Perceptron (MLP) to solve the cannonball problem (regression).
   Adapted by Sebastien Marcel (2003-2004) from D. Collobert.
   The goal is to estimate the position X of the bullet at a fixed time t, given its initial speed (v) and angle (a):
      y
      /\
      |
      |
      |
      |                  . X
      |               .
      |            .
      |          .
      |        .
      |      .
      |    .
      |   .
      |  .
      | .
      |.  (v,a)
      --------------------------> x
*/
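//
// In closed form (this is exactly what xpos() and ypos() below compute),
// with launch speed v, launch angle teta in degrees and gravity g:
//    x(t) = v*t*cos(teta*PI/180)
//    y(t) = -0.5*g*t*t + v*t*sin(teta*PI/180)
// The MLP learns the mapping (v, a) -> (x, y) at the fixed time t = 10.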
#define PI 3.14159265358979323846
#define RAND_MAX 32767.0
//
// Datasets
#define N_PATTERNS_TRAIN 500
#define N_PATTERNS_TEST 500
#define N_PATTERNS_TRAIN_TEST 1000 //N_PATTERNS_TRAIN + N_PATTERNS_TEST
//*****************************
// Inputs
//
double DataIn.v[N_PATTERNS_TRAIN_TEST]; // speed of bullet
double DataIn.a[N_PATTERNS_TRAIN_TEST]; // angle of bullet
// X[N_PATTERNS_TRAIN + N_PATTERNS_TEST];
//*****************************
// Targets
//
double DataOut.x[N_PATTERNS_TRAIN_TEST]; // x position at time t
double DataOut.y[N_PATTERNS_TRAIN_TEST]; // y position at time t
// Y[N_PATTERNS_TRAIN + N_PATTERNS_TEST];
//*****************************
// to create the data
//
double vmax;
double gravity;
//*****************************
// to train the MLP
//
double lambda = 0.01; // learning rate
double mu = 0.6; // inertia momentum rate
double mse_min = 0.001; // minimum Mean Squared Error
int max_iterations = 500; // maximum number of iterations
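// The update rule applied in backward() is gradient descent with momentum:
//    w_new = w - lambda * dE/dw + mu * (w - w_old)
// lambda scales the gradient step; mu scales the inertia term.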
//*****************************
// MLP data
//
#define N_HU 3
#define N_HU1 4 //N_HU + 1
// weights between hidden neurons and inputs
double w1[N_HU1][3];
double w1old[N_HU1][3];
// weights between outputs and hidden neurons
double w2[2][N_HU1];
double w2old[2][N_HU1];
// values of integration function
double aHidden[N_HU1];
double aOutput[2];
// values of the transfer function
double yHidden[N_HU1];
// constant value of bias
double xBias = 1.0;
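// Topology: 3 inputs (bias, v, a) -> N_HU sigmoid hidden units (plus a
// bias unit passed through as yHidden[0]) -> 2 sigmoid outputs (x, y).
// Index 0 of the hidden arrays is the bias slot, hence the N_HU+1 sizes.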
int init()
{
   initrand(); // seed the random generator once at program start (see initrand() below)
   vmax = 1.0/10;
   gravity = 1.0/200;
   return(0);
}
//+------------------------------------------------------------------+
//| script program start function |
//+------------------------------------------------------------------+
int start()
{
//----
main();
//----
return(0);
}
//+------------------------------------------------------------------+
//*****************************
// Sigmoid transfer function
//
double f(double x)
{
return (1.0 /(1.0 + MathExp(-x)));
}
//*****************************
// Derivative of Sigmoid
//
double f_prime(double x)
{
double z = MathExp(-x);
double one_plus_z = 1.0 + z;
return (z / (one_plus_z * one_plus_z));
}
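// Note: with z = MathExp(-x) we have f(x) = 1/(1+z) and 1-f(x) = z/(1+z),
// so f_prime(x) = z/((1+z)*(1+z)) = f(x)*(1.0 - f(x)); the two forms of
// the derivative are equivalent.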
//*****************************
// Compute the x position of the bullet at time t (angle teta in degrees)
//
double xpos(double t, double v, double teta)
{
return( v * t * MathCos(teta*PI/180.0));
}
//*****************************
// Compute the y position of the bullet at time t (angle teta in degrees)
//
double ypos(double t, double v, double teta)
{
return( -0.5 * gravity * t * t + v * t * MathSin(teta*PI/180.0));
}
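// Worked example (illustrative values only): with gravity = 1/200,
// v = 0.05 and teta = 45 at t = 10:
//    xpos = 0.05*10*cos(45 deg)                    ~ 0.354
//    ypos = -0.5*(1/200)*100 + 0.05*10*sin(45 deg) ~ 0.104
// ypos > 0, so this pattern would be accepted by createDatasets().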
//*****************************
// Compute the MSE
//
double MSE(
double a.x,
double a.y,
double b.x,
double b.y
)
{
return (0.5*((a.x - b.x)*(a.x - b.x) + (a.y - b.y)*(a.y - b.y)));
}
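// The 0.5 factor makes the derivative a plain difference,
//    dE/da.x = a.x - b.x,
// which is exactly the (Yestimate - Outwant) term used in backward().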
//Use this function first to seed the random number generator;
//call it before any of the other random functions.
void initrand()
{
//srand((unsigned)(time(0)));
MathSrand(TimeLocal());
}
//generates a pseudo-random double between 0.0 and 0.999...
double randdouble0()
{
return (MathRand()/((RAND_MAX)+1));
}
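// MathRand() returns an integer in [0, 32767], so the result above lies
// in [0.0, 32767.0/32768.0], i.e. strictly below 1.0.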
//generates a pseudo-random double between min and max
double randdouble2(double min, double max)
{
if (min>max)
{
return (randdouble0()*(min-max)+max);
}
else
{
return (randdouble0()*(max-min)+min);
}
}
// Random generator
double Random(double inf, double sup)
{
return(randdouble2(inf,sup));
}
//*****************************
// Create the MLP
//
void createMLP()
{
int i, j;
for (i = 0; i<=2; i++)
{
for (j = 0; j<= N_HU; j++)
{
w1[j][i] = Random(-1.0, 1.0);
w1old[j][i] = w1[j][i];
}
}
for (i = 0; i<2; i++)
{
for (j = 0; j<= N_HU; j++)
{
w2[i][j] = Random(-1.0, 1.0);
w2old[i][j] = w2[i][j];
}
}
}
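// All weights start uniformly in [-1, 1]; the *old copies are initialized
// equal to the weights, so the first momentum term (w - w_old) is zero.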
//*****************************
// Create the datasets
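// Rejection sampling: i only advances when the bullet is still above
// ground (y > 0) at t = 10, so patterns that have already landed are
// redrawn. Inputs are normalized to [0, 1] by dividing by vmax and 90,
// and un-scaled again inside xpos()/ypos() to compute the targets.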
//
void createDatasets()
{
int i;
i = 0;
while (i < (N_PATTERNS_TRAIN + N_PATTERNS_TEST))
{
DataIn.v[i] = (Random(0.0, vmax)) / vmax;
DataIn.a[i] = (Random(0.0, 90.0)) / 90.0;
DataOut.x[i] = xpos(10, DataIn.v[i] * vmax, (DataIn.a[i])*90.0);
DataOut.y[i] = ypos(10, DataIn.v[i] * vmax, (DataIn.a[i])*90.0);
if (DataOut.y[i] > 0.0) i = i + 1;
}
}
//*****************************
// Forward the input in the MLP
//
void forward(
double In.v,
double In.a,
double &out.x,
double &out.y
)
{
int i;
aHidden[0] = xBias;
yHidden[0] = xBias;
for (i = 1; i <= N_HU; i++)
{
aHidden[i] = xBias*w1[i][0] + (In.v)*w1[i][1] + (In.a)*w1[i][2];
yHidden[i] = f(aHidden[i]);
}
aOutput[0] = 0.0;
aOutput[1] = 0.0;
for (i = 0; i <= N_HU; i++)
{
aOutput[0] = aOutput[0] + yHidden[i]*w2[0][i];
aOutput[1] = aOutput[1] + yHidden[i]*w2[1][i];
}
out.x = f(aOutput[0]);
out.y = f(aOutput[1]);
}
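// In formula form, for hidden unit i = 1..N_HU and output k = 0..1:
//    aHidden[i] = xBias*w1[i][0] + v*w1[i][1] + a*w1[i][2]
//    yHidden[i] = f(aHidden[i]),   yHidden[0] = 1 (bias)
//    out_k      = f( sum over i = 0..N_HU of yHidden[i]*w2[k][i] )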
//*****************************
// Backward (back-propagate the gradient of error)
//
void backward(
double Yestimate.x,
double Yestimate.y,
double Outwant.x,
double Outwant.y,
double In.v,
double In.a,
double lambda,
double mu
)
{
double wnew, Goutput[2], Ghidden[N_HU1];
int i,j;
//
Goutput[0] = (Yestimate.x - Outwant.x) * f_prime(aOutput[0]);
Goutput[1] = (Yestimate.y - Outwant.y) * f_prime(aOutput[1]);
//
for (i = 0; i<=N_HU; i++)
Ghidden[i] = (Goutput[0]*w2[0][i] + Goutput[1]*w2[1][i])*f_prime(aHidden[i]);
//
for (j = 0; j<2; j++)
{
for (i = 0; i<=N_HU; i++)
{
wnew = w2[j][i] - lambda * yHidden[i] * Goutput[j] + mu * (w2[j][i] - w2old[j][i]);
w2old[j][i] = w2[j][i];
w2[j][i] = wnew;
}
}
//
for (i = 0; i<=N_HU; i++)
{
wnew = w1[i][0] - lambda * xBias * Ghidden[i] + mu * (w1[i][0] - w1old[i][0]);
w1old[i][0] = w1[i][0];
w1[i][0] = wnew;
wnew = w1[i][1] - lambda * In.v * Ghidden[i] + mu * (w1[i][1] - w1old[i][1]);
w1old[i][1] = w1[i][1];
w1[i][1] = wnew;
wnew = w1[i][2] - lambda * In.a * Ghidden[i] + mu * (w1[i][2] - w1old[i][2]);
w1old[i][2] = w1[i][2];
w1[i][2] = wnew;
}
}
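// Standard backprop deltas for E = 0.5*||Yestimate - Outwant||^2:
//    Goutput[k] = (Yestimate_k - Outwant_k) * f_prime(aOutput[k])
//    Ghidden[i] = (Goutput[0]*w2[0][i] + Goutput[1]*w2[1][i]) * f_prime(aHidden[i])
// Each weight moves by -lambda*(its input)*(its delta) plus the momentum
// term. Note that w1[0][*] is updated here but never read by forward(),
// so the i = 0 row has no effect on the network output.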
//*****************************
// Stochastic gradient training
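// One pass over the P training patterns, updating the weights after every
// single pattern (online/stochastic gradient descent rather than batch).
// Returns the MSE averaged over the pass.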
//
double train(int P)
{
double Yestimate.x;
double Yestimate.y;
double mse_;
double mse_total;
mse_total = 0.0;
// For each train patterns
for(int p = 0 ; p < P ; p++)
{
// Forward current train pattern into the MLP
forward(
//X[p],
DataIn.v[p],
DataIn.a[p],
//&Yestimate
Yestimate.x,
Yestimate.y
);
// Computes the MSE
mse_ = MSE(
//Y[p],
DataOut.x[p],
DataOut.y[p],
//Yestimate
Yestimate.x,
Yestimate.y
);
// Accumulate the MSE
mse_total = mse_total + mse_;
// Backward MSE gradient
backward(
//Yestimate,
Yestimate.x,
Yestimate.y,
//Y[p],
DataOut.x[p], //Outwant
DataOut.y[p], //Outwant
//X[p],
DataIn.v[p],
DataIn.a[p],
lambda,
mu
);
}
// Return normalized train MSE
double P1=P;
return (mse_total /P1);
}
//
int main()
{
double mse_train;
double mse_test;
double mse_;
double Yestimate.x;
double Yestimate.y;
//*****************************
//
createMLP();
//*****************************
//
createDatasets();
//for (int i = 0; i< N_PATTERNS_TRAIN; i++) printf(" TRN: x=[%f %f] y=[%f %f]\n", X[i].v, X[i].a, Y[i].x, Y[i].y);
//for (int i = N_PATTERNS_TRAIN; i<(N_PATTERNS_TRAIN + N_PATTERNS_TEST); i++) printf(" TST: x=[%f %f] y=[%f %f]\n", X[i].v, X[i].a, Y[i].x, Y[i].y);
Print("Stochastic gradient training:");
//
int pf_train = FileOpen("mse_train_mt4.txt",FILE_CSV|FILE_WRITE,' ');//FileOpen("mse_train.txt", "w");
int pf_test = FileOpen("mse_test_mt4.txt",FILE_CSV|FILE_WRITE,' ');//FileOpen("mse_test.txt", "w");
int iter;
for(iter = 1 ; iter <= max_iterations ; iter++)
{
//*****************************
//
// train
mse_train = train(N_PATTERNS_TRAIN);
FileWrite(pf_train,mse_train);
//*****************************
//
// test
mse_test = 0.0;
// For each test pattern
for(int i = N_PATTERNS_TRAIN ; i < (N_PATTERNS_TRAIN + N_PATTERNS_TEST) ; i++)
{
Yestimate.x = 0.0;
Yestimate.y = 0.0;
// forward current pattern into the MLP
//forward(X[i], &Yestimate);
forward(
//X[p],
DataIn.v[i],
DataIn.a[i],
//&Yestimate
Yestimate.x,
Yestimate.y
);
// computes MSE
//mse_ = MSE(Y[i], Yestimate);
mse_ = MSE(
//Y[p],
DataOut.x[i],
DataOut.y[i],
//Yestimate
Yestimate.x,
Yestimate.y
);
// accumulate MSE
mse_test += mse_;
}
// Normalize the MSE
mse_test /= N_PATTERNS_TEST;
FileWrite(pf_test,mse_test);
Print("->",iter);
//fflush(stdout);
if (mse_train < mse_min) break;
}
Print("\n");
//
FileClose(pf_test);
FileClose(pf_train);
//*****************************
// Print info about training
//
Print("Number of iterations = ", iter);
Print("Final MSE train = ", mse_train);
Print("Final MSE test = ", mse_test);
Print("End of program.");
return (0);
}