DEFUN_DLD ( pso_conversion_code, args, nargout,
"-*- texinfo -*-\n\
@deftypefn {Function File} {} pso_conversion_code (@var{target_val,max_lambda})\n\
The output of this test function should be half the input target_val.\n\
@end deftypefn" )
{
  // Particle Swarm Optimisation (PSO) demo.
  //
  // Minimises the test fitness function f(x) = x^2 - target_val * x,
  // whose unique global minimum lies at x = target_val / 2, by flying a
  // swarm of particles through the search space (0, max_lambda).
  //
  // Inputs:
  //   args(0)  target_val -- scalar; the recovered optimum should be half of it
  //   args(1)  max_lambda -- scalar; upper bound for the initial random guesses
  // Outputs:
  //   retval_list(0) -- global_best_lambda, the best position found
  //   retval_list(1) -- global_best, the fitness value at that position
  octave_value_list retval_list ;
  int nargin = args.length () ;

  // ---- input checking ----
  if ( nargin != 2 )
    {
    error ( "Invalid number of arguments." ) ;
    return retval_list ;
    }

  if ( args(0).length () != 1 )
    {
    error ( "Invalid target_val. Should be a single value." ) ;
    return retval_list ;
    }

  if ( args(1).length () != 1 )
    {
    error ( "Invalid max_lambda value. Should be a single value for the maximum 'guess'." ) ;
    return retval_list ;
    }

  // NOTE(review): error_state is deprecated/removed in modern Octave, where
  // error() does not return; kept for compatibility with older releases.
  if (error_state)
    {
    error ( "Invalid arguments." ) ;
    return retval_list ;
    }
  // ---- end of input checking ----

  double target_val = args(0).double_value() ;
  double max_lambda = args(1).double_value() ;
  double loocv_value ;

  // ---- the PSO algorithm ----
  // Stop after this many consecutive iterations with no improvement of the
  // global best (the counter is reset to zero on every improvement).
  int no_iterations_until_cease = 25 ;
  int no_of_particles = 100 ;

  // FIX: the template argument was missing ( std::numeric_limits::infinity()
  // does not compile ).  Requires <limits> -- confirm it is included at the
  // top of this file.
  double global_best = std::numeric_limits<double>::infinity() ;
  double global_best_lambda = 0.0 ; // initially set to unregularised

  ColumnVector local_best( no_of_particles , 1 ) ; local_best.fill( global_best ) ; // best fitness per particle
  ColumnVector local( no_of_particles , 1 ) ;                                       // current position per particle
  ColumnVector local_best_so_far( no_of_particles , 1 ) ;                           // position of each particle's best fitness
  ColumnVector velocity( no_of_particles , 1 ) ; velocity.fill( 0.0 ) ;             // particle velocity vector

  // A Mersenne Twister random number generator can be declared with a simple
  // default constructor, from the #include "MersenneTwister.h"
  MTRand mtrand1 ;

  // values for the random updating process
  double r1 ;
  double r2 ;

  // An inertial constant. Good values are usually slightly less than 1.
  // Or it could be randomly initialized for each particle.
  double w_ic = 0.9 ;

  // c1 and c2 are constants that say how much the particle is directed towards
  // good positions. They represent a "cognitive" and a "social" component,
  // respectively, in that they affect how much the particle's personal best
  // and the global best (respectively) influence its movement. Usually we take
  // c_1, c_2 approx = 2. Or they could be randomly initialized for each particle.
  double c1 = 2.0 ;
  double c2 = 2.0 ;

  // fill the local vector with initial random values < max_lambda, temporarily using r1
  for ( octave_idx_type ii (0) ; ii < no_of_particles ; ii++ )
    {
    r1 = mtrand1.randDblExc () ;
    local(ii) = r1 * max_lambda ;
    }

  int while_counter = 0 ;

  while ( while_counter < no_iterations_until_cease )
    {
    // loop once over local_best and local vectors
    for ( octave_idx_type jj (0) ; jj < no_of_particles ; jj++ )
      {
      // Replace this code box as necessary
      //*************************************************************//
      //                                                             //
      //                 fitness function evaluation                 //
      loocv_value = local(jj) * local(jj) - target_val * local(jj) ; //
      //                                                             //
      //*************************************************************//

      // check if the local_best has improved
      if ( loocv_value < local_best(jj) )
        {
        // update local_best and local_best_so_far vector if it has
        local_best(jj) = loocv_value ;
        local_best_so_far(jj) = local(jj) ;
        }

      // check if the above local_best has also improved the global_best
      if ( local_best(jj) < global_best )
        {
        // update global_best and global_best_lambda if it has
        global_best = local_best(jj) ;
        global_best_lambda = local_best_so_far(jj) ;
        while_counter = 0 ; // improvement found: restart the stagnation count
        }
      } // end of loop once over local_best and local vectors

    // now update the particle velocity and position vectors
    for ( octave_idx_type jj (0) ; jj < no_of_particles ; jj++ )
      {
      r1 = mtrand1.randDblExc () ;
      r2 = mtrand1.randDblExc () ;
      velocity(jj) = w_ic * velocity(jj) + c1 * r1 * ( local_best_so_far(jj) - local(jj) ) + c2 * r2 * ( global_best - local(jj) ) ;
      local(jj) = local(jj) + velocity(jj) ;
      } // end of particle velocity and position vectors updates loop

    while_counter += 1 ;
    } // end of main while loop

  retval_list(1) = global_best ;
  retval_list(0) = global_best_lambda ;

  return retval_list ;

} // end of function
In the "commented" function box there is the same test function as in the vectorised code in my previous post. In a real-life application, of course, this code would be replaced — the above is just a test of my conversion of the PSO algorithm from Octave to C++ code. I've been looking at PSO because I think I can easily use it as a simple hyperparameter optimisation tool to tune the regularisation of the weights in my proposed neural net trading system. The reason I chose the fitness function in the above code is simply that it has a global minimum, which is what neural net training is all about — minimisation of an error function.
No comments:
Post a Comment