Gradient descent: Difference between revisions
→{{header|ALGOL 68}}: Use the actual gradient.
(→{{header|ALGOL 68}}: Use the actual gradient.) |
|||
Line 14:
=={{header|ALGOL 68}}==
{{Trans|Go}}▼
{{works with|ALGOL 68G|Any - tested with release 2.8.3.win32}}
{{Trans|Go}} modified to use the actual gradient function -
<lang algol68>PROC steepest descent = ( REF[]LONG REAL x, LONG REAL alphap, tolerance )VOID:
BEGIN
LONG REAL alpha := alphap;
LONG REAL g0 := g( x ); # Initial estimate of result. #
# Calculate initial gradient. #
[ LWB x : UPB x ]LONG REAL fi := grad g( x
# Calculate initial norm. #
LONG REAL del g := 0.0;
Line 38 ⟶ 37:
x[i] -:= b * fi[i]
OD;
h /:= 2;▼
# Calculate next gradient. #
fi := grad g( x
# Calculate next norm. #
del g := 0;
Line 47 ⟶ 45:
OD;
del g := long sqrt( del g );
g0 := g1
FI
OD
END # steepest descent # ;
# calculates the gradient of g(p). #
# The derivatives wrt x and y are (as in the Fortran sample ): #
PROC grad g = ( []LONG REAL x, LONG REAL h )[]LONG REAL:▼
# g' wrt x = 2( x - 1 )e^( - ( y^2 ) ) - 4xe^( -2( x^2) )y( y + 2 ) #
# g' wrt y = ( -2( x-1 )^2ye^( - (y^2) )) + e^(-2( x^2 ) )( y + 2 ) + e^( -2( x^2 ) )y #
BEGIN
[ LWB
LONG REAL
- 4 * x * long exp( -2 * ( x * x ) ) * y
+ long exp( -2 * x * x ) * ( y + 2 )
+ long exp( -2 * x * x ) * y;
z
END # grad g # ;
# Function for which minimum is to be found. #
# g( x, y ) = ( ( x - 1 )^2 )e^( - ( x^2 ) ) + y( y + 2 )e^( - 2(x^2)) #
PROC g = ( []LONG REAL x )LONG REAL:
( x[ 0 ] - 1 )
|