Gradient descent: Difference between revisions

Added Algol 68
(Added Wren)
(Added Algol 68)
Line 12:
[https://books.google.co.uk/books?id=dFHvBQAAQBAJ&pg=PA543&lpg=PA543&dq=c%23+steepest+descent+method+to+find+minima+of+two+variable+function&source=bl&ots=TCyD-ts9ui&sig=ACfU3U306Og2fOhTjRv2Ms-BW00IhomoBg&hl=en&sa=X&ved=2ahUKEwitzrmL3aXjAhWwVRUIHSEYCU8Q6AEwCXoECAgQAQ#v=onepage&q=c%23%20steepest%20descent%20method%20to%20find%20minima%20of%20two%20variable%20function&f=false This book excerpt] shows sample C# code for solving this task.
<br><br>
 
=={{header|ALGOL 68}}==
{{Trans|Go}}
{{works with|ALGOL 68G|Any - tested with release 2.8.3.win32}}
There appears to be a range of answers presented here: the results calculated by this sample differ from those of the Go sample
but agree with the Fortran and Julia samples to 6 decimal places.
<lang algol68># Minimises g by steepest descent, updating the point x in place.         #
# alphap is the initial step-size factor, tolerance the stopping bound on  #
# the gradient norm (also used as the initial finite-difference increment).#
PROC steepest descent = ( REF[]LONG REAL x, LONG REAL alphap, tolerance )VOID:
     BEGIN
        LONG REAL step := alphap;        # current step-size factor         #
        LONG REAL h    := tolerance;     # finite-difference increment      #
        LONG REAL best := g( x );        # best function value seen so far  #
        # gradient and its Euclidean norm at the starting point             #
        [ LWB x : UPB x ]LONG REAL slope := grad g( x, h );
        LONG REAL norm := 0;
        FOR i FROM LWB x TO UPB x DO norm +:= slope[ i ] * slope[ i ] OD;
        norm := long sqrt( norm );
        LONG REAL scale := step / norm;
        # iterate until the gradient norm falls to the tolerance            #
        WHILE norm > tolerance DO
            # move against the gradient                                     #
            FOR i FROM LWB x TO UPB x DO x[ i ] -:= scale * slope[ i ] OD;
            h /:= 2;                     # tighten the difference increment #
            # gradient, norm and scale at the new point                     #
            slope := grad g( x, h );
            norm  := 0;
            FOR i FROM LWB x TO UPB x DO norm +:= slope[ i ] * slope[ i ] OD;
            norm  := long sqrt( norm );
            # NOTE: scale uses step from BEFORE this iteration's adjustment #
            scale := step / norm;
            LONG REAL latest := g( x );
            # halve the step factor if we overshot, otherwise keep the gain #
            IF latest > best THEN
                step /:= 2
            ELSE
                best := latest
            FI
        OD
     END # steepest descent # ;
# Returns a forward-difference approximation to the gradient of g at x.    #
# NOTE(review): the increments accumulate in the working copy — probe[ i ] #
# is never reset after each partial, so later components are differenced   #
# at an already-shifted point; this matches the Go original's behaviour.   #
PROC grad g = ( []LONG REAL x, LONG REAL h )[]LONG REAL:
     BEGIN
        [ LWB x : UPB x ]LONG REAL partials;
        [ LWB x : UPB x ]LONG REAL probe := x[ AT 0 ]; # working copy of x  #
        LONG REAL base := g( x );                      # g at the unshifted point #
        FOR i FROM LWB x TO UPB x DO
            probe[ i ]    +:= h;
            partials[ i ]  := ( g( probe ) - base ) / h
        OD;
        partials
     END # grad g # ;
# The two-variable test function whose minimum is sought:                  #
#     g(x) = (x0 - 1)^2 * exp( -x1^2 ) + x1*(x1 + 2) * exp( -2*x0^2 )     #
PROC g = ( []LONG REAL x )LONG REAL:
     (   ( x[ 0 ] - 1 ) * ( x[ 0 ] - 1 ) * long exp( - x[ 1 ] * x[ 1 ] )
       + x[ 1 ] * ( x[ 1 ] + 2 )         * long exp( - 2 * x[ 0 ] * x[ 0 ] )
     )
;
BEGIN
    # Driver: search for a local minimum of g from the point ( 0.1, -1 ).  #
    LONG REAL eps           := 0.0000006; # stopping tolerance              #
    LONG REAL initial alpha := 0.1;       # initial step-size factor        #
    # initial guess of the location of the minimum, indexed from 0          #
    [ 0 : 1 ]LONG REAL x := ( []LONG REAL( 0.1, -1 ) )[ AT 0 ];
    steepest descent( x, initial alpha, eps );
    print( ( "Testing steepest descent method:", newline ) );
    print( ( "The minimum is at x[0] = ", fixed( x[ 0 ], -10, 6 ), ", x[1] = ", fixed( x[ 1 ], -10, 6 ), newline ) )
END</lang>
{{out}}
<pre>
Testing steepest descent method:
The minimum is at x[0] = 0.107627, x[1] = -1.223260
</pre>
 
=={{header|Fortran}}==
3,026

edits