Gradient descent: Difference between revisions

m (→{{header|Perl 6}}: more mindless golfing)
Line 202:
use v6.d;

Before:

sub steepestDescent(@x, $alpha is copy, $tolerance) {
   my \N = +@x ; my $h = $tolerance ;
   my @fi = gradG(@x, $h) ;                        # Calculate initial gradient
   my $g0 = g(@x) ;                                # Initial estimate of result.

   # Calculate initial norm.
   my $b = $alpha / (my $delG = (map {@fi[$_]²}, ^N).sum ).sqrt;

   while ( $delG > $tolerance ) {                  # Iterate until value is <= tolerance.
      # Calculate next value.
      for ^N { @x[$_] -= $b * @fi[$_] }
      $h /= 2;

      @fi = gradG(@x, $h);                         # Calculate next gradient.

      # Calculate next norm.
      $b = $alpha / ($delG = (map {@fi[$_]²}, ^N).sum ).sqrt;

      my $g1 = g(@x);                              # Calculate next value.

      $g1 > $g0 ?? ( $alpha /= 2 ) !! ( $g0 = $g1 )   # Adjust parameter.
   }
}

After:

sub steepestDescent(@x, $alpha is copy, $h is copy) {
   my $g0 = g(@x) ;                                # Initial estimate of result.

   my @fi = gradG(@x, $h, $g0) ;                   # Calculate initial gradient

   # Calculate initial norm.
   my $b = $alpha / sqrt(my $delG = sum(map {$_²}, @fi));

   while ( $delG > $h ) {                          # Iterate until value is <= tolerance.
      for @fi.kv -> $i, $j { @x[$i] -= $b * $j }   # Calculate next value.

      @fi = gradG(@x, $h /= 2, g(@x));             # Calculate next gradient.

      $b = $alpha / sqrt($delG = sum(map {$_²}, @fi) );   # Calculate next norm.

      my $g1 = g(@x);                              # Calculate next value.

      $g1 > $g0 ?? ( $alpha /= 2 ) !! ( $g0 = $g1 )   # Adjust parameter.
   }
}
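In both revisions the update scales the gradient by $alpha divided by the gradient's norm, so each iteration moves the point a fixed distance $alpha downhill; the step only shortens when $alpha is halved after an overshoot. A minimal sketch of one such normalized step on a toy quadratic (everything here is illustrative, nothing is taken from the entry):

my @p = 3, 4;                                   # toy starting point
my $alpha = 0.1;
my @grad = 2 * @p[0], 2 * @p[1];                # gradient of f(x,y) = x² + y²
my $b = $alpha / sqrt(sum(map {$_²}, @grad));   # alpha / norm, as in both revisions
for @grad.kv -> $i, $j { @p[$i] -= $b * $j }    # one normalized descent step
say @p;                                         # [2.94 3.92], exactly 0.1 closer to the origin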


Before:

sub gradG(@x, $h) { # Provides a rough calculation of gradient g(x).
   my \N = +@x ; my @y = @x ; my $g0 = g(@x) ;
   return map { @y[$_] += $h ; (g(@y) - $g0) / $h }, ^N
}

After:

sub gradG(@x is copy, $h, $g0) { # gives a rough calculation of gradient g(x).
   return map { $_ += $h ; (g(@x) - $g0) / $h }, @x
}
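One behaviour worth noting, true of both revisions: the working copy is perturbed cumulatively, since the $h added while probing one coordinate is never removed before the next coordinate is probed. Later gradient components therefore approximate sums of partial derivatives rather than single partials. A small sketch that makes this visible (it assumes g and the new gradG above are in scope; the probe point and step size are arbitrary choices, not from the entry):

my @pt = 0.1, -1;
my $h = 1e-7;
say gradG(@pt, $h, g(@pt));     # cumulative differences, as in the entry
# Independent one-coordinate-at-a-time differences for comparison; the
# first components agree, the second differ by roughly the dg/dx0 term:
say map -> $i { my @y = @pt; @y[$i] += $h; (g(@y) - g(@pt)) / $h }, ^@pt;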


Before:

# Function for which minimum is to be found.
sub g(\x) { (x[0]-1)² * (-x[1]²).exp + x[1]*(x[1]+2) * (-2*x[0]²).exp }

After:

# Function for which minimum is to be found.
sub g(\x) { (x[0]-1)² * exp(-x[1]²) + x[1]*(x[1]+2) * exp(-2*x[0]²) }
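For completeness, a minimal driver sketch; the diff stops at the subroutines, so the starting point, step scale, and tolerance below are assumptions chosen to resemble this task's usual setup rather than part of the revision:

my @x = 0.1, -1;                       # assumed initial guess of the minimum's location
steepestDescent(@x, 0.1, 0.0000006);   # assumed alpha and initial h (the tolerance)
say "The minimum is at x[0] = {@x[0]}, x[1] = {@x[1]}";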