Gradient descent: Difference between revisions

m (→{{header|Perl 6}}: more mindless golfing)

The Perl 6 code after this revision:
use v6.d;
 
sub steepestDescent(@x, $alpha is copy, $h is copy) {
   my $g0 = g(@x) ; # Initial estimate of result.

   my @fi = gradG(@x, $h, $g0) ; # Calculate initial gradient

   # Calculate initial norm.
   my $b = $alpha / sqrt(my $delG = sum(map {$_²}, @fi) );

   while ( $delG > $h ) { # Iterate until value is <= tolerance.

      for @fi.kv -> $i, $j { @x[$i] -= $b * $j } # Calculate next value.

      @fi = gradG(@x, $h /= 2, g(@x)); # Calculate next gradient.

      $b = $alpha / sqrt( $delG = sum(map {$_²}, @fi) ); # Calculate next norm.

      my $g1 = g(@x) ; # Calculate next value.

      $g1 > $g0 ?? ( $alpha /= 2 ) !! ( $g0 = $g1 ) # Adjust parameter.
   }
}
 
sub gradG(@x is copy, $h, $g0) { # gives a rough calculation of gradient g(x).
   return map { $_ += $h ; (g(@x) - $g0) / $h }, @x
}
 
# Function for which minimum is to be found.
sub g(\x) { (x[0]-1)² * (-x[1]²).exp + x[1]*(x[1]+2) * (-2*x[0]²).exp }
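
A driver is not part of this diff, but a minimal sketch of one makes the revised routine runnable end to end. The starting point, alpha, and tolerance below are illustrative assumptions, not values taken from this revision:

my @x = 0.1, -1;                     # Assumed starting guess for the minimum.
steepestDescent(@x, 0.1, 0.0000006); # Assumed alpha and tolerance.
say "The minimum is at x[0] = @x[0], x[1] = @x[1]";

Since @x is bound rather than copied, steepestDescent updates the caller's array in place, so the result can be read back from @x after the call.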
 
 