Gradient descent: Difference between revisions

(Added Perl example)
Line 209: Line 209:


 # Calculate initial norm.
-my $delG = 0;
-for ^N { $delG += @fi[$_]² }
-my $b = $alpha / $delG.sqrt;
+my $b = $alpha / (my $delG = (map {@fi[$_]²}, ^N).sum ).sqrt;
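The new one-liner folds the three-step norm computation into a single expression: the map over ^N squares each gradient component, .sum yields the squared norm, and $delG is declared and assigned inline before .sqrt is applied. A minimal standalone sketch of that step, with hypothetical values for the gradient components @fi and the step size $alpha:

    my @fi = 3, 4;       # stand-in gradient components
    my $alpha = 0.1;     # stand-in step size
    my $b = $alpha / (my $delG = (map { @fi[$_]² }, ^@fi).sum).sqrt;
    say $delG;           # 25 (squared norm)
    say $b;              # 0.02 (alpha divided by the norm)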


 while ( $delG > $tolerance ) { # Iterate until value is <= tolerance.
Line 221: Line 219:


 # Calculate next norm.
-$delG = 0;
-for ^N { $delG += @fi[$_]² }
-$b = $alpha / $delG.sqrt;
+$b = $alpha / ($delG = (map {@fi[$_]²}, ^N).sum ).sqrt;
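This hunk mirrors the change above; the only difference is that $delG was already declared before the loop, so the expression assigns to the existing variable instead of introducing a new one with my.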


 my $g1 = g(@x); # Calculate next value.
Line 232: Line 228:


 sub gradG(@x, $h) { # Provides a rough calculation of gradient g(x).
-my \N = +@x ; my ( @y , @z );
-@y = @x;
-my $g0 = g(@x);
-for ^N { @y[$_] += $h ; @z[$_] = (g(@y) - $g0) / $h }
-return @z
+my \N = +@x ; my @y = @x ; my $g0 = g(@x) ;
+return map { @y[$_] += $h ; (g(@y) - $g0) / $h }, ^N
 }
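In both the old and new gradG, @y[$_] is never reset after a component is computed, so each later difference is taken at a point whose earlier coordinates are still offset by $h; component i therefore approximates the sum of the first i+1 partials rather than the i-th partial alone. A hedged variant, not part of this revision, that restores the coordinate after each step gives the usual one-sided finite-difference gradient; the objective g below is a hypothetical stand-in:

    sub g(@x) { @x[0]² + @x[1]² }   # hypothetical example objective
    sub gradG(@x, $h) { # Forward difference, one coordinate perturbed at a time.
        my \N = +@x ; my @y = @x ; my $g0 = g(@x) ;
        return map { @y[$_] += $h ; my $fi = (g(@y) - $g0) / $h ; @y[$_] -= $h ; $fi }, ^N
    }
    say gradG([1, 2], 1e-6);        # approximately (2 4)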