Polynomial regression

Find an approximating polynomial of known degree for given data.

Task
Polynomial regression
You are encouraged to solve this task according to the task description, using any language you may know.

Example: For input data:

x = {0,  1,  2,  3,  4,  5,  6,   7,   8,   9,   10};
y = {1,  6,  17, 34, 57, 86, 121, 162, 209, 262, 321};

The approximating polynomial is:

3x^2 + 2x + 1

Here, the polynomial's coefficients are (3, 2, 1).

This task is intended as a subtask for Measure relative performance of sorting algorithms implementations.
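
Most of the entries below follow one of two routes: build a Vandermonde (design) matrix and solve the resulting least-squares problem, or evaluate the closed-form formulas for a degree-2 fit. As a quick illustrative sketch of the first route (this assumes NumPy is available and is not tied to any particular entry; the dedicated Python entry further down uses numpy.polyfit instead):

import numpy as np

x = np.arange(11)
y = np.array([1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321])

# Vandermonde matrix with columns 1, x, x^2 (lowest power first)
V = np.vander(x, 3, increasing=True)

# least-squares solution of V @ c ~ y
c, *_ = np.linalg.lstsq(V, y, rcond=None)
print(c)          # approximately [1. 2. 3.], i.e. 1 + 2x + 3x^2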

11l

Translation of: Swift
F average(arr)
   R sum(arr) / Float(arr.len)

F poly_regression(x, y)
   V xm = average(x)
   V ym = average(y)
   V x2m = average(x.map(i -> i * i))
   V x3m = average(x.map(i -> i ^ 3))
   V x4m = average(x.map(i -> i ^ 4))
   V xym  = average(zip(x, y).map((i, j) -> i * j))
   V x2ym = average(zip(x, y).map((i, j) -> i * i * j))
   V sxx = x2m - xm * xm
   V sxy = xym - xm * ym
   V sxx2 = x3m - xm * x2m
   V sx2x2 = x4m - x2m * x2m
   V sx2y = x2ym - x2m * ym
   V b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
   V c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
   V a = ym - b * xm - c * x2m

   F abc(xx)
      R (@a + @b * xx) + (@c * xx * xx)

   print("y = #. + #.x + #.x^2\n".format(a, b, c))
   print(‘ Input  Approximation’)
   print(‘ x   y     y1’)

   L(i) 0 .< x.len
      print(‘#2 #3  #3.1’.format(x[i], y[i], abc(i)))

V x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
V y = [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321]
poly_regression(x, y)
Output:
y = 1 + 2x + 3x^2

 Input  Approximation
 x   y     y1
 0   1    1.0
 1   6    6.0
 2  17   17.0
 3  34   34.0
 4  57   57.0
 5  86   86.0
 6 121  121.0
 7 162  162.0
 8 209  209.0
 9 262  262.0
10 321  321.0

Ada

with Ada.Numerics.Real_Arrays;  use Ada.Numerics.Real_Arrays;

function Fit (X, Y : Real_Vector; N : Positive) return Real_Vector is
   A : Real_Matrix (0..N, X'Range);  -- The plane
begin
   for I in A'Range (2) loop
      for J in A'Range (1) loop
         A (J, I) := X (I)**J;
      end loop;
   end loop;
   return Solve (A * Transpose (A), A * Y);
end Fit;

The function Fit implements a least-squares approximation to a function defined at the points (x_i, y_i) specified by the arrays X and Y. The basis functions are φ_j(x) = x^j, j = 0, 1, ..., N. The implementation is straightforward: first the plane matrix A is created, with A(j, i) = φ_j(x_i); then the linear problem A·A^T·c = A·y is solved. The resulting c_j are the coefficients. Constraint_Error is propagated when the dimensions of X and Y differ, or else when the problem is ill-defined.
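
For readers who want to see the same construction outside Ada, it can be sketched in a few lines of NumPy; this is only an illustrative translation of the idea described above (it assumes NumPy), not part of the Ada entry:

import numpy as np

def fit(x, y, n):
    # build the "plane" matrix A with A[j, i] = x[i]**j, then solve A·A^T·c = A·y
    x = np.asarray(x, dtype=float)
    A = np.array([x**j for j in range(n + 1)])
    return np.linalg.solve(A @ A.T, A @ np.asarray(y, dtype=float))

print(fit(range(11), [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321], 2))
# ~ [1. 2. 3.]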

Example

with Fit;
with Ada.Float_Text_IO;  use Ada.Float_Text_IO;

procedure Fitting is
   C : constant Real_Vector :=
          Fit
          (  (0.0, 1.0,  2.0,  3.0,  4.0,  5.0,   6.0,   7.0,   8.0,   9.0,  10.0),
             (1.0, 6.0, 17.0, 34.0, 57.0, 86.0, 121.0, 162.0, 209.0, 262.0, 321.0),
             2
          );
begin
   Put (C (0), Aft => 3, Exp => 0);
   Put (C (1), Aft => 3, Exp => 0);
   Put (C (2), Aft => 3, Exp => 0);
end Fitting;
Output:
 1.000 2.000 3.000

ALGOL 68

Translation of: Ada
Works with: ALGOL 68 version Standard - lu decomp and lu solve are from the GSL library


Works with: ALGOL 68G version Any - tested with release mk15-0.8b.fc9.i386
MODE FIELD = REAL;

MODE
  VEC = [0]FIELD,
  MAT = [0,0]FIELD;

PROC VOID raise index error := VOID: (
  print(("stop", new line));
  stop
);

COMMENT from http://rosettacode.org/wiki/Matrix_Transpose#ALGOL_68 END COMMENT
OP ZIP = ([,]FIELD in)[,]FIELD:(
  [2 LWB in:2 UPB in,1 LWB in:1UPB in]FIELD out;
  FOR i FROM LWB in TO UPB in DO
     out[,i]:=in[i,]
  OD;
  out
);

COMMENT from http://rosettacode.org/wiki/Matrix_multiplication#ALGOL_68 END COMMENT
OP * = (VEC a,b)FIELD: ( # basically the dot product #
    FIELD result:=0;
    IF LWB a/=LWB b OR UPB a/=UPB b THEN raise index error FI;
    FOR i FROM LWB a TO UPB a DO result+:= a[i]*b[i] OD;
    result
  );

OP * = (VEC a, MAT b)VEC: ( # overload vector times matrix #
    [2 LWB b:2 UPB b]FIELD result;
    IF LWB a/=LWB b OR UPB a/=UPB b THEN raise index error FI;
    FOR j FROM 2 LWB b TO 2 UPB b DO result[j]:=a*b[,j] OD;
    result
  );

OP * = (MAT a, b)MAT: ( # overload matrix times matrix #
     [LWB a:UPB a, 2 LWB b:2 UPB b]FIELD result;
     IF 2 LWB a/=LWB b OR 2 UPB a/=UPB b THEN raise index error FI;
     FOR k FROM LWB result TO UPB result DO result[k,]:=a[k,]*b OD;
     result
   );

COMMENT from http://rosettacode.org/wiki/Pyramid_of_numbers#ALGOL_68 END COMMENT
OP / = (VEC a, MAT b)VEC: ( # vector division #
  [LWB a:UPB a,1]FIELD transpose a;
  transpose a[,1]:=a;
  (transpose a/b)[,1]
);

OP / = (MAT a, MAT b)MAT:( # matrix division #
  [LWB b:UPB b]INT p ;
  INT sign;
  [,]FIELD lu = lu decomp(b, p, sign);
  [LWB a:UPB a, 2 LWB a:2 UPB a]FIELD out;
  FOR col FROM 2 LWB a TO 2 UPB a DO
    out[,col] := lu solve(b, lu, p, a[,col]) [@LWB out[,col]]
  OD;
  out
);

FORMAT int repr = $g(0)$,
       real repr = $g(-7,4)$;

PROC fit =  (VEC x, y, INT order)VEC:
BEGIN
   [0:order, LWB x:UPB x]FIELD a;  # the plane #
   FOR i FROM 2 LWB a TO 2 UPB a  DO
      FOR j FROM LWB a TO UPB a DO
         a [j, i] := x [i]**j
      OD
   OD;
   ( y * ZIP a ) / ( a * ZIP a )
END # fit #;

PROC print polynomial = (VEC x)VOID: (
   BOOL empty := TRUE;
   FOR i FROM UPB x BY -1 TO LWB x DO
     IF x[i] NE 0 THEN
       IF x[i] > 0 AND NOT empty THEN print ("+") FI;
       empty := FALSE;
       IF x[i] NE 1 OR i=0 THEN
         IF ENTIER x[i] = x[i] THEN
           printf((int repr, x[i]))
         ELSE
           printf((real repr, x[i]))
         FI
       FI;
       CASE i+1 IN
         SKIP,print(("x"))
       OUT
         printf(($"x**"g(0)$,i))
       ESAC
     FI
   OD;
   IF empty THEN print("0") FI;
   print(new line)
);

fitting: BEGIN
   VEC c =
          fit
          (  (0.0, 1.0,  2.0,  3.0,  4.0,  5.0,   6.0,   7.0,   8.0,   9.0,  10.0),
             (1.0, 6.0, 17.0, 34.0, 57.0, 86.0, 121.0, 162.0, 209.0, 262.0, 321.0),
             2
          );
   print polynomial(c);
   VEC d =
          fit
          ( (0, 1, 2, 3, 4, 5, 6, 7, 8, 9),
            (2.7, 2.8, 31.4, 38.1, 58.0, 76.2, 100.5, 130.0, 149.3, 180.0),
            2
          );
   print polynomial(d)
END # fitting #
Output:
3x**2+2x+1
 1.0848x**2+10.3552x-0.6164

AutoHotkey

Translation of: Lua
regression(xa,ya){
	n := xa.Count()
	xm := ym := x2m := x3m := x4m := xym := x2ym := 0
	
	loop % n {
		i := A_Index
		xm := xm + xa[i]
		ym := ym + ya[i]
		x2m := x2m + xa[i] * xa[i]
		x3m := x3m + xa[i] * xa[i] * xa[i]
		x4m := x4m + xa[i] * xa[i] * xa[i] * xa[i]
		xym := xym + xa[i] * ya[i]
		x2ym := x2ym + xa[i] * xa[i] * ya[i]
	}
	
	xm := xm / n
	ym := ym / n
	x2m := x2m / n
	x3m := x3m / n
	x4m := x4m / n
	xym := xym / n
	x2ym := x2ym / n

	sxx := x2m - xm * xm
	sxy := xym - xm * ym
	sxx2 := x3m - xm * x2m
	sx2x2 := x4m - x2m * x2m
	sx2y := x2ym - x2m * ym

	b := (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
	c := (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
	a := ym - b * xm - c * x2m
	
	result := "Input`tApproximation`nx   y`ty1`n"
	loop % n
		i := A_Index, result .= xa[i] ", " ya[i] "`t" eval(a, b, c, xa[i]) "`n"
	return "y = " c "x^2" " + " b "x + " a "`n`n" result
}
eval(a,b,c,x){
	return a + (b + c*x) * x
}

Examples:

xa := [0, 1, 2 , 3 , 4 , 5 , 6  , 7  , 8  , 9  , 10]
ya := [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321]
MsgBox % result := regression(xa, ya)
return
Output:
y = 3.000000x^2 + 2.000000x + 1.000000

Input	Approximation
x   y	y1
0, 1	1.000000
1, 6	6.000000
2, 17	17.000000
3, 34	34.000000
4, 57	57.000000
5, 86	86.000000
6, 121	121.000000
7, 162	162.000000
8, 209	209.000000
9, 262	262.000000
10, 321	321.000000

AWK

Translation of: Lua
BEGIN{
  i = 0;
  xa[i] =  0; i++;
  xa[i] =  1; i++;
  xa[i] =  2; i++;
  xa[i] =  3; i++;
  xa[i] =  4; i++;
  xa[i] =  5; i++;
  xa[i] =  6; i++;
  xa[i] =  7; i++;
  xa[i] =  8; i++;
  xa[i] =  9; i++;
  xa[i] = 10; i++;
  i = 0;
  ya[i] =  1; i++;
  ya[i] =  6; i++;
  ya[i] = 17; i++;
  ya[i] = 34; i++;
  ya[i] = 57; i++;
  ya[i] = 86; i++;
  ya[i] =121; i++;
  ya[i] =162; i++;
  ya[i] =209; i++;
  ya[i] =262; i++;
  ya[i] =321; i++;
  exit;
}
{
  # (nothing to do)
}
END{
  a = 0; b = 0; c = 0;  # globals - will change by regression()
  regression(xa,ya);

  printf("y = %6.2f x^2 + %6.2f x + %6.2f\n",c,b,a);
  printf("%-13s %-8s\n","Input","Approximation");
  printf("%-6s %-6s %-8s\n","x","y","y^")
  for (i=0;i<length(xa);i++) {
    printf("%6.1f %6.1f %8.3f\n",xa[i],ya[i],eval(a,b,c,xa[i]));
  }
}

function eval(a,b,c,x) {
  return a+b*x+c*x*x;
}
                           # locals
function regression(x,y,   n,xm,ym,x2m,x3m,x4m,xym,x2ym,sxx,sxy,sxx2,sx2x2,sx2y) {
  n = 0
  xm = 0.0;
  ym = 0.0;
  x2m = 0.0;
  x3m = 0.0;
  x4m = 0.0;
  xym = 0.0;
  x2ym = 0.0;

  for (i in x) {
    xm  += x[i];
    ym  += y[i];
    x2m += x[i] * x[i];
    x3m += x[i] * x[i] * x[i];
    x4m += x[i] * x[i] * x[i] * x[i];
    xym += x[i] * y[i];
    x2ym += x[i] * x[i] * y[i];
    n++;
  }
  xm = xm / n;
  ym = ym / n;
  x2m = x2m / n;
  x3m = x3m / n;
  x4m = x4m / n;
  xym = xym / n;
  x2ym = x2ym / n;

  sxx = x2m - xm * xm;
  sxy = xym - xm * ym;
  sxx2 = x3m - xm * x2m;
  sx2x2 = x4m - x2m * x2m;
  sx2y = x2ym - x2m * ym;

  b = (sxy  * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
  c = (sx2y * sxx   - sxy  * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
  a = ym - b * xm - c * x2m;
}
Output:
y =   3.00 x^2 +   2.00 x +   1.00
Input         Approximation
x      y      y^
   0.0    1.0    1.000
   1.0    6.0    6.000
   2.0   17.0   17.000
   3.0   34.0   34.000
   4.0   57.0   57.000
   5.0   86.0   86.000
   6.0  121.0  121.000
   7.0  162.0  162.000
   8.0  209.0  209.000
   9.0  262.0  262.000
  10.0  321.0  321.000

BBC BASIC

The code listed below is good for up to 10000 data points and fits an order-5 polynomial, so the test data for this task is hardly challenging!
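
The structure of the BASIC solution below — accumulate the power sums of x^k (up to x^10) and of x^k·y, assemble them into the 6×6 normal-equation ("moment") matrix, invert, and multiply — can be sketched in Python as follows. This is only an illustration of the same idea, assuming NumPy is available, and is not part of the BASIC entry:

import numpy as np

def polyfit_moments(x, y, degree=5):
    # normal-equation (moment) matrix built from power sums, as in the BASIC code below
    x = np.asarray(x, dtype=float)
    y = np.asarray(y, dtype=float)
    sx  = [np.sum(x**k) for k in range(2 * degree + 1)]     # sums of x^0 .. x^(2*degree)
    sxy = [np.sum(x**k * y) for k in range(degree + 1)]     # sums of x^k * y
    M = [[sx[i + j] for j in range(degree + 1)] for i in range(degree + 1)]
    return np.linalg.solve(M, sxy)                          # coefficients, lowest power first

x = np.arange(11)
y = np.array([1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321])
print(polyfit_moments(x, y))   # close to [1, 2, 3, 0, 0, 0]; some numerical noise is expected, as in the BASIC output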

      INSTALL @lib$+"ARRAYLIB"
      
      Max% = 10000
      DIM vector(5), matrix(5,5)
      DIM x(Max%), x2(Max%), x3(Max%), x4(Max%), x5(Max%)
      DIM x6(Max%), x7(Max%), x8(Max%), x9(Max%), x10(Max%)
      DIM y(Max%), xy(Max%), x2y(Max%), x3y(Max%), x4y(Max%), x5y(Max%)
      
      npts% = 11
      x() = 0,  1,  2,  3,  4,  5,  6,   7,   8,   9,   10
      y() = 1,  6,  17, 34, 57, 86, 121, 162, 209, 262, 321
      
      sum_x = SUM(x())
      x2()  = x() * x()   : sum_x2  = SUM(x2())
      x3()  = x() * x2()  : sum_x3  = SUM(x3())
      x4()  = x2() * x2() : sum_x4  = SUM(x4())
      x5()  = x2() * x3() : sum_x5  = SUM(x5())
      x6()  = x3() * x3() : sum_x6  = SUM(x6())
      x7()  = x3() * x4() : sum_x7  = SUM(x7())
      x8()  = x4() * x4() : sum_x8  = SUM(x8())
      x9()  = x4() * x5() : sum_x9  = SUM(x9())
      x10() = x5() * x5() : sum_x10 = SUM(x10())
      
      sum_y = SUM(y())
      xy()  = x() * y()   : sum_xy  = SUM(xy())
      x2y() = x2() * y()  : sum_x2y = SUM(x2y())
      x3y() = x3() * y()  : sum_x3y = SUM(x3y())
      x4y() = x4() * y()  : sum_x4y = SUM(x4y())
      x5y() = x5() * y()  : sum_x5y = SUM(x5y())
      
      matrix() = \
      \ npts%,  sum_x,   sum_x2,  sum_x3,  sum_x4,  sum_x5, \
      \ sum_x,  sum_x2,  sum_x3,  sum_x4,  sum_x5,  sum_x6, \
      \ sum_x2, sum_x3,  sum_x4,  sum_x5,  sum_x6,  sum_x7, \
      \ sum_x3, sum_x4,  sum_x5,  sum_x6,  sum_x7,  sum_x8, \
      \ sum_x4, sum_x5,  sum_x6,  sum_x7,  sum_x8,  sum_x9, \
      \ sum_x5, sum_x6,  sum_x7,  sum_x8,  sum_x9,  sum_x10
      
      vector() = \
      \ sum_y,  sum_xy,  sum_x2y, sum_x3y, sum_x4y, sum_x5y
      
      PROC_invert(matrix())
      vector() = matrix().vector()
      
      @% = &2040A
      PRINT "Polynomial coefficients = "
      FOR term% = 5 TO 0 STEP -1
        PRINT ;vector(term%) " * x^" STR$(term%)
      NEXT
Output:
Polynomial coefficients =
0.0000 * x^5
-0.0000 * x^4
0.0002 * x^3
2.9993 * x^2
2.0012 * x^1
0.9998 * x^0

C

Include file (to make the code reusable easily) named polifitgsl.h

#ifndef _POLIFITGSL_H
#define _POLIFITGSL_H
#include <gsl/gsl_multifit.h>
#include <stdbool.h>
#include <math.h>
bool polynomialfit(int obs, int degree, 
		   double *dx, double *dy, double *store); /* n, p */
#endif

Implementation (the examples here helped a lot to code this quickly):

#include "polifitgsl.h"

bool polynomialfit(int obs, int degree, 
		   double *dx, double *dy, double *store) /* n, p */
{
  gsl_multifit_linear_workspace *ws;
  gsl_matrix *cov, *X;
  gsl_vector *y, *c;
  double chisq;

  int i, j;

  X = gsl_matrix_alloc(obs, degree);
  y = gsl_vector_alloc(obs);
  c = gsl_vector_alloc(degree);
  cov = gsl_matrix_alloc(degree, degree);

  for(i=0; i < obs; i++) {
    for(j=0; j < degree; j++) {
      gsl_matrix_set(X, i, j, pow(dx[i], j));
    }
    gsl_vector_set(y, i, dy[i]);
  }

  ws = gsl_multifit_linear_alloc(obs, degree);
  gsl_multifit_linear(X, y, c, cov, &chisq, ws);

  /* store result ... */
  for(i=0; i < degree; i++)
  {
    store[i] = gsl_vector_get(c, i);
  }

  gsl_multifit_linear_free(ws);
  gsl_matrix_free(X);
  gsl_matrix_free(cov);
  gsl_vector_free(y);
  gsl_vector_free(c);
  return true; /* we do not "analyse" the result (cov matrix mainly)
		  to know if the fit is "good" */
}

Testing:

#include <stdio.h>

#include "polifitgsl.h"

#define NP 11
double x[] = {0,  1,  2,  3,  4,  5,  6,   7,   8,   9,   10};
double y[] = {1,  6,  17, 34, 57, 86, 121, 162, 209, 262, 321};

#define DEGREE 3
double coeff[DEGREE];

int main()
{
  int i;

  polynomialfit(NP, DEGREE, x, y, coeff);
  for(i=0; i < DEGREE; i++) {
    printf("%lf\n", coeff[i]);
  }
  return 0;
}
Output:
1.000000
2.000000
3.000000

C#

Library: Math.Net
        public static double[] Polyfit(double[] x, double[] y, int degree)
        {
            // Vandermonde matrix
            var v = new DenseMatrix(x.Length, degree + 1);
            for (int i = 0; i < v.RowCount; i++)
                for (int j = 0; j <= degree; j++) v[i, j] = Math.Pow(x[i], j);
            var yv = new DenseVector(y).ToColumnMatrix();
            QR<double> qr = v.QR();
            // Math.Net doesn't have an "economy" QR, so:
            // cut R short to square upper triangle, then recompute Q
            var r = qr.R.SubMatrix(0, degree + 1, 0, degree + 1);
            var q = v.Multiply(r.Inverse());
            var p = r.Inverse().Multiply(q.TransposeThisAndMultiply(yv));
            return p.Column(0).ToArray();
        }

Example:

        static void Main(string[] args)
        {
            const int degree = 2;
            var x = new[] {0.0, 1.0,  2.0,  3.0,  4.0,  5.0,   6.0,   7.0,   8.0,   9.0,  10.0};
            var y = new[] {1.0, 6.0, 17.0, 34.0, 57.0, 86.0, 121.0, 162.0, 209.0, 262.0, 321.0};
            var p = Polyfit(x, y, degree);
            foreach (var d in p) Console.Write("{0} ",d);
            Console.WriteLine();
            for (int i = 0; i < x.Length; i++ )
                Console.WriteLine("{0} => {1} diff {2}", x[i], Polynomial.Evaluate(x[i], p), y[i] - Polynomial.Evaluate(x[i], p));
            Console.ReadKey(true);
        }

C++

Translation of: Java
#include <algorithm>
#include <iostream>
#include <numeric>
#include <vector>

void polyRegression(const std::vector<int>& x, const std::vector<int>& y) {
    int n = x.size();
    std::vector<int> r(n);
    std::iota(r.begin(), r.end(), 0);
    double xm = std::accumulate(x.begin(), x.end(), 0.0) / x.size();
    double ym = std::accumulate(y.begin(), y.end(), 0.0) / y.size();
    double x2m = std::transform_reduce(r.begin(), r.end(), 0.0, std::plus<double>{}, [](double a) {return a * a; }) / r.size();
    double x3m = std::transform_reduce(r.begin(), r.end(), 0.0, std::plus<double>{}, [](double a) {return a * a * a; }) / r.size();
    double x4m = std::transform_reduce(r.begin(), r.end(), 0.0, std::plus<double>{}, [](double a) {return a * a * a * a; }) / r.size();

    double xym = std::transform_reduce(x.begin(), x.end(), y.begin(), 0.0, std::plus<double>{}, std::multiplies<double>{});
    xym /= fmin(x.size(), y.size());

    double x2ym = std::transform_reduce(x.begin(), x.end(), y.begin(), 0.0, std::plus<double>{}, [](double a, double b) { return a * a * b; });
    x2ym /= fmin(x.size(), y.size());

    double sxx = x2m - xm * xm;
    double sxy = xym - xm * ym;
    double sxx2 = x3m - xm * x2m;
    double sx2x2 = x4m - x2m * x2m;
    double sx2y = x2ym - x2m * ym;

    double b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
    double c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
    double a = ym - b * xm - c * x2m;

    auto abc = [a, b, c](int xx) {
        return a + b * xx + c * xx*xx;
    };

    std::cout << "y = " << a << " + " << b << "x + " << c << "x^2" << std::endl;
    std::cout << " Input  Approximation" << std::endl;
    std::cout << " x   y     y1" << std::endl;

    auto xit = x.cbegin();
    auto xend = x.cend();
    auto yit = y.cbegin();
    auto yend = y.cend();
    while (xit != xend && yit != yend) {
        printf("%2d %3d  %5.1f\n", *xit, *yit, abc(*xit));
        xit = std::next(xit);
        yit = std::next(yit);
    }
}

int main() {
    using namespace std;

    vector<int> x(11);
    iota(x.begin(), x.end(), 0);

    vector<int> y{ 1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321 };

    polyRegression(x, y);

    return 0;
}
Output:
y = 1 + 2x + 3x^2
 Input  Approximation
 x   y     y1
 0   1    1.0
 1   6    6.0
 2  17   17.0
 3  34   34.0
 4  57   57.0
 5  86   86.0
 6 121  121.0
 7 162  162.0
 8 209  209.0
 9 262  262.0
10 321  321.0

Common Lisp

Uses the routine (lsqr A b) from Multiple regression and (mtp A) from Matrix transposition.

;; Least square fit of a polynomial of order n the x-y-curve.
(defun polyfit (x y n)
  (let* ((m (cadr (array-dimensions x)))
         (A (make-array `(,m ,(+ n 1)) :initial-element 0)))
    (loop for i from 0 to (- m 1) do
          (loop for j from 0 to n do
                (setf (aref A i j)
                      (expt (aref x 0 i) j))))
    (lsqr A (mtp y))))

Example:

(let ((x (make-array '(1 11) :initial-contents '((0 1 2 3 4 5 6 7 8 9 10))))
      (y (make-array '(1 11) :initial-contents '((1 6 17 34 57 86 121 162 209 262 321)))))
  (polyfit x y 2))

#2A((0.9999999999999759d0) (2.000000000000005d0) (3.0d0))

D

Translation of: Kotlin
import std.algorithm;
import std.range;
import std.stdio;

auto average(R)(R r) {
    auto t = r.fold!("a+b", "a+1")(0, 0);
    return cast(double) t[0] / t[1];
}

void polyRegression(int[] x, int[] y) {
    auto n = x.length;
    auto r = iota(0, n).array;
    auto xm = x.average();
    auto ym = y.average();
    auto x2m = r.map!"a*a".average();
    auto x3m = r.map!"a*a*a".average();
    auto x4m = r.map!"a*a*a*a".average();
    auto xym = x.zip(y).map!"a[0]*a[1]".average();
    auto x2ym = x.zip(y).map!"a[0]*a[0]*a[1]".average();

    auto sxx = x2m - xm * xm;
    auto sxy = xym - xm * ym;
    auto sxx2 = x3m - xm * x2m;
    auto sx2x2 = x4m - x2m * x2m;
    auto sx2y = x2ym - x2m * ym;

    auto b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
    auto c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
    auto a = ym - b * xm - c * x2m;

    real abc(int xx) {
        return a + b * xx + c * xx * xx;
    }

    writeln("y = ", a, " + ", b, "x + ", c, "x^2");
    writeln(" Input  Approximation");
    writeln(" x   y     y1");
    foreach (i; 0..n) {
        writefln("%2d %3d  %5.1f", x[i], y[i], abc(x[i]));
    }
}

void main() {
    auto x = iota(0, 11).array;
    auto y = [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321];
    polyRegression(x, y);
}
Output:
y = 1 + 2x + 3x^2
 Input  Approximation
 x   y     y1
 0   1    1.0
 1   6    6.0
 2  17   17.0
 3  34   34.0
 4  57   57.0
 5  86   86.0
 6 121  121.0
 7 162  162.0
 8 209  209.0
 9 262  262.0
10 321  321.0

EasyLang

Translation of: Lua
func eval a b c x .
   return a + (b + c * x) * x
.
proc regression xa[] ya[] . .
   n = len xa[]
   for i = 1 to n
      xm = xm + xa[i]
      ym = ym + ya[i]
      x2m = x2m + xa[i] * xa[i]
      x3m = x3m + xa[i] * xa[i] * xa[i]
      x4m = x4m + xa[i] * xa[i] * xa[i] * xa[i]
      xym = xym + xa[i] * ya[i]
      x2ym = x2ym + xa[i] * xa[i] * ya[i]
   .
   xm = xm / n
   ym = ym / n
   x2m = x2m / n
   x3m = x3m / n
   x4m = x4m / n
   xym = xym / n
   x2ym = x2ym / n
   # 
   sxx = x2m - xm * xm
   sxy = xym - xm * ym
   sxx2 = x3m - xm * x2m
   sx2x2 = x4m - x2m * x2m
   sx2y = x2ym - x2m * ym
   # 
   b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
   c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
   a = ym - b * xm - c * x2m
   print "y = " & a & " + " & b & "x + " & c & "x^2"
   numfmt 0 3
   for i = 1 to n
      print xa[i] & "  " & ya[i] & " " & eval a b c xa[i]
   .
.
xa[] = [ 0 1 2 3 4 5 6 7 8 9 10 ]
ya[] = [ 1 6 17 34 57 86 121 162 209 262 321 ]
regression xa[] ya[]

Emacs Lisp

Library: Calc
(let ((x '(0 1 2 3 4 5 6 7 8 9 10))
      (y '(1 6 17 34 57 86 121 162 209 262 321)))
  (calc-eval "fit(a*x^2+b*x+c,[x],[a,b,c],[$1 $2])" nil (cons 'vec x) (cons 'vec y)))
Output:
"3. x^2 + 1.99999999996 x + 1.00000000006"

Fortran

Library: LAPACK
module fitting
contains

  function polyfit(vx, vy, d)
    implicit none
    integer, intent(in)                   :: d
    integer, parameter                    :: dp = selected_real_kind(15, 307)
    real(dp), dimension(d+1)              :: polyfit
    real(dp), dimension(:), intent(in)    :: vx, vy
   
    real(dp), dimension(:,:), allocatable :: X
    real(dp), dimension(:,:), allocatable :: XT
    real(dp), dimension(:,:), allocatable :: XTX
   
    integer :: i, j
   
    integer     :: n, lda, lwork
    integer :: info
    integer, dimension(:), allocatable :: ipiv
    real(dp), dimension(:), allocatable :: work
   
    n = d+1
    lda = n
    lwork = n
   
    allocate(ipiv(n))
    allocate(work(lwork))
    allocate(XT(n, size(vx)))
    allocate(X(size(vx), n))
    allocate(XTX(n, n))
   
    ! prepare the matrix
    do i = 0, d
       do j = 1, size(vx)
          X(j, i+1) = vx(j)**i
       end do
    end do
   
    XT  = transpose(X)
    XTX = matmul(XT, X)
   
    ! calls to LAPACK subs DGETRF and DGETRI
    call DGETRF(n, n, XTX, lda, ipiv, info)
    if ( info /= 0 ) then
       print *, "problem"
       return
    end if
    call DGETRI(n, XTX, lda, ipiv, work, lwork, info)
    if ( info /= 0 ) then
       print *, "problem"
       return
    end if
    
    polyfit = matmul( matmul(XTX, XT), vy)
    
    deallocate(ipiv)
    deallocate(work)
    deallocate(X)
    deallocate(XT)
    deallocate(XTX)
  
  end function
 
end module

Example

program PolynomalFitting
  use fitting
  implicit none
 
  ! let us test it
  integer, parameter      :: degree = 2
  integer, parameter      :: dp = selected_real_kind(15, 307)
  integer                 :: i
  real(dp), dimension(11) :: x = (/ (i,i=0,10) /)
  real(dp), dimension(11) :: y = (/ 1,   6,  17,  34, &
                                   57,  86, 121, 162, &
                                   209, 262, 321 /)
  real(dp), dimension(degree+1) :: a
 
  a = polyfit(x, y, degree)
 
  write (*, '(F9.4)') a

end program
Output:

(lower powers first, so this seems the opposite of the Python output)

   1.0000
   2.0000
   3.0000

FreeBASIC

General regression for polynomials of arbitrary degree; here it is used for degree 2 (3 terms).

#Include "crt.bi"  'for rounding only

Type vector
    Dim As Double element(Any)
End Type

Type matrix
    Dim As Double element(Any,Any)
    Declare Function inverse() As matrix
    Declare Function transpose() As matrix
    private:
    Declare Function GaussJordan(As vector) As vector
End Type

'mult operators
Operator *(m1 As matrix,m2 As matrix) As matrix
    Dim rows As Integer=Ubound(m1.element,1)
    Dim columns As Integer=Ubound(m2.element,2)
    If Ubound(m1.element,2)<>Ubound(m2.element,1) Then
        Print "Can't do"
        Exit Operator
    End If
    Dim As matrix ans
    Redim ans.element(rows,columns)
    Dim rxc As Double
    For r As Integer=1 To rows
        For c As Integer=1 To columns
            rxc=0
            For k As Integer = 1 To Ubound(m1.element,2)
                rxc=rxc+m1.element(r,k)*m2.element(k,c)
            Next k
            ans.element(r,c)=rxc
        Next c
    Next r
    Operator= ans
End Operator

Operator *(m1 As matrix,m2 As vector) As vector
    Dim rows As Integer=Ubound(m1.element,1)
    Dim columns As Integer=Ubound(m2.element,2)
    If Ubound(m1.element,2)<>Ubound(m2.element) Then
        Print "Can't do"
        Exit Operator
    End If
    Dim As vector ans
    Redim ans.element(rows)
    Dim rxc As Double
    For r As Integer=1 To rows
        rxc=0
        For k As Integer = 1 To Ubound(m1.element,2)
            rxc=rxc+m1.element(r,k)*m2.element(k)
        Next k
        ans.element(r)=rxc
    Next r
    Operator= ans
End Operator

Function matrix.transpose() As matrix
    Dim As matrix b
    Redim b.element(1 To Ubound(this.element,2),1 To Ubound(this.element,1))
    For i As Long=1 To Ubound(this.element,1)
        For j As Long=1 To Ubound(this.element,2)
            b.element(j,i)=this.element(i,j)
        Next
    Next
    Return b
End Function

Function matrix.GaussJordan(rhs As vector) As vector
    Dim As Integer n=Ubound(rhs.element)
    Dim As vector ans=rhs,r=rhs
    Dim As matrix b=This
    #macro pivot(num)
        For p1 As Integer  = num To n - 1
            For p2 As Integer  = p1 + 1 To n
                If Abs(b.element(p1,num))<Abs(b.element(p2,num)) Then
                    Swap r.element(p1),r.element(p2)
                    For g As Integer=1 To n
                        Swap b.element(p1,g),b.element(p2,g)
                    Next g
                End If
            Next p2
        Next p1
    #endmacro
    For k As Integer=1 To n-1
        pivot(k)
        For row As Integer =k To n-1
            If b.element(row+1,k)=0 Then Exit For
            Var f=b.element(k,k)/b.element(row+1,k)
            r.element(row+1)=r.element(row+1)*f-r.element(k)
            For g As Integer=1 To n
                b.element((row+1),g)=b.element((row+1),g)*f-b.element(k,g)
            Next g
        Next row
    Next k
    'back substitute
    For z As Integer=n To 1 Step -1
        ans.element(z)=r.element(z)/b.element(z,z)
        For j As Integer = n To z+1 Step -1
            ans.element(z)=ans.element(z)-(b.element(z,j)*ans.element(j)/b.element(z,z))
        Next j
    Next    z
    Function = ans
End Function

Function matrix.inverse() As matrix
    Var ub1=Ubound(this.element,1),ub2=Ubound(this.element,2)
    Dim As matrix ans
    Dim As vector temp,null_
    Redim temp.element(1 To ub1):Redim null_.element(1 To ub1)
    Redim ans.element(1 To ub1,1 To ub2)
    For a As Integer=1 To ub1
        temp=null_
        temp.element(a)=1
        temp=GaussJordan(temp)
        For b As Integer=1 To ub1
            ans.element(b,a)=temp.element(b)
        Next b
    Next a
    Return ans
End Function

'vandermode of x
Function vandermonde(x_values() As Double,w As Long) As matrix
    Dim As matrix mat
    Var n=Ubound(x_values)
    Redim mat.element(1 To n,1 To w)
    For a As Integer=1 To n
        For b As Integer=1 To w
            mat.element(a,b)=x_values(a)^(b-1)
        Next b
    Next a
    Return mat
End Function

'main procedure
Sub regress(x_values() As Double,y_values() As Double,ans() As Double,n As Long)
    Redim ans(1 To Ubound(x_values))
    Dim As matrix m1= vandermonde(x_values(),n)
    Dim As matrix T=m1.transpose
    Dim As vector y
    Redim y.element(1 To Ubound(ans))
    For n As Long=1 To Ubound(y_values)
        y.element(n)=y_values(n)
    Next n
    Dim As vector result=(((T*m1).inverse)*T)*y
    Redim Preserve ans(1 To n)
    For n As Long=1 To Ubound(ans)
        ans(n)=result.element(n)
    Next n
End Sub

'Evaluate a polynomial at x
Function polyeval(Coefficients() As Double,Byval x As Double) As Double
    Dim As Double acc
    For i As Long=Ubound(Coefficients) To Lbound(Coefficients) Step -1
        acc=acc*x+Coefficients(i)
    Next i
    Return acc
End Function

Function CRound(Byval x As Double,Byval precision As Integer=30) As String
    If precision>30 Then precision=30
    Dim As zstring * 40 z:Var s="%." &str(Abs(precision)) &"f"
    sprintf(z,s,x)
    If Val(z) Then Return Rtrim(Rtrim(z,"0"),".")Else Return "0"
End Function

Function show(a() As Double,places as long=10) As String
    Dim As String s,g
    For n As Long=Lbound(a) To Ubound(a)
        If n<3 Then g="" Else g="^"+Str(n-1)
        if val(cround(a(n),places))<>0 then
            s+= Iif(Sgn(a(n))>=0,"+","")+cround(a(n),places)+ Iif(n=Lbound(a),"","*x"+g)+" "
        end if
    Next n
    Return s
End Function


dim as double x(1 to ...)={0,  1,  2,  3,  4,  5,  6,   7,   8,   9,   10}
dim as double y(1 to ...)={1,  6,  17, 34, 57, 86, 121, 162, 209, 262, 321}

Redim As Double ans()
regress(x(),y(),ans(),3)

print show(ans())
sleep
Output:
+1 +2*x +3*x^2

GAP

PolynomialRegression := function(x, y, n)
	local a;
	a := List([0 .. n], i -> List(x, s -> s^i));
	return TransposedMat((a * TransposedMat(a))^-1 * a * TransposedMat([y]))[1];
end;

x := [0,  1,  2,  3,  4,  5,  6,   7,   8,   9,   10];
y := [1,  6,  17, 34, 57, 86, 121, 162, 209, 262, 321];

# Return coefficients in ascending degree order
PolynomialRegression(x, y, 2);
# [ 1, 2, 3 ]

gnuplot

# The polynomial approximation
f(x) = a*x**2 + b*x + c

# Initial values for parameters
a = 0.1
b = 0.1
c = 0.1

# Fit f to the following data by modifying the variables a, b, c
fit f(x) '-' via a, b, c
   0   1
   1   6
   2  17
   3  34
   4  57
   5  86
   6 121
   7 162
   8 209
   9 262
  10 321
e

print sprintf("\n --- \n Polynomial fit: %.4f x^2 + %.4f x + %.4f\n", a, b, c)

Go

Library: gonum/matrix

package main

import (
	"fmt"
	"log"

	"gonum.org/v1/gonum/mat"
)

func main() {
	var (
		x = []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
		y = []float64{1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321}

		degree = 2

		a = Vandermonde(x, degree+1)
		b = mat.NewDense(len(y), 1, y)
		c = mat.NewDense(degree+1, 1, nil)
	)

	var qr mat.QR
	qr.Factorize(a)

	const trans = false
	err := qr.SolveTo(c, trans, b)
	if err != nil {
		log.Fatalf("could not solve QR: %+v", err)
	}
	fmt.Printf("%.3f\n", mat.Formatted(c))
}

func Vandermonde(a []float64, d int) *mat.Dense {
	x := mat.NewDense(len(a), d, nil)
	for i := range a {
		for j, p := 0, 1.0; j < d; j, p = j+1, p*a[i] {
			x.Set(i, j, p)
		}
	}
	return x
}
Output:
⎡1.000⎤
⎢2.000⎥
⎣3.000⎦

Library: go.matrix

Least squares solution using QR decomposition and package go.matrix.
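
The QR route used here (factor the Vandermonde matrix, form Qᵀy, then back-substitute through the upper-triangular R) is sketched below in Python for comparison; an illustration only, assuming NumPy, and not part of the Go entry:

import numpy as np

x = np.arange(11, dtype=float)
y = np.array([1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321], dtype=float)
degree = 2

X = np.vander(x, degree + 1, increasing=True)   # Vandermonde matrix, lowest power first
Q, R = np.linalg.qr(X)                          # reduced QR factorization, X = Q @ R
qty = Q.T @ y

c = np.zeros(degree + 1)
for i in range(degree, -1, -1):                 # back substitution, as in the Go code
    c[i] = (qty[i] - R[i, i + 1:] @ c[i + 1:]) / R[i, i]
print(c)                                        # ~ [1. 2. 3.]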

package main

import (
    "fmt"

    "github.com/skelterjohn/go.matrix"
)

var xGiven = []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
var yGiven = []float64{1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321}
var degree = 2

func main() {
    m := len(yGiven)
    n := degree + 1
    y := matrix.MakeDenseMatrix(yGiven, m, 1)
    x := matrix.Zeros(m, n)
    for i := 0; i < m; i++ {
        ip := float64(1)
        for j := 0; j < n; j++ {
            x.Set(i, j, ip)
            ip *= xGiven[i]
        }
    }

    q, r := x.QR()
    qty, err := q.Transpose().Times(y)
    if err != nil {
        fmt.Println(err)
        return
    }
    c := make([]float64, n)
    for i := n - 1; i >= 0; i-- {
        c[i] = qty.Get(i, 0)
        for j := i + 1; j < n; j++ {
            c[i] -= c[j] * r.Get(i, j)
        }
        c[i] /= r.Get(i, i)
    }
    fmt.Println(c)
}
Output:

(lowest order coefficient first)

[0.9999999999999758 2.000000000000015 2.999999999999999]

Haskell

Uses module Matrix.LU from hackageDB DSP

import Data.List
import Data.Array
import Control.Monad
import Control.Arrow
import Matrix.LU

ppoly p x = map (x**) p

polyfit d ry = elems $ solve mat vec  where
   mat = listArray ((1,1), (d,d)) $ liftM2 concatMap ppoly id [0..fromIntegral $ pred d]
   vec = listArray (1,d) $ take d ry
Output:

in GHCi

*Main> polyfit 3 [1,6,17,34,57,86,121,162,209,262,321]
[1.0,2.0,3.0]

HicEst

REAL :: n=10, x(n), y(n), m=3, p(m)

   x = (0,  1,  2,  3,  4,  5,  6,   7,   8,   9,   10)
   y = (1,  6,  17, 34, 57, 86, 121, 162, 209, 262, 321)

   p = 2 ! initial guess for the polynom's coefficients

   SOLVE(NUL=Theory()-y(nr), Unknown=p, DataIdx=nr, Iters=iterations)

   WRITE(ClipBoard, Name) p, iterations

FUNCTION Theory()
   ! called by the solver of the SOLVE function. All variables are global
   Theory = p(1)*x(nr)^2 + p(2)*x(nr) + p(3)
 END
Output:
SOLVE performs a (nonlinear) least-square fit (Levenberg-Marquardt): 
p(1)=2.997135145; p(2)=2.011348347; p(3)=0.9906627242; iterations=19;
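
For comparison, the same iterative Levenberg-Marquardt style of fit can be reproduced in Python with SciPy's curve_fit, which defaults to LM for unconstrained problems. This is only a sketch of what the HicEst SOLVE call is doing (it assumes SciPy is installed), not part of the HicEst entry:

import numpy as np
from scipy.optimize import curve_fit

def theory(x, p1, p2, p3):
    return p1 * x**2 + p2 * x + p3

x = np.arange(11, dtype=float)
y = np.array([1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321], dtype=float)

popt, _ = curve_fit(theory, x, y, p0=(2.0, 2.0, 2.0))   # p0 = 2 mirrors the initial guess above
print(popt)                                             # ~ [3. 2. 1.], i.e. p(1), p(2), p(3)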

Hy

(import [numpy [polyfit]])

(setv x (range 11))
(setv y [1 6 17 34 57 86 121 162 209 262 321])

(print (polyfit x y 2))

J

   Y=:1 6 17 34 57 86 121 162 209 262 321
   (%. ^/~@x:@i.@#) Y
1 2 3 0 0 0 0 0 0 0 0

Note that this implementation does not use floating point numbers, so we do not introduce floating point errors. Using exact arithmetic has a speed penalty, but for small problems like this it is inconsequential.

The above solution fits a polynomial of order 11 (or, more specifically, a polynomial whose order matches the length of its argument sequence). If the order of the polynomial is known to be 3 (as is implied in the task description) then the following solution is probably preferable:

   Y %. (i.3) ^/~ i.#Y
1 2 3

(note that this time we used floating point numbers, so that result is approximate rather than exact - it only looks exact because of how J displays floating point numbers (by default, J assumes six digits of accuracy) - changing (i.3) to (x:i.3) would give us an exact result, if that mattered.)
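
The exact-arithmetic point can also be illustrated outside J: the sketch below solves the 3×3 normal equations over the rationals with Python's fractions module (Cramer's rule with a hand-rolled 3×3 determinant; no library solver is assumed), so the coefficients come out exactly as 1, 2 and 3 with no rounding:

from fractions import Fraction

x = [Fraction(i) for i in range(11)]
y = [Fraction(v) for v in [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321]]

# normal equations M @ c = r for a degree-2 fit, built exactly over the rationals
M = [[sum(xi**(i + j) for xi in x) for j in range(3)] for i in range(3)]
r = [sum(xi**i * yi for xi, yi in zip(x, y)) for i in range(3)]

def det3(m):
    return (m[0][0] * (m[1][1]*m[2][2] - m[1][2]*m[2][1])
          - m[0][1] * (m[1][0]*m[2][2] - m[1][2]*m[2][0])
          + m[0][2] * (m[1][0]*m[2][1] - m[1][1]*m[2][0]))

d = det3(M)
coeffs = []
for k in range(3):                    # Cramer's rule, exact in Fraction arithmetic
    Mk = [row[:] for row in M]
    for i in range(3):
        Mk[i][k] = r[i]
    coeffs.append(det3(Mk) / d)
print(coeffs)                         # [Fraction(1, 1), Fraction(2, 1), Fraction(3, 1)]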

Java

Translation of: D
Works with: Java version 8
import java.util.Arrays;
import java.util.function.IntToDoubleFunction;
import java.util.stream.IntStream;

public class PolynomialRegression {
    private static void polyRegression(int[] x, int[] y) {
        int n = x.length;
        double xm = Arrays.stream(x).average().orElse(Double.NaN);
        double ym = Arrays.stream(y).average().orElse(Double.NaN);
        double x2m = Arrays.stream(x).map(a -> a * a).average().orElse(Double.NaN);
        double x3m = Arrays.stream(x).map(a -> a * a * a).average().orElse(Double.NaN);
        double x4m = Arrays.stream(x).map(a -> a * a * a * a).average().orElse(Double.NaN);
        double xym = 0.0;
        for (int i = 0; i < x.length && i < y.length; ++i) {
            xym += x[i] * y[i];
        }
        xym /= Math.min(x.length, y.length);
        double x2ym = 0.0;
        for (int i = 0; i < x.length && i < y.length; ++i) {
            x2ym += x[i] * x[i] * y[i];
        }
        x2ym /= Math.min(x.length, y.length);

        double sxx = x2m - xm * xm;
        double sxy = xym - xm * ym;
        double sxx2 = x3m - xm * x2m;
        double sx2x2 = x4m - x2m * x2m;
        double sx2y = x2ym - x2m * ym;

        double b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
        double c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
        double a = ym - b * xm - c * x2m;

        IntToDoubleFunction abc = (int xx) -> a + b * xx + c * xx * xx;

        System.out.println("y = " + a + " + " + b + "x + " + c + "x^2");
        System.out.println(" Input  Approximation");
        System.out.println(" x   y     y1");
        for (int i = 0; i < n; ++i) {
            System.out.printf("%2d %3d  %5.1f\n", x[i], y[i], abc.applyAsDouble(x[i]));
        }
    }

    public static void main(String[] args) {
        int[] x = IntStream.range(0, 11).toArray();
        int[] y = new int[]{1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321};
        polyRegression(x, y);
    }
}
Output:
y = 1.0 + 2.0x + 3.0x^2
 Input  Approximation
 x   y     y1
 0   1    1.0
 1   6    6.0
 2  17   17.0
 3  34   34.0
 4  57   57.0
 5  86   86.0
 6 121  121.0
 7 162  162.0
 8 209  209.0
 9 262  262.0
10 321  321.0

jq

Adapted from Wren

Works with jq, the C implementation of jq

Works with gojq, the Go implementation of jq

Works with jaq, the Rust implementation of jq

def mean: add/length;

def inner_product($y):
  . as $x
  | reduce range(0; length) as $i (0; . + ($x[$i] * $y[$i]));

# $x and $y should be arrays of the same length
# Emit { a, b, c, z}
# Attempt to avoid overflow
def polynomialRegression($x; $y):
  ($x | length) as $length
  | ($length * $length) as $l2
  | ($x  | map(./$length)) as $xs
  | ($xs | add) as $xm
  | ($y  | mean) as $ym
  | ($xs | map(. * .) | add * $length) as $x2m
  | ($x  | map( (./$length) * . * .) | add) as $x3m
  | ($xs | map(. * . | (.*.) ) | add * $l2 * $length) as $x4m
  | ($xs | inner_product($y)) as $xym
  | ($xs | map(. * .) | inner_product($y) * $length) as $x2ym
  
  | ($x2m - $xm * $xm) as $sxx
  | ($xym - $xm * $ym) as $sxy
  | ($x3m - $xm * $x2m) as $sxx2
  | ($x4m - $x2m * $x2m) as $sx2x2
  | ($x2ym - $x2m * $ym) as $sx2y  
  | {z:  ([$x,$y] | transpose) }
  | .b = ($sxy * $sx2x2 - $sx2y * $sxx2) / ($sxx * $sx2x2 - $sxx2 * $sxx2)
  | .c = ($sx2y * $sxx - $sxy * $sxx2) / ($sxx * $sx2x2 - $sxx2 * $sxx2)
  | .a = $ym - .b * $xm - .c * $x2m ;

# Input: {a,b,c,z}
def report:
  def lpad($len): tostring | ($len - length) as $l | (" " * $l) + .;
  def abc($x):  .a + .b * $x + .c * $x * $x;
  def print($p): "\($p[0] | lpad(3)) \($p[1] | lpad(4)) \(abc($p[0]) | lpad(5))";
  
  "y = \(.a) + \(.b)x + \(.c)x^2\n",
  "  Input   Approximation",
  "  x    y     y\u0302",
   print(.z[]) ;
  
def x: [range(0;11)];
def y: [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321];

polynomialRegression(x; y)
| report
Output:
y = 1 + 2x + 3x^2

  Input   Approximation
  x    y     ŷ
  0    1     1
  1    6     6
  2   17    17
  3   34    34
  4   57    57
  5   86    86
  6  121   121
  7  162   162
  8  209   209
  9  262   262
 10  321   321

Julia

Works with: Julia version 0.6

The least-squares fit problem for a degree n can be solved with the built-in backslash operator (coefficients in increasing order of degree):

polyfit(x::Vector, y::Vector, deg::Int) = collect(v ^ p for v in x, p in 0:deg) \ y

x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
y = [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321]
@show polyfit(x, y, 2)
Output:
polyfit(x, y, 2) = [1.0, 2.0, 3.0]

Kotlin

Translation of: REXX
// version 1.1.51

fun polyRegression(x: IntArray, y: IntArray) {
    val xm = x.average()
    val ym = y.average()    
    val x2m = x.map { it * it }.average()
    val x3m = x.map { it * it * it }.average()
    val x4m = x.map { it * it * it * it }.average()
    val xym = x.zip(y).map { it.first * it.second }.average()
    val x2ym = x.zip(y).map { it.first * it.first * it.second }.average()

    val sxx = x2m - xm * xm
    val sxy = xym - xm * ym
    val sxx2 = x3m - xm * x2m
    val sx2x2 = x4m - x2m * x2m
    val sx2y = x2ym - x2m * ym

    val b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    val c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    val a = ym - b * xm - c * x2m

    fun abc(xx: Int) = a + b * xx + c * xx * xx

    println("y = $a + ${b}x + ${c}x^2\n")
    println(" Input  Approximation")
    println(" x   y     y1")
    for ((xi, yi) in x zip y) {
        System.out.printf("%2d %3d  %5.1f\n", xi, yi, abc(xi))
    }
}

fun main() {
    val x = IntArray(11) { it }
    val y = intArrayOf(1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321)
    polyRegression(x, y)
}
Output:
y = 1.0 + 2.0x + 3.0x^2

 Input  Approximation
 x   y     y1
 0   1    1.0
 1   6    6.0
 2  17   17.0
 3  34   34.0
 4  57   57.0
 5  86   86.0
 6 121  121.0
 7 162  162.0
 8 209  209.0
 9 262  262.0
10 321  321.0

Lua

Translation of: Modula-2
function eval(a,b,c,x)
    return a + (b + c * x) * x
end

function regression(xa,ya)
    local n = #xa

    local xm = 0.0
    local ym = 0.0
    local x2m = 0.0
    local x3m = 0.0
    local x4m = 0.0
    local xym = 0.0
    local x2ym = 0.0

    for i=1,n do
        xm = xm + xa[i]
        ym = ym + ya[i]
        x2m = x2m + xa[i] * xa[i]
        x3m = x3m + xa[i] * xa[i] * xa[i]
        x4m = x4m + xa[i] * xa[i] * xa[i] * xa[i]
        xym = xym + xa[i] * ya[i]
        x2ym = x2ym + xa[i] * xa[i] * ya[i]
    end
    xm = xm / n
    ym = ym / n
    x2m = x2m / n
    x3m = x3m / n
    x4m = x4m / n
    xym = xym / n
    x2ym = x2ym / n

    local sxx = x2m - xm * xm
    local sxy = xym - xm * ym
    local sxx2 = x3m - xm * x2m
    local sx2x2 = x4m - x2m * x2m
    local sx2y = x2ym - x2m * ym

    local b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    local c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    local a = ym - b * xm - c * x2m

    print("y = "..a.." + "..b.."x + "..c.."x^2")
    
    for i=1,n do
        print(string.format("%2d %3d  %3d", xa[i], ya[i], eval(a, b, c, xa[i])))
    end
end

local xa = {0, 1,  2,  3,  4,  5,   6,   7,   8,   9,  10}
local ya = {1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321}
regression(xa, ya)
Output:
y = 1 + 2x + 3x^2
 0   1    1
 1   6    6
 2  17   17
 3  34   34
 4  57   57
 5  86   86
 6 121  121
 7 162  162
 8 209  209
 9 262  262
10 321  321

Maple

with(CurveFitting);
PolynomialInterpolation([[0, 1], [1, 6], [2, 17], [3, 34], [4, 57], [5, 86], [6, 121], [7, 162], [8, 209], [9, 262], [10, 321]], 'x');

Result:

3*x^2+2*x+1

Mathematica / Wolfram Language

Using the built-in "Fit" function.

data = Transpose@{Range[0, 10], {1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321}};
Fit[data, {1, x, x^2}, x]

Second version: using built-in "InterpolatingPolynomial" function.

Simplify@InterpolatingPolynomial[{{0, 1}, {1, 6}, {2, 17}, {3, 34}, {4, 57}, {5, 86}, {6, 121}, {7, 162}, {8, 209}, {9, 262}, {10, 321}}, x]

WolframAlpha version:

curve fit  (0,1),  (1,6),  (2,17),  (3,34),  (4,57),  (5,86),  (6,121),   (7,162),   (8,209),   (9,262),   (10,321)

Result:

1 + 2x + 3x^2

MATLAB

Matlab has a built-in function "polyfit(x,y,n)" which performs this task. The arguments x and y are vectors parametrized by the index, such that the pairs (x(i), y(i)) are the data points to fit, and the argument n is the order of the polynomial to fit. The output of this function is the coefficients of the polynomial which best fits these x,y value pairs.

>> x = [0,  1,  2,  3,  4,  5,  6,   7,   8,   9,   10];
>> y = [1,  6,  17, 34, 57, 86, 121, 162, 209, 262, 321];
>> polyfit(x,y,2)

ans =

   2.999999999999998   2.000000000000019   0.999999999999956

МК-61/52

Part 1:

ПC	С/П	ПD	ИП9	+	П9	ИПC	ИП5	+	П5
ИПC	x^2	П2	ИП6	+	П6	ИП2	ИПC	*	ИП7
+	П7	ИП2	x^2	ИП8	+	П8	ИПC	ИПD	*
ИПA	+	ПA	ИП2	ИПD	*	ИПB	+	ПB	ИПD
КИП4	С/П	БП	00

Input: В/О x1 С/П y1 С/П x2 С/П y2 С/П ...

Part 2:

ИП5	ПC	ИП6	ПD	П2	ИП7	П3	ИП4	ИПD	*
ИПC	ИП5	*	-	ПD	ИП4	ИП7	*	ИПC	ИП6
*	-	П7	ИП4	ИПA	*	ИПC	ИП9	*	-
ПA	ИП4	ИП3	*	ИП2	ИП5	*	-	П3	ИП4
ИП8	*	ИП2	ИП6	*	-	П8	ИП4	ИПB	*
ИП2	ИП9	*	-	ИПD	*	ИП3	ИПA	*	-
ИПD	ИП8	*	ИП7	ИП3	*	-	/	ПB	ИПA
ИПB	ИП7	*	-	ИПD	/	ПA	ИП9	ИПB	ИП6
*	-	ИПA	ИП5	*	-	ИП4	/	П9	С/П

Result: Р9 = a0, РA = a1, РB = a2.

Modula-2

MODULE PolynomialRegression;
FROM FormatString IMPORT FormatString;
FROM RealStr IMPORT RealToStr;
FROM Terminal IMPORT WriteString,WriteLn,ReadChar;

PROCEDURE Eval(a,b,c,x : REAL) : REAL;
BEGIN
    RETURN a + b*x + c*x*x;
END Eval;

PROCEDURE Regression(x,y : ARRAY OF INTEGER);
VAR
    n,i : INTEGER;
    xm,x2m,x3m,x4m : REAL;
    ym : REAL;
    xym,x2ym : REAL;
    sxx,sxy,sxx2,sx2x2,sx2y : REAL;
    a,b,c : REAL;
    buf : ARRAY[0..63] OF CHAR;
BEGIN
    n := SIZE(x)/SIZE(INTEGER);

    xm := 0.0;
    ym := 0.0;
    x2m := 0.0;
    x3m := 0.0;
    x4m := 0.0;
    xym := 0.0;
    x2ym := 0.0;
    FOR i:=0 TO n-1 DO
        xm := xm + FLOAT(x[i]);
        ym := ym + FLOAT(y[i]);
        x2m := x2m + FLOAT(x[i]) * FLOAT(x[i]);
        x3m := x3m + FLOAT(x[i]) * FLOAT(x[i]) * FLOAT(x[i]);
        x4m := x4m + FLOAT(x[i]) * FLOAT(x[i]) * FLOAT(x[i]) * FLOAT(x[i]);
        xym := xym + FLOAT(x[i]) * FLOAT(y[i]);
        x2ym := x2ym + FLOAT(x[i]) * FLOAT(x[i]) * FLOAT(y[i]);
    END;
    xm := xm / FLOAT(n);
    ym := ym / FLOAT(n);
    x2m := x2m / FLOAT(n);
    x3m := x3m / FLOAT(n);
    x4m := x4m / FLOAT(n);
    xym := xym / FLOAT(n);
    x2ym := x2ym / FLOAT(n);

    sxx := x2m - xm * xm;
    sxy := xym - xm * ym;
    sxx2 := x3m - xm * x2m;
    sx2x2 := x4m - x2m * x2m;
    sx2y := x2ym - x2m * ym;

    b := (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
    c := (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2);
    a := ym - b * xm - c * x2m;

    WriteString("y = ");
    RealToStr(a, buf);
    WriteString(buf);
    WriteString(" + ");
    RealToStr(b, buf);
    WriteString(buf);
    WriteString("x + ");
    RealToStr(c, buf);
    WriteString(buf);
    WriteString("x^2");
    WriteLn;

    FOR i:=0 TO n-1 DO
        FormatString("%2i %3i  ", buf, x[i], y[i]);
        WriteString(buf);
        RealToStr(Eval(a,b,c,FLOAT(x[i])), buf);
        WriteString(buf);
        WriteLn;
    END;
END Regression;

TYPE R = ARRAY[0..10] OF INTEGER;
VAR
    x,y : R;
BEGIN
    x := R{0,1,2,3,4,5,6,7,8,9,10};
    y := R{1,6,17,34,57,86,121,162,209,262,321};
    Regression(x,y);

    ReadChar;
END PolynomialRegression.

Nim

Translation of: Kotlin
import lenientops, sequtils, stats, strformat

proc polyRegression(x, y: openArray[int]) =

  let xm = mean(x)
  let ym = mean(y)
  let x2m = mean(x.mapIt(it * it))
  let x3m = mean(x.mapIt(it * it * it))
  let x4m = mean(x.mapIt(it * it * it * it))
  let xym = mean(zip(x, y).mapIt(it[0] * it[1]))
  let x2ym = mean(zip(x, y).mapIt(it[0] * it[0] * it[1]))

  let sxx = x2m - xm * xm
  let sxy = xym - xm * ym
  let sxx2 = x3m - xm * x2m
  let sx2x2 = x4m - x2m * x2m
  let sx2y = x2ym - x2m * ym

  let b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
  let c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
  let a = ym - b * xm - c * x2m

  func abc(x: int): float = a + b * x + c * x * x

  echo &"y = {a} + {b}x + {c}x²\n"
  echo " Input  Approximation"
  echo " x   y     y1"
  for (xi, yi) in zip(x, y):
    echo &"{xi:2} {yi:3}  {abc(xi):5}"


let x = toSeq(0..10)
let y = [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321]
polyRegression(x, y)
Output:
y = 1.0 + 2.0x + 3.0x²

 Input  Approximation
 x   y     y1
 0   1      1
 1   6      6
 2  17     17
 3  34     34
 4  57     57
 5  86     86
 6 121    121
 7 162    162
 8 209    209
 9 262    262
10 321    321

OCaml

Translation of: Kotlin
Library: Base
open Base
open Stdio

let mean fa =
  let open Float in
  (Array.reduce_exn fa ~f:(+)) / (of_int (Array.length fa))

let regression xs ys =
  let open Float in
  let xm = mean xs in
  let ym = mean ys in
  let x2m = Array.map xs ~f:(fun x -> x * x) |> mean in
  let x3m = Array.map xs ~f:(fun x -> x * x * x) |> mean in
  let x4m = Array.map xs ~f:(fun x -> let x2 = x * x in x2 * x2) |> mean in
  let xzipy = Array.zip_exn xs ys in
  let xym = Array.map xzipy ~f:(fun (x, y) -> x * y) |> mean in
  let x2ym = Array.map xzipy ~f:(fun (x, y) -> x * x * y) |> mean in

  let sxx = x2m - xm * xm in
  let sxy = xym - xm * ym in
  let sxx2 = x3m - xm * x2m in
  let sx2x2 = x4m - x2m * x2m in
  let sx2y = x2ym - x2m * ym in

  let b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2) in
  let c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2) in
  let a = ym - b * xm - c * x2m in

  let abc xx = a + b * xx + c * xx * xx in

  printf "y = %.1f + %.1fx + %.1fx^2\n\n" a b c;
  printf " Input  Approximation\n";
  printf " x   y     y1\n";
  Array.iter xzipy ~f:(fun (xi, yi) ->
    printf "%2g %3g  %5.1f\n" xi yi (abc xi)
  )

let () =
  let x = Array.init 11 ~f:Float.of_int in
  let y = [| 1.; 6.; 17.; 34.; 57.; 86.; 121.; 162.; 209.; 262.; 321. |] in
  regression x y
Output:
y = 1.0 + 2.0x + 3.0x^2

 Input  Approximation
 x   y     y1
 0   1    1.0
 1   6    6.0
 2  17   17.0
 3  34   34.0
 4  57   57.0
 5  86   86.0
 6 121  121.0
 7 162  162.0
 8 209  209.0
 9 262  262.0
10 321  321.0

Octave

x = [0:10];
y = [1,   6,  17,  34,  57,  86, 121, 162, 209, 262, 321];
coeffs = polyfit(x, y, 2)

PARI/GP

Lagrange interpolating polynomial:

polinterpolate([0,1,2,3,4,5,6,7,8,9,10],[1,6,17,34,57,86,121,162,209,262,321])

In newer versions, this can be abbreviated:

polinterpolate([0..10],[1,6,17,34,57,86,121,162,209,262,321])
Output:
3*x^2 + 2*x + 1

Least-squares fit:

V=[1,6,17,34,57,86,121,162,209,262,321]~;
M=matrix(#V,3,i,j,(i-1)^(j-1));Polrev(matsolve(M~*M,M~*V))

Code thanks to Bill Allombert

Output:
3*x^2 + 2*x + 1

Least-squares polynomial fit in its own function:

lsf(X,Y,n)=my(M=matrix(#X,n+1,i,j,X[i]^(j-1))); Polrev(matsolve(M~*M,M~*Y~))
lsf([0..10], [1,6,17,34,57,86,121,162,209,262,321], 2)

Perl

This code is identical to that of the Multiple regression task.

use strict;
use warnings;
use Statistics::Regression;

my @x = <0 1 2 3 4 5 6 7 8 9 10>;
my @y = <1 6 17 34 57 86 121 162 209 262 321>;

my @model = ('const', 'X', 'X**2');

my $reg = Statistics::Regression->new( '', [@model] );
$reg->include( $y[$_], [ 1.0, $x[$_], $x[$_]**2 ]) for 0..@y-1;
my @coeff = $reg->theta();

printf "%-6s %8.3f\n", $model[$_], $coeff[$_] for 0..@model-1;
Output:
const     1.000
X         2.000
X**2      3.000

PDL Alternative:

#!/usr/bin/perl -w
use strict;

use PDL;
use PDL::Math;
use PDL::Fit::Polynomial;

my $x = float [0,  1,  2,  3,  4,  5,  6,   7,   8,   9,   10];
my $y = float [1,  6,  17, 34, 57, 86, 121, 162, 209, 262, 321];
# above will output:  3.00000037788248 * $x**2 + 1.99999750988868 * $x + 1.00000180493936

# $x = float [ 0,   1,   2,    3,    4,    5,    6,     7,     8,     9];
# $y = float [ 2.7, 2.8, 31.4, 38.1, 58.0, 76.2, 100.5, 130.0, 149.3, 180.0];
# above correctly returns: " 1.08484845125187 * $x**2 + 10.3551513321297 * $x-0.616363852007752 "

my ($yfit, $coeffs) = fitpoly1d $x, $y, 3;      # 3rd degree

foreach (reverse(0..$coeffs->dim(0)-1)) {
  print " +" unless(($coeffs->at($_) <0) || $_==$coeffs->dim(0)-1); # let the unary minus replace the + operator
  print " ";
  print $coeffs->at($_);
  print " * \$x" if($_);
  print "**$_" if($_>1);
  print "\n" unless($_)
}
Output:
 3.00000037788248 * $x**2 + 1.99999750988868 * $x + 1.00000180493936

Phix

Translation of: REXX
Library: Phix/online
Library: Phix/pGUI

You can run this online here.

-- demo\rosetta\Polynomial_regression.exw
with javascript_semantics
constant x = {0,1,2,3,4,5,6,7,8,9,10},
         y = {1,6,17,34,57,86,121,162,209,262,321},
         n = length(x)
 
function regression()
    atom xm = 0, ym = 0, x2m = 0, x3m = 0, x4m = 0, xym = 0, x2ym = 0
    for i=1 to n do
        atom xi = x[i],
             yi = y[i]
        xm += xi
        ym += yi
        x2m += power(xi,2)
        x3m += power(xi,3)
        x4m += power(xi,4)
        xym += xi*yi
        x2ym += power(xi,2)*yi
    end for
    xm /= n
    ym /= n
    x2m /= n
    x3m /= n
    x4m /= n
    xym /= n
    x2ym /= n
    atom Sxx = x2m-power(xm,2),
         Sxy = xym-xm*ym,
         Sxx2 = x3m-xm*x2m,
         Sx2x2 = x4m-power(x2m,2),
         Sx2y = x2ym-x2m*ym,
         B = (Sxy*Sx2x2-Sx2y*Sxx2)/(Sxx*Sx2x2-power(Sxx2,2)),
         C = (Sx2y*Sxx-Sxy*Sxx2)/(Sxx*Sx2x2-power(Sxx2,2)),
         A = ym-B*xm-C*x2m
    return {C,B,A}
end function
 
atom {a,b,c} = regression()
 
function f(atom x)
    return a*x*x+b*x+c
end function
 
printf(1,"y=%gx^2+%gx+%g\n",{a,b,c})
printf(1,"\n  x   y  f(x)\n")
for i=1 to n do
    printf(1," %2d %3d   %3g\n",{x[i],y[i],f(x[i])})
end for

-- And a simple plot (re-using x,y from above)

include pGUI.e
include IupGraph.e
 
function get_data(Ihandle graph)
    integer {w,h} = IupGetIntInt(graph,"DRAWSIZE")
    IupSetInt(graph,"YTICK",iff(h<240?iff(h<150?80:40):20))
    return {{x,y,CD_RED}}
end function

IupOpen()
Ihandle graph = IupGraph(get_data,"RASTERSIZE=640x440")
IupSetAttributes(graph,"XTICK=1,XMIN=0,XMAX=10")
IupSetAttributes(graph,"YTICK=20,YMIN=0,YMAX=320")
Ihandle dlg = IupDialog(graph,`TITLE="simple plot"`)
IupSetAttributes(dlg,"MINSIZE=245x150")
IupShow(dlg)
if platform()!=JS then 
     IupMainLoop()
     IupClose()
end if
Output:

(plus a simple graphical plot, as per Racket)

y=3x^2+2x+1

  x   y  f(x)
  0   1     1
  1   6     6
  2  17    17
  3  34    34
  4  57    57
  5  86    86
  6 121   121
  7 162   162
  8 209   209
  9 262   262
 10 321   321

PowerShell

function qr([double[][]]$A) {
    $m,$n = $A.count, $A[0].count
    $pm,$pn = ($m-1), ($n-1)
    [double[][]]$Q = 0..($m-1) | foreach{$row = @(0) * $m; $row[$_] = 1; ,$row} 
    [double[][]]$R = $A | foreach{$row = $_; ,@(0..$pn | foreach{$row[$_]})}
    foreach ($h in 0..$pn) { 
        [double[]]$u = $R[$h..$pm] | foreach{$_[$h]} 
        [double]$nu = $u | foreach {[double]$sq = 0} {$sq += $_*$_} {[Math]::Sqrt($sq)} 
        $u[0] -= if ($u[0] -lt 1) {$nu} else {-$nu}
        [double]$nu = $u | foreach {$sq = 0} {$sq += $_*$_} {[Math]::Sqrt($sq)} 
        [double[]]$u = $u | foreach { $_/$nu}
        [double[][]]$v = 0..($u.Count - 1) | foreach{$i = $_; ,($u | foreach{2*$u[$i]*$_})}
        [double[][]]$CR = $R | foreach{$row = $_; ,@(0..$pn | foreach{$row[$_]})}
        [double[][]]$CQ = $Q | foreach{$row = $_; ,@(0..$pm | foreach{$row[$_]})}
        foreach ($i in  $h..$pm) {
            foreach ($j in  $h..$pn) {
                $R[$i][$j] -=  $h..$pm | foreach {[double]$sum = 0} {$sum += $v[$i-$h][$_-$h]*$CR[$_][$j]} {$sum}
            }
        }
        if (0 -eq $h)  {
            foreach ($i in  $h..$pm) {
                foreach ($j in  $h..$pm) {
                    $Q[$i][$j] -=  $h..$pm | foreach {$sum = 0} {$sum += $v[$i][$_]*$CQ[$_][$j]} {$sum}
                }
            }
        } else  {
            $p = $h-1
            foreach ($i in  $h..$pm) {
                foreach ($j in  0..$p) {
                    $Q[$i][$j] -=  $h..$pm | foreach {$sum = 0} {$sum += $v[$i-$h][$_-$h]*$CQ[$_][$j]} {$sum}
                }
                foreach ($j in  $h..$pm) {
                    $Q[$i][$j] -=  $h..$pm | foreach {$sum = 0} {$sum += $v[$i-$h][$_-$h]*$CQ[$_][$j]} {$sum}
                }
            }
        }
    }
    foreach ($i in  0..$pm) {
        foreach ($j in  $i..$pm) {$Q[$i][$j],$Q[$j][$i] = $Q[$j][$i],$Q[$i][$j]}
    }
    [PSCustomObject]@{"Q" = $Q; "R" = $R}
}

function leastsquares([Double[][]]$A,[Double[]]$y) {
    $QR = qr $A
    [Double[][]]$Q = $QR.Q
    [Double[][]]$R = $QR.R
    $m,$n = $A.count, $A[0].count
    [Double[]]$z = foreach ($j in  0..($m-1)) { 
            0..($m-1) | foreach {$sum = 0} {$sum += $Q[$_][$j]*$y[$_]} {$sum}
    }
    [Double[]]$x = @(0)*$n
    for ($i = $n-1; $i -ge 0; $i--) {
        for ($j = $i+1; $j -lt $n; $j++) {
            $z[$i] -= $x[$j]*$R[$i][$j]
        }
        $x[$i] = $z[$i]/$R[$i][$i]
    }
    $x
}

function polyfit([Double[]]$x,[Double[]]$y,$n) {
    $m = $x.Count 
    [Double[][]]$A = 0..($m-1) | foreach{$row = @(1) * ($n+1); ,$row} 
    for ($i = 0; $i -lt $m; $i++) {
        for ($j = $n-1; 0 -le $j; $j--) {
            $A[$i][$j] = $A[$i][$j+1]*$x[$i]
        }
    }
    leastsquares $A $y
}

function show($m) {$m | foreach {write-host "$_"}}

$A = @(@(12,-51,4), @(6,167,-68), @(-4,24,-41))
$x = @(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
$y = @(1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321)
"polyfit "
"X^2 X constant"
"$(polyfit $x $y 2)"
Output:
polyfit 
X^2 X constant
3 1.99999999999998 1.00000000000005

Python

Library: NumPy
>>> import numpy
>>> x = [0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10]
>>> y = [1,   6,  17,  34,  57,  86, 121, 162, 209, 262, 321]
>>> coeffs = numpy.polyfit(x,y,deg=2)
>>> coeffs
array([ 3.,  2.,  1.])

Substitute back received coefficients.

>>> yf = numpy.polyval(numpy.poly1d(coeffs), x)
>>> yf
array([   1.,    6.,   17.,   34.,   57.,   86.,  121.,  162.,  209., 262.,  321.])

Find max absolute error:

>>> '%.1g' % max(y-yf)
'1e-013'

Example

For input arrays `x' and `y':

>>> x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
>>> y = [2.7, 2.8, 31.4, 38.1, 58.0, 76.2, 100.5, 130.0, 149.3, 180.0]
>>> p = numpy.poly1d(numpy.polyfit(x, y, deg=2), variable='N')
>>> print p
       2
1.085 N + 10.36 N - 0.6164

Thus we confirm once more that, for already sorted sequences, the quick sort implementation under consideration has a quadratic dependence on sequence length (see the Example section for Python on the Query Performance page).

R

The easiest (and most robust) approach to solve this in R is to use the base package's lm function which will find the least squares solution via a QR decomposition:

x <- c(0,  1,  2,  3,  4,  5,  6,   7,   8,   9,   10)
y <- c(1,  6,  17, 34, 57, 86, 121, 162, 209, 262, 321)
coef(lm(y ~ x + I(x^2)))
Output:
(Intercept)           x      I(x^2) 
          1           2           3 

Alternately, use poly:

coef(lm(y ~ poly(x, 2, raw=T)))
Output:
         (Intercept) poly(x, 2, raw = T)1 poly(x, 2, raw = T)2 
                   1                    2                    3

Racket

#lang racket
(require math plot)

(define xs '(0 1  2  3  4  5   6   7   8   9  10))
(define ys '(1 6 17 34 57 86 121 162 209 262 321))

(define (fit x y n)
  (define Y (->col-matrix y))
  (define V (vandermonde-matrix x (+ n 1)))
  (define VT (matrix-transpose V))
  (matrix->vector (matrix-solve (matrix* VT V) (matrix* VT Y))))
  
(define ((poly v) x)
  (for/sum ([c v] [i (in-naturals)])
    (* c (expt x i))))

(plot (list (points   (map vector xs ys))
            (function (poly (fit xs ys 2)))))
Output:

(The program opens a plot window showing the input points together with the fitted quadratic curve.)

Raku

(formerly Perl 6) We'll use a Clifford algebra library; it is very slow.

Rationale:

For each data point i the equation a + b·xi + c·xi² = yi must hold, where the unknowns are (a, b, c). Work in an 11-dimensional space with one basis vector e_i per data point, so that each equation is spread over its own dimension.

Define the vectors:

x0 = Σ e_i,   x1 = Σ xi·e_i,   x2 = Σ xi²·e_i,   y = Σ yi·e_i.

The system of equations then becomes:

a·x0 + b·x1 + c·x2 = y.

Hence, wedging both sides with pairs of these vectors (Cramer's rule written with exterior products):

a = (y∧x1∧x2)/(x0∧x1∧x2),   b = (y∧x2∧x0)/(x1∧x2∧x0),   c = (y∧x0∧x1)/(x2∧x0∧x1).

A numerical cross-check of this reasoning follows the output below.

use MultiVector;

constant @x1 = <0 1 2 3 4 5 6 7 8 9 10>;
constant @y = <1 6 17 34 57 86 121 162 209 262 321>;

constant $x0 = [+] @e[^@x1];
constant $x1 = [+] @x1 Z* @e;
constant $x2 = [+] @x1 »**» 2  Z* @e;

constant $y  = [+] @y Z* @e;

.say for
  $y∧$x1∧$x2/($x0∧$x1∧$x2),
  $y∧$x2∧$x0/($x1∧$x2∧$x0),
  $y∧$x0∧$x1/($x2∧$x0∧$x1);
Output:
1
2
3
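
Because the data fit the quadratic exactly, y lies in the span of x0, x1 and x2, so the wedge-product ratios above reduce to ratios of 3x3 minors, i.e. ordinary Cramer's rule applied to any three of the eleven equations. A minimal numerical cross-check of that reasoning, sketched in Python with NumPy (an illustration only, not part of the Raku solution):

import numpy as np

x = np.arange(11)
y = np.array([1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321])

x0, x1, x2 = np.ones(11), x, x**2        # the vectors called $x0, $x1, $x2 above
rows = [0, 1, 2]                         # any three rows with distinct x values work

def minor(*cols):
    # determinant of the 3x3 matrix built from the chosen rows of the given columns
    return np.linalg.det(np.column_stack(cols)[rows])

denom = minor(x0, x1, x2)
print(minor(y, x1, x2) / denom)          # a -> 1.0 (constant term)
print(minor(x0, y, x2) / denom)          # b -> 2.0 (linear term)
print(minor(x0, x1, y) / denom)          # c -> 3.0 (quadratic term)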

REXX

/* REXX ---------------------------------------------------------------
* Implementation of http://keisan.casio.com/exec/system/14059932254941
*--------------------------------------------------------------------*/
xl='0 1  2  3  4  5   6   7   8   9  10'
yl='1 6 17 34 57 86 121 162 209 262 321'
n=11
Do i=1 To n
  Parse Var xl x.i xl
  Parse Var yl y.i yl
  End
xm=0
ym=0
x2m=0
x3m=0
x4m=0
xym=0
x2ym=0
Do i=1 To n
  xm=xm+x.i
  ym=ym+y.i
  x2m=x2m+x.i**2
  x3m=x3m+x.i**3
  x4m=x4m+x.i**4
  xym=xym+x.i*y.i
  x2ym=x2ym+(x.i**2)*y.i
  End
xm =xm /n
ym =ym /n
x2m=x2m/n
x3m=x3m/n
x4m=x4m/n
xym=xym/n
x2ym=x2ym/n
Sxx=x2m-xm**2
Sxy=xym-xm*ym
Sxx2=x3m-xm*x2m
Sx2x2=x4m-x2m**2
Sx2y=x2ym-x2m*ym
B=(Sxy*Sx2x2-Sx2y*Sxx2)/(Sxx*Sx2x2-Sxx2**2)
C=(Sx2y*Sxx-Sxy*Sxx2)/(Sxx*Sx2x2-Sxx2**2)
A=ym-B*xm-C*x2m
Say 'y='a'+'||b'*x+'c'*x**2'
Say ' Input  "Approximation"'
Say ' x   y     y1'
Do i=1 To 11
  Say right(x.i,2) right(y.i,3) format(fun(x.i),5,3)
  End
Exit
fun:
  Parse Arg x
  Return a+b*x+c*x**2
Output:
y=1+2*x+3*x**2
 Input  "Approximation"
 x   y     y1
 0   1     1.000
 1   6     6.000
 2  17    17.000
 3  34    34.000
 4  57    57.000
 5  86    86.000
 6 121   121.000
 7 162   162.000
 8 209   209.000
 9 262   262.000
10 321   321.000

RPL

Translation of: Ada
≪ 1 + → x y n 
   ≪ { } n + x SIZE + 0 CON
      1 x SIZE FOR j
         1 n FOR k
            { } k + j + x j GET k 1 - ^ PUT
      NEXT NEXT
      DUP y * SWAP DUP TRN * /
      @ the following lines convert the resulting vector into a polynomial equation
      DUP 'x' STO 1 GET
      2 x SIZE FOR j 'X' * x j GET + NEXT
      EXPAN COLCT
≫ ≫ 'FIT' STO
[0 1 2 3 4 5 6 7 8 9 10] [1 6 17 34 57 86 121 162 209 262 321] 2 FIT
Output:
1: '3+2*X+1*X^2'

Ruby

require 'matrix'

def regress x, y, degree
  x_data = x.map { |xi| (0..degree).map { |pow| (xi**pow).to_r } }

  mx = Matrix[*x_data]
  my = Matrix.column_vector(y)

  ((mx.t * mx).inv * mx.t * my).transpose.to_a[0].map(&:to_f)
end

Testing:

p regress([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10],
          [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321],
          2)
Output:
[1.0, 2.0, 3.0]

Scala

Output:

You can see it for yourself by running it in your browser on Scastie (remote JVM).

Works with: Scala version 2.13
object PolynomialRegression extends App {
  private def xy = Seq(1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321).zipWithIndex.map(_.swap)

  private def polyRegression(xy: Seq[(Int, Int)]): Unit = {
    val r = xy.indices

    def average[U](ts: Iterable[U])(implicit num: Numeric[U]) = num.toDouble(ts.sum) / ts.size

    def x3m: Double = average(r.map(a => a * a * a))
    def x4m: Double = average(r.map(a => a * a * a * a))
    def x2ym = xy.reduce((a, x) => (a._1 + x._1 * x._1 * x._2, 0))._1.toDouble / xy.size
    def xym = xy.reduce((a, x) => (a._1 + x._1 * x._2, 0))._1.toDouble / xy.size

    val x2m: Double = average(r.map(a => a * a))
    val (xm, ym) = (average(xy.map(_._1)), average(xy.map(_._2)))
    val (sxx, sxy) = (x2m - xm * xm, xym - xm * ym)
    val sxx2: Double = x3m - xm * x2m
    val sx2x2: Double = x4m - x2m * x2m
    val sx2y: Double = x2ym - x2m * ym
    val c: Double = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    val b: Double = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    val a: Double = ym - b * xm - c * x2m

    def abc(xx: Int) = a + b * xx + c * xx * xx

    println(s"y = $a + ${b}x + ${c}x^2")
    println(" Input  Approximation")
    println(" x   y     y1")
    xy.foreach {el => println(f"${el._1}%2d ${el._2}%3d  ${abc(el._1)}%5.1f")}
  }

  polyRegression(xy)

}

Sidef

Translation of: Ruby
func regress(x, y, degree) {
    var A = Matrix.build(x.len, degree+1, {|i,j|
        x[i]**j
    })

    var B = Matrix.column_vector(y...)
    ((A.transpose * A)**(-1) * A.transpose * B).transpose[0]
}

func poly(x) {
    3*x**2 + 2*x + 1
}

var coeff = regress(
    10.of { _ },
    10.of { poly(_) },
    2
)

say coeff
Output:
[1, 2, 3]

Stata

See Factor variables in the Stata help for an explanation of the c.x##c.x syntax, which expands to the main effect c.x plus the squared term c.x#c.x.

. clear
. input x y
0 1
1 6
2 17
3 34
4 57
5 86
6 121
7 162
8 209
9 262
10 321
end

. regress y c.x##c.x

      Source |       SS           df       MS      Number of obs   =        11
-------------+----------------------------------   F(2, 8)         =         .
       Model |      120362         2       60181   Prob > F        =         .
    Residual |           0         8           0   R-squared       =    1.0000
-------------+----------------------------------   Adj R-squared   =    1.0000
       Total |      120362        10     12036.2   Root MSE        =         0

------------------------------------------------------------------------------
           y |      Coef.   Std. Err.      t    P>|t|     [95% Conf. Interval]
-------------+----------------------------------------------------------------
           x |          2          .        .       .            .           .
             |
     c.x#c.x |          3          .        .       .            .           .
             |
       _cons |          1          .        .       .            .           .
------------------------------------------------------------------------------

Swift

Translation of: Kotlin
let x = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
let y = [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321]

func average(_ input: [Int]) -> Int {
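    // Integer division is exact for this task's data: every mean used below is a whole number.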
    return input.reduce(0, +) / input.count
}

func polyRegression(x: [Int], y: [Int]) {
    let xm = average(x)
    let ym = average(y)
    let x2m = average(x.map { $0 * $0 })
    let x3m = average(x.map { $0 * $0 * $0 })
    let x4m = average(x.map { $0 * $0 * $0 * $0 })
    let xym = average(zip(x,y).map { $0 * $1 })
    let x2ym = average(zip(x,y).map { $0 * $0 * $1 })

    let sxx = x2m - xm * xm
    let sxy = xym - xm * ym
    let sxx2 = x3m - xm * x2m
    let sx2x2 = x4m - x2m * x2m
    let sx2y = x2ym - x2m * ym
 
    let b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    let c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    let a = ym - b * xm - c * x2m

    func abc(xx: Int) -> Int {
        return (a + b * xx) + (c * xx * xx)
    }
    
    print("y = \(a) + \(b)x + \(c)x^2\n")
    print(" Input  Approximation")
    print(" x   y     y1")
    
    for i in 0 ..< x.count {
        let result = Double(abc(xx: i))
        print(String(format: "%2d %3d  %5.1f", x[i], y[i], result))
    }
}

polyRegression(x: x, y: y)
Output:
y = 1 + 2x + 3x^2

 Input  Approximation
 x   y     y1
 0   1    1.0
 1   6    6.0
 2  17   17.0
 3  34   34.0
 4  57   57.0
 5  86   86.0
 6 121  121.0
 7 162  162.0
 8 209  209.0
 9 262  262.0
10 321  321.0

Tcl

Library: Tcllib (Package: math::linearalgebra)
package require math::linearalgebra

proc build.matrix {xvec degree} {
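    # the zeroth power sum is just the number of points, so seed the list with it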
    set sums [llength $xvec]
    for {set i 1} {$i <= 2*$degree} {incr i} {
        set sum 0
        foreach x $xvec {
            set sum [expr {$sum + pow($x,$i)}] 
        }
        lappend sums $sum
    }

    set order [expr {$degree + 1}]
    set A [math::linearalgebra::mkMatrix $order $order 0]
    for {set i 0} {$i <= $degree} {incr i} {
        set A [math::linearalgebra::setrow A $i [lrange $sums $i $i+$degree]]
    }
    return $A
}

proc build.vector {xvec yvec degree} {
    set sums [list]
    for {set i 0} {$i <= $degree} {incr i} {
        set sum 0
        foreach x $xvec y $yvec {
            set sum [expr {$sum + $y * pow($x,$i)}] 
        }
        lappend sums $sum
    }

    set x [math::linearalgebra::mkVector [expr {$degree + 1}] 0]
    for {set i 0} {$i <= $degree} {incr i} {
        set x [math::linearalgebra::setelem x $i [lindex $sums $i]]
    }
    return $x
}

# Now, to solve the example from the top of this page
set x {0   1   2   3   4   5   6   7   8   9  10}
set y {1   6  17  34  57  86 121 162 209 262 321}

# build the system A.x=b
set degree 2
set A [build.matrix $x $degree]
set b [build.vector $x $y $degree]
# solve it
set coeffs [math::linearalgebra::solveGauss $A $b]
# show results
puts $coeffs

This will print:

1.0000000000000207 1.9999999999999958 3.0

which is a close approximation to the correct solution.

TI-83 BASIC

DelVar X
seq(X,X,0,10) → L1
{1,6,17,34,57,86,121,162,209,262,321} → L2
QuadReg L1,L2
Output:
y=ax²+bx+c
a=3
b=2
c=1


TI-89 BASIC

DelVar x
seq(x,x,0,10) → xs
{1,6,17,34,57,86,121,162,209,262,321} → ys
QuadReg xs,ys
Disp regeq(x)

seq(expr,var,low,high) evaluates expr with var bound to integers from low to high and returns a list of the results. → is the assignment operator. QuadReg, "quadratic regression", does the fit and stores the details in a number of standard variables, including regeq, which receives the fitted quadratic (polynomial) function itself. We then apply that function to the variable x (left undefined by the initial DelVar) to obtain the fitted expression in terms of x, and display it.

Output:

3.·x² + 2.·x + 1.

Ursala

Library: LAPACK

The fit function defined below returns the coefficients of an nth-degree polynomial in order of descending degree fitting the lists of inputs x and outputs y. The real work is done by the dgelsd function from the lapack library. Ursala provides a simplified interface to this library whereby the data can be passed as lists rather than arrays, and all memory management is handled automatically.

#import std
#import nat
#import flo

(fit "n") ("x","y") = ..dgelsd\"y" (gang \/*pow float*x iota successor "n")* "x"

test program:

x = <0.,1.,2.,3.,4.,5.,6.,7.,8.,9.,10.>
y = <1.,6.,17.,34.,57.,86.,121.,162.,209.,262.,321.>

#cast %eL

example = fit2(x,y)
Output:
<3.000000e+00,2.000000e+00,1.000000e+00>
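
For comparison, the same call pattern sketched in Python with NumPy, whose lstsq routine is also backed by a LAPACK *gelsd driver (an illustrative sketch, not part of the Ursala solution):

import numpy as np

def fit(n, x, y):
    A = np.vander(x, n + 1)                            # design matrix, powers x^n .. x^0
    coeffs, *_ = np.linalg.lstsq(A, y, rcond=None)     # least squares via LAPACK *gelsd
    return coeffs

x = np.arange(11.0)
y = np.array([1., 6., 17., 34., 57., 86., 121., 162., 209., 262., 321.])
print(fit(2, x, y))                                    # -> approximately [3. 2. 1.]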

VBA

Excel VBA has a built-in capability for linear least-squares estimation through the LinEst worksheet function.

Option Base 1
Private Function polynomial_regression(y As Variant, x As Variant, degree As Integer) As Variant
    Dim a() As Double
    ReDim a(UBound(x), degree)
    For i = 1 To UBound(x)
        For j = 1 To degree
            a(i, j) = x(i) ^ j
        Next j
    Next i
    polynomial_regression = WorksheetFunction.LinEst(WorksheetFunction.Transpose(y), a, True, True)
End Function
Public Sub main()
    x = [{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}]
    y = [{1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321}]
    result = polynomial_regression(y, x, 2)
    Debug.Print "coefficients   : ";
    For i = UBound(result, 2) To 1 Step -1
        Debug.Print Format(result(1, i), "0.#####"),
    Next i
    Debug.Print
    Debug.Print "standard errors: ";
    For i = UBound(result, 2) To 1 Step -1
        Debug.Print Format(result(2, i), "0.#####"),
    Next i
    Debug.Print vbCrLf
    Debug.Print "R^2 ="; result(3, 1)
    Debug.Print "F   ="; result(4, 1)
    Debug.Print "Degrees of freedom:"; result(4, 2)
    Debug.Print "Standard error of y estimate:"; result(3, 2)
End Sub
Output:
coefficients   : 1,         2,            3,            
standard errors: 0,         0,            0,            

R^2 = 1 
F   = 7,70461300500498E+31 
Degrees of freedom: 8 
Standard error of y estimate: 2,79482284961344E-14 

Wren

Translation of: REXX
Library: Wren-math
Library: Wren-seq
Library: Wren-fmt
import "./math" for Nums
import "./seq" for Lst
import "./fmt" for Fmt

var polynomialRegression = Fn.new { |x, y|
    var xm   = Nums.mean(x)
    var ym   = Nums.mean(y)
    var x2m  = Nums.mean(x.map { |e| e * e })
    var x3m  = Nums.mean(x.map { |e| e * e * e })
    var x4m  = Nums.mean(x.map { |e| e * e * e * e })
    var z    = Lst.zip(x, y)
    var xym  = Nums.mean(z.map { |p| p[0] * p[1] })
    var x2ym = Nums.mean(z.map { |p| p[0] * p[0] * p[1] })

    var sxx   = x2m - xm * xm
    var sxy   = xym - xm * ym
    var sxx2  = x3m - xm * x2m
    var sx2x2 = x4m - x2m * x2m
    var sx2y  = x2ym - x2m * ym

    var b = (sxy * sx2x2 - sx2y * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    var c = (sx2y * sxx - sxy * sxx2) / (sxx * sx2x2 - sxx2 * sxx2)
    var a = ym - b * xm - c * x2m

    var abc = Fn.new { |xx| a + b * xx + c * xx * xx }

    System.print("y = %(a) + %(b)x + %(c)x^2\n")
    System.print(" Input  Approximation")
    System.print(" x   y     y1")
    for (p in z) Fmt.print("$2d $3d  $5.1f", p[0], p[1], abc.call(p[0]))
}

var x = List.filled(11, 0)
for (i in 1..10) x[i] = i
var y = [1, 6, 17, 34, 57, 86, 121, 162, 209, 262, 321]
polynomialRegression.call(x, y)
Output:
y = 1 + 2x + 3x^2

 Input  Approximation
 x   y     y1
 0   1    1.0
 1   6    6.0
 2  17   17.0
 3  34   34.0
 4  57   57.0
 5  86   86.0
 6 121  121.0
 7 162  162.0
 8 209  209.0
 9 262  262.0
10 321  321.0

zkl

Using the GNU Scientific Library

var [const] GSL=Import("zklGSL");	// libGSL (GNU Scientific Library)
xs:=GSL.VectorFromData(0,  1,  2,  3,  4,  5,   6,   7,   8,   9,  10);
ys:=GSL.VectorFromData(1,  6, 17, 34, 57, 86, 121, 162, 209, 262, 321);
v :=GSL.polyFit(xs,ys,2);
v.format().println();
GSL.Helpers.polyString(v).println();
GSL.Helpers.polyEval(v,xs).format().println();
Output:
1.00,2.00,3.00
 1 + 2x + 3x^2 
1.00,6.00,17.00,34.00,57.00,86.00,121.00,162.00,209.00,262.00,321.00

Or, using lists:

Translation of: Common Lisp

Uses the code from Multiple regression#zkl.

Example:

polyfit(T(T(0.0,1.0,2.0,3.0,4.0,5.0,6.0,7.0,8.0,9.0,10.0)), 
        T(T(1.0,6.0,17.0,34.0,57.0,86.0,121.0,162.0,209.0,262.0,321.0)), 2)
.flatten().println();
Output:
L(1,2,3)