Canny edge detector

You are encouraged to solve this task according to the task description, using any language you may know.
Task

Write a program that performs so-called Canny edge detection on an image.


A possible algorithm consists of the following steps:

  1. Noise reduction.  May be performed by a Gaussian filter.

  2. Compute the intensity gradient  (matrices Gx and Gy)  and its magnitude  G = sqrt(Gx² + Gy²).
     May be performed by convolution of the image with Sobel operators.

  3. Non-maximum suppression.
     For each pixel compute the orientation of the intensity gradient vector:  θ = atan2(Gy, Gx).
     Transform the angle θ to one of four directions:  0, 45, 90, 135 degrees
     (a short sketch of this quantization is given after this list).
     Compute a new array N:  if G(p) > G(pa) and G(p) > G(pb),
     where p is the current pixel and pa, pb are its two neighbour pixels in the direction of the gradient,
     then N(p) = G(p),  otherwise N(p) = 0.
     Nonzero pixels in the resulting array correspond to local maxima of G in the direction θ.

  4. Tracing edges with hysteresis.
     At this stage two thresholds for the values of G are introduced:  Tmin and Tmax.
     Starting from pixels with N(p) > Tmax,
     find all paths of pixels with N(p) > Tmin and put them into the resulting image.
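
As an illustration of the angle quantization in step 3, here is a minimal, self-contained C sketch. It is not taken from any of the implementations below; the function name and the 22.5-degree buckets are simply one possible convention.

#include <math.h>
#include <stdio.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Map a gradient vector (gx, gy) to one of the four directions
 * 0, 45, 90 or 135 degrees used by non-maximum suppression. */
static int quantize_direction(double gx, double gy)
{
    double angle = atan2(gy, gx) * 180.0 / M_PI; /* -180 .. 180 degrees */
    if (angle < 0)
        angle += 180.0;                          /* fold onto 0 .. 180 */
    if (angle < 22.5 || angle >= 157.5) return 0;
    if (angle < 67.5)  return 45;
    if (angle < 112.5) return 90;
    return 135;
}

int main(void)
{
    /* A gradient pointing mostly upwards is closest to 90 degrees. */
    printf("%d\n", quantize_direction(0.2, 1.0)); /* prints 90 */
    return 0;
}

In the C implementation below the same quantization is performed implicitly, by scaling the angle into the range [0, 8) and comparing it against fixed bounds.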



C

The following program reads an 8-bit-per-pixel grayscale BMP file and saves the result to `out.bmp'. Compile with `-lm'.
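
For example, one way to build and run it (the source and input file names here are only illustrative):

gcc -std=c99 -o canny canny.c -lm
./canny input.bmp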

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <float.h>
#include <math.h>
#include <string.h>
#include <stdbool.h>
#include <assert.h>

#define MAX_BRIGHTNESS 255

// C99 doesn't define M_PI (though GNU C99 does), so define it if needed.
#ifndef M_PI
#define M_PI 3.14159265358979323846264338327
#endif

/*
 * Loading part taken from
 * http://www.vbforums.com/showthread.php?t=261522
 * BMP info:
 * http://en.wikipedia.org/wiki/BMP_file_format
 *
 * Note: the magic number has been removed from the bmpfile_header_t
 * structure since it causes alignment problems
 *     bmpfile_magic_t should be written/read first
 * followed by the
 *     bmpfile_header_t
 * [this avoids compiler-specific alignment pragmas etc.]
 */

typedef struct {
    uint8_t magic[2];
} bmpfile_magic_t;

typedef struct {
    uint32_t filesz;
    uint16_t creator1;
    uint16_t creator2;
    uint32_t bmp_offset;
} bmpfile_header_t;

typedef struct {
    uint32_t header_sz;
    int32_t  width;
    int32_t  height;
    uint16_t nplanes;
    uint16_t bitspp;
    uint32_t compress_type;
    uint32_t bmp_bytesz;
    int32_t  hres;
    int32_t  vres;
    uint32_t ncolors;
    uint32_t nimpcolors;
} bitmap_info_header_t;

typedef struct {
    uint8_t r;
    uint8_t g;
    uint8_t b;
    uint8_t nothing;
} rgb_t;

// Use short int instead `unsigned char' so that we can
// store negative values.
typedef short int pixel_t;

pixel_t *load_bmp(const char *filename,
                  bitmap_info_header_t *bitmapInfoHeader)
{
    FILE *filePtr = fopen(filename, "rb");
    if (filePtr == NULL) {
        perror("fopen()");
        return NULL;
    }

    bmpfile_magic_t mag;
    if (fread(&mag, sizeof(bmpfile_magic_t), 1, filePtr) != 1) {
        fclose(filePtr);
        return NULL;
    }

    // verify that this is a BMP file by checking the bitmap id bytes;
    // comparing the bytes directly avoids the type-punning/strict-aliasing
    // issues of reading the magic as a uint16_t
    if (mag.magic[0] != 'B' || mag.magic[1] != 'M') {
        fprintf(stderr, "Not a BMP file: magic=%c%c\n",
                mag.magic[0], mag.magic[1]);
        fclose(filePtr);
        return NULL;
    }

    bmpfile_header_t bitmapFileHeader; // our bitmap file header
    // read the bitmap file header
    if (fread(&bitmapFileHeader, sizeof(bmpfile_header_t),
              1, filePtr) != 1) {
        fclose(filePtr);
        return NULL;
    }

    // read the bitmap info header
    if (fread(bitmapInfoHeader, sizeof(bitmap_info_header_t),
              1, filePtr) != 1) {
        fclose(filePtr);
        return NULL;
    }

    if (bitmapInfoHeader->compress_type != 0)
        fprintf(stderr, "Warning, compression is not supported.\n");

    // move file point to the beginning of bitmap data
    if (fseek(filePtr, bitmapFileHeader.bmp_offset, SEEK_SET)) {
        fclose(filePtr);
        return NULL;
    }

    // allocate enough memory for the bitmap image data
    pixel_t *bitmapImage = malloc(bitmapInfoHeader->bmp_bytesz *
                                  sizeof(pixel_t));

    // verify memory allocation
    if (bitmapImage == NULL) {
        fclose(filePtr);
        return NULL;
    }

    // read in the bitmap image data
    size_t pad, count=0;
    unsigned char c;
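    // BMP rows are stored padded to a multiple of 4 bytes; for this 8 bpp
    // image, `pad' is the number of padding bytes at the end of each row.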
    pad = 4*ceil(bitmapInfoHeader->bitspp*bitmapInfoHeader->width/32.) - bitmapInfoHeader->width;
    for(size_t i=0; i<bitmapInfoHeader->height; i++){
	    for(size_t j=0; j<bitmapInfoHeader->width; j++){
		    if (fread(&c, sizeof(unsigned char), 1, filePtr) != 1) {
			    fclose(filePtr);
			    return NULL;
		    }
		    bitmapImage[count++] = (pixel_t) c;
	    }
	    fseek(filePtr, pad, SEEK_CUR);
    }

    // If we were using unsigned char as pixel_t, then:
    // fread(bitmapImage, 1, bitmapInfoHeader->bmp_bytesz, filePtr);

    // close file and return bitmap image data
    fclose(filePtr);
    return bitmapImage;
}

// Return: true on error.
bool save_bmp(const char *filename, const bitmap_info_header_t *bmp_ih,
              const pixel_t *data)
{
    FILE* filePtr = fopen(filename, "wb");
    if (filePtr == NULL)
        return true;

    bmpfile_magic_t mag = {{0x42, 0x4d}};
    if (fwrite(&mag, sizeof(bmpfile_magic_t), 1, filePtr) != 1) {
        fclose(filePtr);
        return true;
    }

    const uint32_t offset = sizeof(bmpfile_magic_t) +
                            sizeof(bmpfile_header_t) +
                            sizeof(bitmap_info_header_t) +
                            ((1U << bmp_ih->bitspp) * 4);

    const bmpfile_header_t bmp_fh = {
        .filesz = offset + bmp_ih->bmp_bytesz,
        .creator1 = 0,
        .creator2 = 0,
        .bmp_offset = offset
    };

    if (fwrite(&bmp_fh, sizeof(bmpfile_header_t), 1, filePtr) != 1) {
        fclose(filePtr);
        return true;
    }
    if (fwrite(bmp_ih, sizeof(bitmap_info_header_t), 1, filePtr) != 1) {
        fclose(filePtr);
        return true;
    }

    // Palette
    for (size_t i = 0; i < (1U << bmp_ih->bitspp); i++) {
        const rgb_t color = {(uint8_t)i, (uint8_t)i, (uint8_t)i};
        if (fwrite(&color, sizeof(rgb_t), 1, filePtr) != 1) {
            fclose(filePtr);
            return true;
        }
    }

    // We use int instead of uchar, so we can't write img
    // in 1 call any more.
    // fwrite(data, 1, bmp_ih->bmp_bytesz, filePtr);

    // Padding: http://en.wikipedia.org/wiki/BMP_file_format#Pixel_storage
    size_t pad = 4*ceil(bmp_ih->bitspp*bmp_ih->width/32.) - bmp_ih->width;
    unsigned char c;
    for(size_t i=0; i < bmp_ih->height; i++) {
	    for(size_t j=0; j < bmp_ih->width; j++) {
		    c = (unsigned char) data[j + bmp_ih->width*i];
		    if (fwrite(&c, sizeof(char), 1, filePtr) != 1) {
			    fclose(filePtr);
			    return true;
		    }
	    }
	    c = 0;
	    for(size_t j=0; j<pad; j++)
		    if (fwrite(&c, sizeof(char), 1, filePtr) != 1) {
			    fclose(filePtr);
			    return true;
		    }
    }

    fclose(filePtr);
    return false;
}

// if normalize is true, map pixels to range 0..MAX_BRIGHTNESS
void convolution(const pixel_t *in, pixel_t *out, const float *kernel,
                 const int nx, const int ny, const int kn,
                 const bool normalize)
{
    assert(kn % 2 == 1);
    assert(nx > kn && ny > kn);
    const int khalf = kn / 2;
    float min = FLT_MAX, max = -FLT_MAX;

    if (normalize)
        for (int m = khalf; m < nx - khalf; m++)
            for (int n = khalf; n < ny - khalf; n++) {
                float pixel = 0.0;
                size_t c = 0;
                for (int j = -khalf; j <= khalf; j++)
                    for (int i = -khalf; i <= khalf; i++) {
                        pixel += in[(n - j) * nx + m - i] * kernel[c];
                        c++;
                    }
                if (pixel < min)
                    min = pixel;
                if (pixel > max)
                    max = pixel;
                }

    for (int m = khalf; m < nx - khalf; m++)
        for (int n = khalf; n < ny - khalf; n++) {
            float pixel = 0.0;
            size_t c = 0;
            for (int j = -khalf; j <= khalf; j++)
                for (int i = -khalf; i <= khalf; i++) {
                    pixel += in[(n - j) * nx + m - i] * kernel[c];
                    c++;
                }

            if (normalize)
                pixel = MAX_BRIGHTNESS * (pixel - min) / (max - min);
            out[n * nx + m] = (pixel_t)pixel;
        }
}

/*
 * gaussianFilter:
 * http://www.songho.ca/dsp/cannyedge/cannyedge.html
 * determine size of kernel (odd #)
 * 0.0 <= sigma < 0.5 : 3
 * 0.5 <= sigma < 1.0 : 5
 * 1.0 <= sigma < 1.5 : 7
 * 1.5 <= sigma < 2.0 : 9
 * 2.0 <= sigma < 2.5 : 11
 * 2.5 <= sigma < 3.0 : 13 ...
 * kernelSize = 2 * int(2*sigma) + 3;
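 * e.g. sigma = 1.0 gives kernelSize = 2*int(2.0) + 3 = 7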
 */
void gaussian_filter(const pixel_t *in, pixel_t *out,
                     const int nx, const int ny, const float sigma)
{
    const int n = 2 * (int)(2 * sigma) + 3;
    const float mean = (float)floor(n / 2.0);
    float kernel[n * n]; // variable length array

    fprintf(stderr, "gaussian_filter: kernel size %d, sigma=%g\n",
            n, sigma);
    size_t c = 0;
    for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
            kernel[c] = exp(-0.5 * (pow((i - mean) / sigma, 2.0) +
                                    pow((j - mean) / sigma, 2.0)))
                        / (2 * M_PI * sigma * sigma);
            c++;
        }

    convolution(in, out, kernel, nx, ny, n, true);
}

/*
 * Links:
 * http://en.wikipedia.org/wiki/Canny_edge_detector
 * http://www.tomgibara.com/computer-vision/CannyEdgeDetector.java
 * http://fourier.eng.hmc.edu/e161/lectures/canny/node1.html
 * http://www.songho.ca/dsp/cannyedge/cannyedge.html
 *
 * Note: T1 and T2 are lower and upper thresholds.
 */
pixel_t *canny_edge_detection(const pixel_t *in,
                              const bitmap_info_header_t *bmp_ih,
                              const int tmin, const int tmax,
                              const float sigma)
{
    const int nx = bmp_ih->width;
    const int ny = bmp_ih->height;

    pixel_t *G = calloc(nx * ny * sizeof(pixel_t), 1);
    pixel_t *after_Gx = calloc(nx * ny * sizeof(pixel_t), 1);
    pixel_t *after_Gy = calloc(nx * ny * sizeof(pixel_t), 1);
    pixel_t *nms = calloc(nx * ny * sizeof(pixel_t), 1);
    pixel_t *out = malloc(bmp_ih->bmp_bytesz * sizeof(pixel_t));

    if (G == NULL || after_Gx == NULL || after_Gy == NULL ||
        nms == NULL || out == NULL) {
        fprintf(stderr, "canny_edge_detection:"
                " Failed memory allocation(s).\n");
        exit(1);
    }

    gaussian_filter(in, out, nx, ny, sigma);

    const float Gx[] = {-1, 0, 1,
                        -2, 0, 2,
                        -1, 0, 1};

    convolution(out, after_Gx, Gx, nx, ny, 3, false);

    const float Gy[] = { 1, 2, 1,
                         0, 0, 0,
                        -1,-2,-1};

    convolution(out, after_Gy, Gy, nx, ny, 3, false);

    for (int i = 1; i < nx - 1; i++)
        for (int j = 1; j < ny - 1; j++) {
            const int c = i + nx * j;
            // G[c] = abs(after_Gx[c]) + abs(after_Gy[c]);
            G[c] = (pixel_t)hypot(after_Gx[c], after_Gy[c]);
        }

    // Non-maximum suppression, straightforward implementation.
    for (int i = 1; i < nx - 1; i++)
        for (int j = 1; j < ny - 1; j++) {
            const int c = i + nx * j;
            const int nn = c - nx;
            const int ss = c + nx;
            const int ww = c + 1;
            const int ee = c - 1;
            const int nw = nn + 1;
            const int ne = nn - 1;
            const int sw = ss + 1;
            const int se = ss - 1;

            const float dir = (float)(fmod(atan2(after_Gy[c],
                                                 after_Gx[c]) + M_PI,
                                           M_PI) / M_PI) * 8;
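            // dir lies in [0, 8): each unit spans 22.5 degrees, so the
            // branches below quantize the gradient direction to 0, 45, 90
            // or 135 degrees.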

            if (((dir <= 1 || dir > 7) && G[c] > G[ee] &&
                 G[c] > G[ww]) || // 0 deg
                ((dir > 1 && dir <= 3) && G[c] > G[nw] &&
                 G[c] > G[se]) || // 45 deg
                ((dir > 3 && dir <= 5) && G[c] > G[nn] &&
                 G[c] > G[ss]) || // 90 deg
                ((dir > 5 && dir <= 7) && G[c] > G[ne] &&
                 G[c] > G[sw]))   // 135 deg
                nms[c] = G[c];
            else
                nms[c] = 0;
        }

    // Reuse array
    // used as a stack. nx*ny/2 elements should be enough.
    int *edges = (int*) after_Gy;
    memset(out, 0, sizeof(pixel_t) * nx * ny);
    memset(edges, 0, sizeof(pixel_t) * nx * ny);
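    // out doubles as a "visited" marker: pixels already set to MAX_BRIGHTNESS
    // are never pushed onto the edges stack again.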

    // Tracing edges with hysteresis. Non-recursive implementation.
    for (int j = 1; j < ny - 1; j++)
        for (int i = 1; i < nx - 1; i++) {
            const int c = i + nx * j;
            if (nms[c] >= tmax && out[c] == 0) { // trace edges
                out[c] = MAX_BRIGHTNESS;
                int nedges = 1;
                edges[0] = c;

                do {
                    nedges--;
                    const int t = edges[nedges];

                    int nbs[8]; // neighbours
                    nbs[0] = t - nx;     // nn
                    nbs[1] = t + nx;     // ss
                    nbs[2] = t + 1;      // ww
                    nbs[3] = t - 1;      // ee
                    nbs[4] = nbs[0] + 1; // nw
                    nbs[5] = nbs[0] - 1; // ne
                    nbs[6] = nbs[1] + 1; // sw
                    nbs[7] = nbs[1] - 1; // se

                    for (int k = 0; k < 8; k++)
                        if (nms[nbs[k]] >= tmin && out[nbs[k]] == 0) {
                            out[nbs[k]] = MAX_BRIGHTNESS;
                            edges[nedges] = nbs[k];
                            nedges++;
                        }
                } while (nedges > 0);
            }
        }

    free(after_Gx);
    free(after_Gy);
    free(G);
    free(nms);

    return out;
}

int main(const int argc, const char ** const argv)
{
    if (argc < 2) {
        printf("Usage: %s image.bmp\n", argv[0]);
        return 1;
    }

    static bitmap_info_header_t ih;
    const pixel_t *in_bitmap_data = load_bmp(argv[1], &ih);
    if (in_bitmap_data == NULL) {
        fprintf(stderr, "main: BMP image not loaded.\n");
        return 1;
    }

    printf("Info: %d x %d x %d\n", ih.width, ih.height, ih.bitspp);

    const pixel_t *out_bitmap_data =
        canny_edge_detection(in_bitmap_data, &ih, 45, 50, 1.0f);
    if (out_bitmap_data == NULL) {
        fprintf(stderr, "main: failed canny_edge_detection.\n");
        return 1;
    }

    if (save_bmp("out.bmp", &ih, out_bitmap_data)) {
        fprintf(stderr, "main: BMP image not saved.\n");
        return 1;
    }

    free((pixel_t*)in_bitmap_data);
    free((pixel_t*)out_bitmap_data);
    return 0;
}

D

Translation of: C

This version retains some of the style of the original C version but is faster than it, even with the DMD compiler. It loads and saves PGM images using the module from the Grayscale image task.

import core.stdc.stdio, std.math, std.typecons, std.string, std.conv,
       std.algorithm, std.ascii, std.array, bitmap, grayscale_image;

enum maxBrightness = 255;

alias Pixel = short;
alias IntT = typeof(size_t.init.signed);

// If normalize is true, map pixels to range 0...maxBrightness.
void convolution(bool normalize)(in Pixel[] inp, Pixel[] outp,
                                 in float[] kernel,
                                 in IntT nx, in IntT ny, in IntT kn)
pure nothrow @nogc in {
    assert(kernel.length == kn ^^ 2);
    assert(kn % 2 == 1);
    assert(nx > kn && ny > kn);
    assert(inp.length == outp.length);
} body {
    //immutable IntT kn = sqrti(kernel.length);
    immutable IntT khalf = kn / 2;

    static if (normalize) {
        float pMin = float.max, pMax = -float.max;

        foreach (immutable m; khalf .. nx - khalf) {
            foreach (immutable n; khalf .. ny - khalf) {
                float pixel = 0.0;
                size_t c;
                foreach (immutable j; -khalf .. khalf + 1) {
                    foreach (immutable i; -khalf .. khalf + 1) {
                        pixel += inp[(n - j) * nx + m - i] * kernel[c];
                        c++;
                    }
                }

                if (pixel < pMin) pMin = pixel;
                if (pixel > pMax) pMax = pixel;
            }
        }
    }

    foreach (immutable m; khalf .. nx - khalf) {
        foreach (immutable n; khalf .. ny - khalf) {
            float pixel = 0.0;
            size_t c;
            foreach (immutable j; -khalf .. khalf + 1) {
                foreach (immutable i; -khalf .. khalf + 1) {
                    pixel += inp[(n - j) * nx + m - i] * kernel[c];
                    c++;
                }
            }

            static if (normalize)
                pixel = maxBrightness * (pixel - pMin) / (pMax - pMin);
            outp[n * nx + m] = cast(Pixel)pixel;
        }
    }
}


void gaussianFilter(in Pixel[] inp, Pixel[] outp,
                    in IntT nx, in IntT ny, in float sigma)
pure nothrow in {
    assert(inp.length == outp.length);
} body {
    immutable IntT n = 2 * cast(IntT)(2 * sigma) + 3;
    immutable float mean = floor(n / 2.0);
    auto kernel = new float[n * n];

    debug fprintf(stderr,
                  "gaussianFilter: kernel size %d, sigma=%g\n",
                  n, sigma);

    size_t c;
    foreach (immutable i; 0 .. n) {
        foreach (immutable j; 0 .. n) {
            kernel[c] = exp(-0.5 * (((i - mean) / sigma) ^^ 2 +
                                    ((j - mean) / sigma) ^^ 2))
                        / (2 * PI * sigma * sigma);
            c++;
        }
    }

    convolution!true(inp, outp, kernel, nx, ny, n);
}


Image!Pixel cannyEdgeDetection(in Image!Pixel inp,
                               in IntT tMin, in IntT tMax,
                               in float sigma)
pure nothrow in {
    assert(inp !is null);
} body {
    immutable IntT nx = inp.nx.signed;
    immutable IntT ny = inp.ny.signed;
    auto outp = new Pixel[nx * ny];

    gaussianFilter(inp.image, outp, nx, ny, sigma);

    static immutable float[] Gx = [-1, 0, 1,
                                   -2, 0, 2,
                                   -1, 0, 1];
    auto after_Gx = new Pixel[nx * ny];
    convolution!false(outp, after_Gx, Gx, nx, ny, 3);

    static immutable float[] Gy = [ 1, 2, 1,
                                    0, 0, 0,
                                   -1,-2,-1];
    auto after_Gy = new Pixel[nx * ny];
    convolution!false(outp, after_Gy, Gy, nx, ny, 3);

    auto G = new Pixel[nx * ny];
    foreach (i; 1 .. nx - 1)
        foreach (j; 1 .. ny - 1) {
            immutable size_t c = i + nx * j;
            G[c] = cast(Pixel)hypot(after_Gx[c], after_Gy[c]);
        }

    // Non-maximum suppression, straightforward implementation.
    auto nms = new Pixel[nx * ny];
    foreach (immutable i; 1 .. nx - 1)
        foreach (immutable j; 1 .. ny - 1) {
            immutable IntT c = i + nx * j,
                           nn = c - nx,
                           ss = c + nx,
                           ww = c + 1,
                           ee = c - 1,
                           nw = nn + 1,
                           ne = nn - 1,
                           sw = ss + 1,
                           se = ss - 1;

            immutable aux = atan2(double(after_Gy[c]),
                                  double(after_Gx[c])) + PI;
            immutable float dir = float((aux % PI) / PI) * 8;

            if (((dir <= 1 || dir > 7) && G[c] > G[ee] &&
                 G[c] > G[ww]) || // 0 deg.
                ((dir > 1 && dir <= 3) && G[c] > G[nw] &&
                 G[c] > G[se]) || // 45 deg.
                ((dir > 3 && dir <= 5) && G[c] > G[nn] &&
                 G[c] > G[ss]) || // 90 deg.
                ((dir > 5 && dir <= 7) && G[c] > G[ne] &&
                 G[c] > G[sw]))   // 135 deg.
                nms[c] = G[c];
            else
                nms[c] = 0;
        }

    // Reuse the after_Gy storage as a stack of pixel indices. Since IntT is
    // wider than Pixel, only after_Gy.length * Pixel.sizeof / IntT.sizeof
    // elements fit; that should be enough in practice.
    IntT[] edges = (cast(IntT*)after_Gy.ptr)[0 .. after_Gy.length * Pixel.sizeof / IntT.sizeof];
    outp[] = Pixel.init;
    edges[] = 0;

    // Tracing edges with hysteresis. Non-recursive implementation.
    foreach (immutable j; 1 .. ny - 1) {
        foreach (immutable i; 1 .. nx - 1) {
            immutable c = i + nx * j;
            if (nms[c] >= tMax && outp[c] == 0) { // Trace edges.
                outp[c] = maxBrightness;
                IntT nedges = 1;
                edges[0] = c;

                do {
                    nedges--;
                    immutable IntT t = edges[nedges];

                    immutable IntT[8] neighbours = [
                        t - nx,      // nn
                        t + nx,      // ss
                        t + 1,       // ww
                        t - 1,       // ee
                        t - nx + 1,  // nw
                        t - nx - 1,  // ne
                        t + nx + 1,  // sw
                        t + nx - 1]; // se

                    foreach (immutable n; neighbours)
                        if (nms[n] >= tMin && outp[n] == 0) {
                            outp[n] = maxBrightness;
                            edges[nedges] = n;
                            nedges++;
                        }
                } while (nedges > 0);
            }
        }
    }

    return Image!Pixel.fromData(outp, nx, ny);
}


void main(in string[] args) {
    immutable fileName = (args.length == 2) ? args[1] : "lena.pgm";
    Image!Pixel imIn;
    imIn = imIn.loadPGM(fileName);
    printf("Image size: %d x %d\n", imIn.nx, imIn.ny);
    imIn.cannyEdgeDetection(45, 50, 1.0f).savePGM("lena_canny.pgm");
}

FreeBASIC

Translation of: Yabasic
#define MIN(a, b) iif((a) < (b), (a), (b))
#define MAX(a, b) iif((a) > (b), (a), (b))

Const MaxBrightness = 255

' Function to read a PPM file
Function readPPM(nombre As String, Byref ancho As Integer, Byref alto As Integer, image() As Ubyte) As Boolean
    Dim As Integer ff
    Dim As String t, dcol
    
    If nombre = "" Then 
        Print "No PPM file name indicated."
        Return False
    End If
    
    ff = Freefile
    Open nombre For Binary As #ff
    If Err Then 
        Print "File "; nombre; " not found."
        Return False
    End If
    
    Line Input #ff, t
    If t <> "P6" Then 
        Print "File is NOT PPM P6 type."
        Close #ff
        Return False
    End If
    
    Do
        Line Input #ff, t
    Loop While Left(t, 1) = "#"
    
    Dim As Integer posic = 1
    While Mid(t, posic, 1) = " "
        posic += 1
    Wend
    ancho = Val(Mid(t, posic))
    
    While Mid(t, posic, 1) <> " "
        posic += 1
    Wend
    While Mid(t, posic, 1) = " "
        posic += 1
    Wend
    alto = Val(Mid(t, posic))
    
    Line Input #ff, dcol
    
    Redim image(0 To ancho * alto * 3 - 1)
    Get #ff, , image()
    
    Close #ff
    Return True
End Function

Dim As Integer ancho, alto
Dim image() As Ubyte

If readPPM("i:\Lena.ppm", ancho, alto, image()) Then  
    Dim As Integer newAncho = ancho, newAlto = alto
    Dim pixelsGray(newAncho - 1, newAlto - 1) As Integer
    Dim C_E_D(2, 2) As Integer
    
    ' Define edge detection filter
    Dim As Integer dato(8) => {-1, -1, -1, -1, 8, -1, -1, -1, -1}
    Dim As Integer i, j
    For i = 0 To 2
        For j = 0 To 2
            C_E_D(i, j) = dato(i * 3 + j)
        Next j
    Next i
    
    ' Convert image to grayscale
    Dim As Integer x, y, r, g, b, lumin, k
    For y = 0 To newAlto - 1
        For x = 0 To newAncho - 1
            r = image((y * newAncho + x) * 3)
            g = image((y * newAncho + x) * 3 + 1)
            b = image((y * newAncho + x) * 3 + 2)
            lumin = Int(0.2126 * r + 0.7152 * g + 0.0722 * b)
            pixelsGray(x, y) = lumin
        Next x
    Next y
    
    Dim new_image(newAncho - 1, newAlto - 1) As Integer
    Dim As Integer divisor = 1
    
    ' Apply edge detection filter
    Dim As Integer newRGB
    For y = 1 To newAlto - 2
        For x = 1 To newAncho - 2
            newRGB = 0
            For i = -1 To 1
                For j = -1 To 1
                    newRGB += C_E_D(i + 1, j + 1) * pixelsGray(x + i, y + j)
                Next j
            Next i
            new_image(x, y) = Max(Min(newRGB / divisor, 255), 0)
        Next x
    Next y
    
    ' Show the result
    Screenres newAncho, newAlto, 32
    Windowtitle ("Canny edge detector")
    For y = 0 To newAlto - 1
        For x = 0 To newAncho - 1
            k = new_image(x, y)
            Pset (x, y), Rgb(k, k, k)
        Next x
    Next y
Else
    Print "Error loading PPM file."
End If

Sleep

Go

Library: Imger

The example image for this program is the color photograph of a steam engine taken from the Wikipedia article linked to in the task description.

After applying the Canny edge detector, the resulting image is similar to, but not quite the same as, the Wikipedia image, probably due to differences in the parameters used, even though a 5×5 Gaussian filter is used in both cases.

Note that on Linux the extension of the example image file name needs to be changed from .PNG to .png in order for the library used to recognize it.

package main

import (
    ed "github.com/Ernyoke/Imger/edgedetection"
    "github.com/Ernyoke/Imger/imgio"
    "log"
)

func main() {
    img, err := imgio.ImreadRGBA("Valve_original_(1).png")
    if err != nil {
        log.Fatal("Could not read image", err)
    }

    cny, err := ed.CannyRGBA(img, 15, 45, 5)
    if err != nil {
        log.Fatal("Could not perform Canny Edge detection")
    }

    err = imgio.Imwrite(cny, "Valve_canny_(1).png")
    if err != nil {
        log.Fatal("Could not write Canny image to disk")
    }
}

J

In this solution images are represented as 2D arrays of pixels, with first and second axes representing down and right respectively. Each processing step has a specific pixel representation. In the original and Gaussian-filtered images, array elements represent monochromatic intensity values as numbers ranging from 0 (black) to 255 (white). In the intensity gradient image, gradient values are vectors, and are represented as complex numbers, with real and imaginary components representing down and right respectively.

Detected edge and non-edge points are represented as ones and zeros respectively. An edge is a set of connected edge points (points adjacent horizontally, vertically, or diagonally are considered to be connected). In the final image, each edge is represented by assigning its set of points a common unique value.

NB. 2D convolution, filtering, ...

convolve  =: 4 : 'x apply (($x) partition y)'
partition=: 2 1 3 0 |: {:@[ ]\ 2 1 0 |: {.@[ ]\ ]
apply=: [: +/ [: +/ *
max3x3 =: 3 : '(0<1{1{y) * (>./>./y)'
addborder =: (0&,@|:@|.)^:4
normalize =: ]%+/@,
attach =: 3 : 'max3x3 (3 3 partition (addborder y))'
unique =: 3 : 'y*i.$y'
connect =: 3 : 'attach^:_ unique y'

NB. on low memory devices, cropping or resampling of high-resolution images may be required
crop      =: 4 : 0
   'h w h0 w0' =: x
   |: w{. w0}. |: h{. h0}. y
)
resample  =: 4 : '|: (1{-x)(+/%#)\ |: (0{-x)(+/%#)\ y'
NB. on e. g. smartphones, image may need to be expanded for viewing
inflate1 =: 4 : 0
   'h w' =: $y
   r =: ,y
   c =: #r
   rr =: (c$x) # r
   (h,x*w)$rr
)
inflate =: 4 : '|: x inflate1 (|: x inflate1 y)'

NB. Step 1 - gaussian smoothing
step1 =: 3 : 0
   NB. Gaussian kernel (from Wikipedia article)
   <] gaussianKernel =: 5 5$2 4 5 4 2 4 9 12 9 4 5 12 15 12 5 4 9 12 9 4 2 4 5 4 2
   gaussianKernel =: gaussianKernel % 159
   gaussianKernel convolve y
)

NB. Step 2 - gradient
step2 =: 3 : 0
   <] gradientKernel =: 3 3$0 _1 0 0j_1 0 0j1 0 1 0
   gradientKernel convolve y
)

NB. Step 3 - edge detection
step3 =: 3 : 0
   NB. find the octant (eighth of circle) in which the gradient lies
   octant =: 3 : '4|(>.(_0.5+((4%(o. 1))*(12&o. y))))'
   <(i:6)(4 : 'octant (x j. y)')"0/(i:6)

   NB. is this gradient greater than [the projection of] a neighbor?
   greaterThan   =: 4 : ' (9 o.((x|.y)%y))<1'

   NB. is this gradient the greatest of its immediate collinear neighbors?
   greatestOf   =: 4 : '(x greaterThan y) *. ((-x) greaterThan y)'

   NB. relative address of neighbor relevant to grad direction
   krnl0 =. _1  0
   krnl1 =. _1 _1
   krnl2 =.  0 _1
   krnl3 =.  1 _1

   image =. y
   og =. octant image

   NB. mask for maximum gradient collinear with gradient
   ok0 =. (0=og) *. krnl0 greatestOf image
   ok1 =. (1=og) *. krnl1 greatestOf image
   ok2 =. (2=og) *. krnl2 greatestOf image
   ok3 =. (3=og) *. krnl3 greatestOf image
   image *. (ok0 +. ok1 +. ok2 +. ok3)
)

NB. Step 4 - Weak edge suppression
step4 =: 3 : 0
   magnitude =. 10&o. y
   NB. weak, strong thresholds
   NB. TODO: parameter picker algorithm or helper
   threshholds =. 1e14 1e15
   nearbyKernel =. 3 3 $ 4 1 4 # 1 0 1
   weak   =. magnitude > 0{threshholds
   strong =. magnitude > 1{threshholds
   strongs =. addborder (nearbyKernel convolve strong) > 0
   strong +. (weak *. strongs)
)

NB. given the edge points, find the edges
  step5 =: connect

canny =: step5 @ step4 @ step3 @ step2 @ step1

The above implementation solves the 'inner problem' of Canny edge detection in the J language, with no external dependencies. J's Qt IDE provides additional support, including interfaces to image file formats, graphics display, and the user. The following code exercises these features.

The file 'valve.png' referenced in this code is from one of several Wikipedia articles on edge detection. It can be viewed at [https://upload.wikimedia.org/wikipedia/commons/2/2e/Valve_gaussian_%282%29.PNG]

require 'gl2'
coclass 'edge'
coinsert'jgl2'

PJ=: jpath '~Projects/edges/' NB. optionally install and run as project under IDE
load PJ,'canny.ijs'

run=: 3 : 0
   wd 'pc form;pn canny'
   wd 'cc txt static;cn "Canny in J";'
   wd 'cc png isidraw'
   wd 'cc inc button;cn "Next";'
   wd 'pshow'
   glclear''
   image =: readimg_jqtide_ PJ,'valve.png'
   image =: 240 360 120 150 crop image
   edges =: canny 256 | image
   ids =: }. ~.,edges
   nids =: # ids
   case =: 0
)

form_inc_button =: 3 : 0
   select. case
   case. 0 do.
      wd 'set txt text "original image";'
      img =: 255 setalpha image
   case. 1 do.
      wd 'set txt text "points on edges";'
      img =: edges>0
      img =: 1-img
      img =: img * (+/ 256^i.3) * 255
      img =: 255 setalpha img
      ix =: 0
   case. 2 do.
      wd 'set txt text "... iterating over edges with >75 points ...";'
      img =: edges=ix{ids
      whilst. (num<75) *. (ix<nids) do. 
         img =: edges=ix{ids
         num =: +/,img
         ix=:>:ix 
         if. ix=#ids do. case=:_1 end.
      end.
      img =: 1-img
      img =: img * (+/ 256^i.3) * 255
      img =: 255 setalpha img
      ix =: (#ids)|(>:ix)
   end.
   if. case<2 do. case =: >: case end.
   NB. img =: 5 inflate img      NB. might need this for high-res cellphone display
   glfill 255 128 255
   glpixels 0 0,(|.$img), ,img
   glpaint''
)

form_close=: exit bind 0

run''

Java

The code is by Tom Gibara (http://www.tomgibara.com/).

It is implemented as a single Java class.

import java.awt.image.BufferedImage;
import java.util.Arrays;

/**
 * <p><em>This software has been released into the public domain.
 * <strong>Please read the notes in this source file for additional information.
 * </strong></em></p>
 * 
 * <p>This class provides a configurable implementation of the Canny edge
 * detection algorithm. This classic algorithm has a number of shortcomings,
 * but remains an effective tool in many scenarios. <em>This class is designed
 * for single threaded use only.</em></p>
 * 
 * <p>Sample usage:</p>
 * 
 * <pre><code>
 * //create the detector
 * CannyEdgeDetector detector = new CannyEdgeDetector();
 * //adjust its parameters as desired
 * detector.setLowThreshold(0.5f);
 * detector.setHighThreshold(1f);
 * //apply it to an image
 * detector.setSourceImage(frame);
 * detector.process();
 * BufferedImage edges = detector.getEdgesImage();
 * </code></pre>
 * 
 * <p>For a more complete understanding of this edge detector's parameters
 * consult an explanation of the algorithm.</p>
 * 
 * @author Tom Gibara
 *
 */

public class CannyEdgeDetector {

	// statics
	
	private final static float GAUSSIAN_CUT_OFF = 0.005f;
	private final static float MAGNITUDE_SCALE = 100F;
	private final static float MAGNITUDE_LIMIT = 1000F;
	private final static int MAGNITUDE_MAX = (int) (MAGNITUDE_SCALE * MAGNITUDE_LIMIT);

	// fields
	
	private int height;
	private int width;
	private int picsize;
	private int[] data;
	private int[] magnitude;
	private BufferedImage sourceImage;
	private BufferedImage edgesImage;
	
	private float gaussianKernelRadius;
	private float lowThreshold;
	private float highThreshold;
	private int gaussianKernelWidth;
	private boolean contrastNormalized;

	private float[] xConv;
	private float[] yConv;
	private float[] xGradient;
	private float[] yGradient;
	
	// constructors
	
	/**
	 * Constructs a new detector with default parameters.
	 */
	
	public CannyEdgeDetector() {
		lowThreshold = 2.5f;
		highThreshold = 7.5f;
		gaussianKernelRadius = 2f;
		gaussianKernelWidth = 16;
		contrastNormalized = false;
	}

	// accessors
	
	/**
	 * The image that provides the luminance data used by this detector to
	 * generate edges.
	 * 
	 * @return the source image, or null
	 */
	
	public BufferedImage getSourceImage() {
		return sourceImage;
	}
	
	/**
	 * Specifies the image that will provide the luminance data in which edges
	 * will be detected. A source image must be set before the process method
	 * is called.
	 *  
	 * @param image a source of luminance data
	 */
	
	public void setSourceImage(BufferedImage image) {
		sourceImage = image;
	}

	/**
	 * Obtains an image containing the edges detected during the last call to
	 * the process method. The buffered image is an opaque image of type
	 * BufferedImage.TYPE_INT_ARGB in which edge pixels are white and all other
	 * pixels are black.
	 * 
	 * @return an image containing the detected edges, or null if the process
	 * method has not yet been called.
	 */
	
	public BufferedImage getEdgesImage() {
		return edgesImage;
	}
 
	/**
	 * Sets the edges image. Calling this method will not change the operation
	 * of the edge detector in any way. It is intended to provide a means by
	 * which the memory referenced by the detector object may be reduced.
	 * 
	 * @param edgesImage expected (though not required) to be null
	 */
	
	public void setEdgesImage(BufferedImage edgesImage) {
		this.edgesImage = edgesImage;
	}

	/**
	 * The low threshold for hysteresis. The default value is 2.5.
	 * 
	 * @return the low hysteresis threshold
	 */
	
	public float getLowThreshold() {
		return lowThreshold;
	}
	
	/**
	 * Sets the low threshold for hysteresis. Suitable values for this parameter
	 * must be determined experimentally for each application. It is nonsensical
	 * (though not prohibited) for this value to exceed the high threshold value.
	 * 
	 * @param threshold a low hysteresis threshold
	 */
	
	public void setLowThreshold(float threshold) {
		if (threshold < 0) throw new IllegalArgumentException();
		lowThreshold = threshold;
	}
 
	/**
	 * The high threshold for hysteresis. The default value is 7.5.
	 * 
	 * @return the high hysteresis threshold
	 */
	
	public float getHighThreshold() {
		return highThreshold;
	}
	
	/**
	 * Sets the high threshold for hysteresis. Suitable values for this
	 * parameter must be determined experimentally for each application. It is
	 * nonsensical (though not prohibited) for this value to be less than the
	 * low threshold value.
	 * 
	 * @param threshold a high hysteresis threshold
	 */
	
	public void setHighThreshold(float threshold) {
		if (threshold < 0) throw new IllegalArgumentException();
		highThreshold = threshold;
	}

	/**
	 * The number of pixels across which the Gaussian kernel is applied.
	 * The default value is 16.
	 * 
	 * @return the radius of the convolution operation in pixels
	 */
	
	public int getGaussianKernelWidth() {
		return gaussianKernelWidth;
	}
	
	/**
	 * The number of pixels across which the Gaussian kernel is applied.
	 * This implementation will reduce the radius if the contribution of pixel
	 * values is deemed negligible, so this is actually a maximum radius.
	 * 
	 * @param gaussianKernelWidth a radius for the convolution operation in
	 * pixels, at least 2.
	 */
	
	public void setGaussianKernelWidth(int gaussianKernelWidth) {
		if (gaussianKernelWidth < 2) throw new IllegalArgumentException();
		this.gaussianKernelWidth = gaussianKernelWidth;
	}

	/**
	 * The radius of the Gaussian convolution kernel used to smooth the source
	 * image prior to gradient calculation. The default value is 2.
	 * 
	 * @return the Gaussian kernel radius in pixels
	 */
	
	public float getGaussianKernelRadius() {
		return gaussianKernelRadius;
	}
	
	/**
	 * Sets the radius of the Gaussian convolution kernel used to smooth the
	 * source image prior to gradient calculation.
	 * 
	 * @param gaussianKernelRadius a Gaussian kernel radius in pixels, must exceed 0.1f.
	 */
	
	public void setGaussianKernelRadius(float gaussianKernelRadius) {
		if (gaussianKernelRadius < 0.1f) throw new IllegalArgumentException();
		this.gaussianKernelRadius = gaussianKernelRadius;
	}
	
	/**
	 * Whether the luminance data extracted from the source image is normalized
	 * by linearizing its histogram prior to edge extraction. The default value
	 * is false.
	 * 
	 * @return whether the contrast is normalized
	 */
	
	public boolean isContrastNormalized() {
		return contrastNormalized;
	}
	
	/**
	 * Sets whether the contrast is normalized
	 * @param contrastNormalized true if the contrast should be normalized,
	 * false otherwise
	 */
	
	public void setContrastNormalized(boolean contrastNormalized) {
		this.contrastNormalized = contrastNormalized;
	}
	
	// methods
	
	public void process() {
		width = sourceImage.getWidth();
		height = sourceImage.getHeight();
		picsize = width * height;
		initArrays();
		readLuminance();
		if (contrastNormalized) normalizeContrast();
		computeGradients(gaussianKernelRadius, gaussianKernelWidth);
		int low = Math.round(lowThreshold * MAGNITUDE_SCALE);
		int high = Math.round( highThreshold * MAGNITUDE_SCALE);
		performHysteresis(low, high);
		thresholdEdges();
		writeEdges(data);
	}
 
	// private utility methods
	
	private void initArrays() {
		if (data == null || picsize != data.length) {
			data = new int[picsize];
			magnitude = new int[picsize];

			xConv = new float[picsize];
			yConv = new float[picsize];
			xGradient = new float[picsize];
			yGradient = new float[picsize];
		}
	}
	
	//NOTE: The elements of the method below (specifically the technique for
	//non-maximal suppression and the technique for gradient computation)
	//are derived from an implementation posted in the following forum (with the
	//clear intent of others using the code):
	//  http://forum.java.sun.com/thread.jspa?threadID=546211&start=45&tstart=0
	//My code effectively mimics the algorithm exhibited above.
	//Since I don't know the provenance of the code that was posted it is a
	//possibility (though I think a very remote one) that this code violates
	//someone's intellectual property rights. If this concerns you feel free to
	//contact me for an alternative, though less efficient, implementation.
	
	private void computeGradients(float kernelRadius, int kernelWidth) {
		
		//generate the gaussian convolution masks
		float kernel[] = new float[kernelWidth];
		float diffKernel[] = new float[kernelWidth];
		int kwidth;
		for (kwidth = 0; kwidth < kernelWidth; kwidth++) {
			float g1 = gaussian(kwidth, kernelRadius);
			if (g1 <= GAUSSIAN_CUT_OFF && kwidth >= 2) break;
			float g2 = gaussian(kwidth - 0.5f, kernelRadius);
			float g3 = gaussian(kwidth + 0.5f, kernelRadius);
			kernel[kwidth] = (g1 + g2 + g3) / 3f / (2f * (float) Math.PI * kernelRadius * kernelRadius);
			diffKernel[kwidth] = g3 - g2;
		}
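		// kernel[] now holds (scaled) samples of a Gaussian used for smoothing, and
		// diffKernel[] holds central differences of the Gaussian, i.e. samples of an
		// approximate Gaussian derivative, used below to compute the gradients.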

		int initX = kwidth - 1;
		int maxX = width - (kwidth - 1);
		int initY = width * (kwidth - 1);
		int maxY = width * (height - (kwidth - 1));
		
		//perform convolution in x and y directions
		for (int x = initX; x < maxX; x++) {
			for (int y = initY; y < maxY; y += width) {
				int index = x + y;
				float sumX = data[index] * kernel[0];
				float sumY = sumX;
				int xOffset = 1;
				int yOffset = width;
				for(; xOffset < kwidth ;) {
					sumY += kernel[xOffset] * (data[index - yOffset] + data[index + yOffset]);
					sumX += kernel[xOffset] * (data[index - xOffset] + data[index + xOffset]);
					yOffset += width;
					xOffset++;
				}
				
				yConv[index] = sumY;
				xConv[index] = sumX;
			}
 
		}
 
		for (int x = initX; x < maxX; x++) {
			for (int y = initY; y < maxY; y += width) {
				float sum = 0f;
				int index = x + y;
				for (int i = 1; i < kwidth; i++)
					sum += diffKernel[i] * (yConv[index - i] - yConv[index + i]);
 
				xGradient[index] = sum;
			}
 
		}

		for (int x = kwidth; x < width - kwidth; x++) {
			for (int y = initY; y < maxY; y += width) {
				float sum = 0.0f;
				int index = x + y;
				int yOffset = width;
				for (int i = 1; i < kwidth; i++) {
					sum += diffKernel[i] * (xConv[index - yOffset] - xConv[index + yOffset]);
					yOffset += width;
				}
 
				yGradient[index] = sum;
			}
 
		}
 
		initX = kwidth;
		maxX = width - kwidth;
		initY = width * kwidth;
		maxY = width * (height - kwidth);
		for (int x = initX; x < maxX; x++) {
			for (int y = initY; y < maxY; y += width) {
				int index = x + y;
				int indexN = index - width;
				int indexS = index + width;
				int indexW = index - 1;
				int indexE = index + 1;
				int indexNW = indexN - 1;
				int indexNE = indexN + 1;
				int indexSW = indexS - 1;
				int indexSE = indexS + 1;
				
				float xGrad = xGradient[index];
				float yGrad = yGradient[index];
				float gradMag = hypot(xGrad, yGrad);

				//perform non-maximal suppression
				float nMag = hypot(xGradient[indexN], yGradient[indexN]);
				float sMag = hypot(xGradient[indexS], yGradient[indexS]);
				float wMag = hypot(xGradient[indexW], yGradient[indexW]);
				float eMag = hypot(xGradient[indexE], yGradient[indexE]);
				float neMag = hypot(xGradient[indexNE], yGradient[indexNE]);
				float seMag = hypot(xGradient[indexSE], yGradient[indexSE]);
				float swMag = hypot(xGradient[indexSW], yGradient[indexSW]);
				float nwMag = hypot(xGradient[indexNW], yGradient[indexNW]);
				float tmp;
				/*
				 * An explanation of what's happening here, for those who want
				 * to understand the source: This performs the "non-maximal
				 * suppression" phase of the Canny edge detection in which we
				 * need to compare the gradient magnitude to that in the
				 * direction of the gradient; only if the value is a local
				 * maximum do we consider the point as an edge candidate.
				 * 
				 * We need to break the comparison into a number of different
				 * cases depending on the gradient direction so that the
				 * appropriate values can be used. To avoid computing the
				 * gradient direction, we use two simple comparisons: first we
				 * check that the partial derivatives have the same sign (1)
				 * and then we check which is larger (2). As a consequence, we
				 * have reduced the problem to one of four identical cases that
				 * each test the central gradient magnitude against the values at
				 * two points with 'identical support'; what this means is that
				 * the geometry required to accurately interpolate the magnitude
				 * of gradient function at those points has an identical
				 * geometry (up to right-angled rotation/reflection).
				 * 
				 * When comparing the central gradient to the two interpolated
				 * values, we avoid performing any divisions by multiplying both
				 * sides of each inequality by the greater of the two partial
				 * derivatives. The common comparand is stored in a temporary
				 * variable (3) and reused in the mirror case (4).
				 * 
				 */
				if (xGrad * yGrad <= (float) 0 /*(1)*/
					? Math.abs(xGrad) >= Math.abs(yGrad) /*(2)*/
						? (tmp = Math.abs(xGrad * gradMag)) >= Math.abs(yGrad * neMag - (xGrad + yGrad) * eMag) /*(3)*/
							&& tmp > Math.abs(yGrad * swMag - (xGrad + yGrad) * wMag) /*(4)*/
						: (tmp = Math.abs(yGrad * gradMag)) >= Math.abs(xGrad * neMag - (yGrad + xGrad) * nMag) /*(3)*/
							&& tmp > Math.abs(xGrad * swMag - (yGrad + xGrad) * sMag) /*(4)*/
					: Math.abs(xGrad) >= Math.abs(yGrad) /*(2)*/
						? (tmp = Math.abs(xGrad * gradMag)) >= Math.abs(yGrad * seMag + (xGrad - yGrad) * eMag) /*(3)*/
							&& tmp > Math.abs(yGrad * nwMag + (xGrad - yGrad) * wMag) /*(4)*/
						: (tmp = Math.abs(yGrad * gradMag)) >= Math.abs(xGrad * seMag + (yGrad - xGrad) * sMag) /*(3)*/
							&& tmp > Math.abs(xGrad * nwMag + (yGrad - xGrad) * nMag) /*(4)*/
					) {
					magnitude[index] = gradMag >= MAGNITUDE_LIMIT ? MAGNITUDE_MAX : (int) (MAGNITUDE_SCALE * gradMag);
					//NOTE: The orientation of the edge is not employed by this
					//implementation. It is a simple matter to compute it at
					//this point as: Math.atan2(yGrad, xGrad);
				} else {
					magnitude[index] = 0;
				}
			}
		}
	}
 
	//NOTE: It is quite feasible to replace the implementation of this method
	//with one which only loosely approximates the hypot function. I've tested
	//simple approximations such as Math.abs(x) + Math.abs(y) and they work fine.
	private float hypot(float x, float y) {
		return (float) Math.hypot(x, y);
	}
 
	private float gaussian(float x, float sigma) {
		return (float) Math.exp(-(x * x) / (2f * sigma * sigma));
	}
 
	private void performHysteresis(int low, int high) {
		//NOTE: this implementation reuses the data array to store both
		//luminance data from the image, and edge intensity from the processing.
		//This is done for memory efficiency, other implementations may wish
		//to separate these functions.
		Arrays.fill(data, 0);
 
		int offset = 0;
		for (int y = 0; y < height; y++) {
			for (int x = 0; x < width; x++) {
				if (data[offset] == 0 && magnitude[offset] >= high) {
					follow(x, y, offset, low);
				}
				offset++;
			}
		}
 	}
 
	private void follow(int x1, int y1, int i1, int threshold) {
		int x0 = x1 == 0 ? x1 : x1 - 1;
		int x2 = x1 == width - 1 ? x1 : x1 + 1;
		int y0 = y1 == 0 ? y1 : y1 - 1;
		int y2 = y1 == height -1 ? y1 : y1 + 1;
		
		data[i1] = magnitude[i1];
		for (int x = x0; x <= x2; x++) {
			for (int y = y0; y <= y2; y++) {
				int i2 = x + y * width;
				if ((y != y1 || x != x1)
					&& data[i2] == 0 
					&& magnitude[i2] >= threshold) {
					follow(x, y, i2, threshold);
					return;
				}
			}
		}
	}

	private void thresholdEdges() {
		for (int i = 0; i < picsize; i++) {
			data[i] = data[i] > 0 ? -1 : 0xff000000;
		}
	}
	
	private int luminance(float r, float g, float b) {
		return Math.round(0.299f * r + 0.587f * g + 0.114f * b);
	}
	
	private void readLuminance() {
		int type = sourceImage.getType();
		if (type == BufferedImage.TYPE_INT_RGB || type == BufferedImage.TYPE_INT_ARGB) {
			int[] pixels = (int[]) sourceImage.getData().getDataElements(0, 0, width, height, null);
			for (int i = 0; i < picsize; i++) {
				int p = pixels[i];
				int r = (p & 0xff0000) >> 16;
				int g = (p & 0xff00) >> 8;
				int b = p & 0xff;
				data[i] = luminance(r, g, b);
			}
		} else if (type == BufferedImage.TYPE_BYTE_GRAY) {
			byte[] pixels = (byte[]) sourceImage.getData().getDataElements(0, 0, width, height, null);
			for (int i = 0; i < picsize; i++) {
				data[i] = (pixels[i] & 0xff);
			}
		} else if (type == BufferedImage.TYPE_USHORT_GRAY) {
			short[] pixels = (short[]) sourceImage.getData().getDataElements(0, 0, width, height, null);
			for (int i = 0; i < picsize; i++) {
				data[i] = (pixels[i] & 0xffff) / 256;
			}
		} else if (type == BufferedImage.TYPE_3BYTE_BGR) {
            byte[] pixels = (byte[]) sourceImage.getData().getDataElements(0, 0, width, height, null);
            int offset = 0;
            for (int i = 0; i < picsize; i++) {
                int b = pixels[offset++] & 0xff;
                int g = pixels[offset++] & 0xff;
                int r = pixels[offset++] & 0xff;
                data[i] = luminance(r, g, b);
            }
        } else {
			throw new IllegalArgumentException("Unsupported image type: " + type);
		}
	}
 
	private void normalizeContrast() {
		int[] histogram = new int[256];
		for (int i = 0; i < data.length; i++) {
			histogram[data[i]]++;
		}
		int[] remap = new int[256];
		int sum = 0;
		int j = 0;
		for (int i = 0; i < histogram.length; i++) {
			sum += histogram[i];
			int target = sum*255/picsize;
			for (int k = j+1; k <=target; k++) {
				remap[k] = i;
			}
			j = target;
		}
		
		for (int i = 0; i < data.length; i++) {
			data[i] = remap[data[i]];
		}
	}
	
	private void writeEdges(int pixels[]) {
		//NOTE: There is currently no mechanism for obtaining the edge data
		//in any other format other than an INT_ARGB type BufferedImage.
		//This may be easily remedied by providing alternative accessors.
		if (edgesImage == null) {
			edgesImage = new BufferedImage(width, height, BufferedImage.TYPE_INT_ARGB);
		}
		edgesImage.getWritableTile(0, 0).setDataElements(0, 0, width, height, pixels);
	}
 
}

Julia

Works with: Julia version 0.6
using Images
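# img is assumed to hold an already-loaded image (e.g. via load("input.png") from FileIO/Images)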

canny_edges = canny(img, sigma = 1.4, upperThreshold = 0.80, lowerThreshold = 0.20)

Mathematica / Wolfram Language

Export["out.bmp", EdgeDetect[Import[InputString[]]]];

Mathematica's EdgeDetect uses Canny edge detection by default. This seems almost like cheating next to all of these giant answers...

MATLAB / Octave

The Image Processing Toolbox provides a function, edge, that offers Canny edge detection as one of its options.

BWImage = edge(GrayscaleImage,'canny');

Nim

Translation of: D
Library: nimPNG

We use the PNG image from the Wikipedia article as input and produce a grayscale PNG image as the result.

import lenientops
import math
import nimPNG

const MaxBrightness = 255

type Pixel = int16    # Used instead of byte to be able to store negative values.

#---------------------------------------------------------------------------------------------------

func convolution*[normalize: static bool](input: seq[Pixel]; output: var seq[Pixel];
                                          kernel: seq[float]; nx, ny, kn: int) =
  ## Do a convolution.
  ## If normalize is true, map pixels to range 0...maxBrightness.

  doAssert kernel.len == kn * kn
  doAssert (kn and 1) == 1
  doAssert nx > kn and ny > kn
  doAssert input.len == output.len

  let khalf = kn div 2

  when normalize:

    var pMin = float.high
    var pMax = -float.high

    for m in khalf..<(nx - khalf):
      for n in khalf..<(ny - khalf):
        var pixel = 0.0
        var c = 0
        for j in -khalf..khalf:
          for i in -khalf..khalf:
            pixel += input[(n - j) * nx + m - i] * kernel[c]
            inc c
        if pixel < pMin:
          pMin = pixel
        if pixel > pMax:
          pMax = pixel

  for m in khalf..<(nx - khalf):
    for n in khalf..<(ny - khalf):
      var pixel = 0.0
      var c = 0
      for j in -khalf..khalf:
        for i in -khalf..khalf:
          pixel += input[(n - j) * nx + m - i] * kernel[c]
          inc c
      when normalize:
        pixel = MaxBrightness * (pixel - pMin) / (pMax - pMin)
      output[n * nx + m] = Pixel(pixel)

#---------------------------------------------------------------------------------------------------

func gaussianFilter(input: seq[Pixel]; output: var seq[Pixel]; nx, ny: int; sigma: float) =
  ## Apply a gaussian filter.

  doAssert input.len == output.len

  let n = 2 * (2 * sigma).toInt + 3
  let mean = floor(n / 2)
  var kernel = newSeq[float](n * n)

  var c = 0
  for i in 0..<n:
    for j in 0..<n:
      kernel[c] = exp(-0.5 * (((i - mean) / sigma) ^ 2 + ((j - mean) / sigma) ^ 2)) /
                  (2 * PI * sigma * sigma)
      inc c

  convolution[true](input, output, kernel, nx, ny, n)

#---------------------------------------------------------------------------------------------------

proc cannyEdgeDetection(input: seq[Pixel];
                        nx, ny: int;
                        tmin, tmax: int;
                        sigma: float): seq[byte] =
  ## Detect edges.
  var output = newSeq[Pixel](input.len)
  gaussianFilter(input, output, nx, ny, sigma)

  const Gx = @[float -1, 0, 1,
                     -2, 0, 2,
                     -1, 0, 1]
  var afterGx = newSeq[Pixel](input.len)
  convolution[false](output, afterGx, Gx, nx, ny, 3)

  const Gy = @[float  1,  2,  1,
                      0,  0,  0,
                     -1, -2, -1]
  var afterGy = newSeq[Pixel](input.len)
  convolution[false](output, afterGy, Gy, nx, ny, 3)

  var g = newSeq[Pixel](input.len)
  for i in 1..(nx - 2):
    for j in 1..(ny - 2):
      let c = i + nx * j
      g[c] = hypot(afterGx[c].toFloat, afterGy[c].toFloat).Pixel

  # Non-maximum suppression: straightforward implementation.
  var nms = newSeq[Pixel](input.len)
  for i in 1..(nx - 2):
    for j in 1..(ny - 2):
      let
        c = i + nx * j
        nn = c - nx
        ss = c + nx
        ww = c + 1
        ee = c - 1
        nw = nn + 1
        ne = nn - 1
        sw = ss + 1
        se = ss - 1
      let aux = arctan2(afterGy[c].toFloat, afterGx[c].toFloat) + PI
      let dir = aux mod PI / PI * 8
      if (((dir <= 1 or dir > 7) and g[c] > g[ee] and g[c] > g[ww]) or      # O°.
          ((dir > 1 and dir <= 3) and g[c] > g[nw] and g[c] > g[se]) or     # 45°.
          ((dir > 3 and dir <= 5) and g[c] > g[nn] and g[c] > g[ss]) or     # 90°.
          ((dir > 5 and dir <= 7) and g[c] > g[ne] and g[c] > g[sw])):      # 135°.
        nms[c] = g[c]
      else:
        nms[c] = 0

  # Tracing edges with hysteresis. Non-recursive implementation.
  var edges = newSeq[int](input.len div 2)
  for item in output.mitems: item = 0
  for j in 1..(ny - 2):
    for i in 1..(nx - 2):
      let c = i + nx * j

      if nms[c] >= tMax and output[c] == 0:
        # Trace edges.
        output[c] = MaxBrightness
        var nedges = 1
        edges[0] = c

        while nedges > 0:
          dec nedges
          let t = edges[nedges]
          let neighbors = [t - nx,      # nn.
                           t + nx,      # ss.
                           t + 1,       # ww.
                           t - 1,       # ee.
                           t - nx + 1,  # nw.
                           t - nx - 1,  # ne.
                           t + nx + 1,  # sw.
                           t + nx - 1]  # se.

          for n in neighbors:
            if nms[n] >= tMin and output[n] == 0:
              output[n] = MaxBrightness
              edges[nedges] = n
              inc nedges

  # Store the result as a sequence of bytes.
  result = newSeqOfCap[byte](output.len)
  for val in output:
    result.add(byte(val))


#———————————————————————————————————————————————————————————————————————————————————————————————————

when isMainModule:

  const
    Input = "Valve.png"
    Output = "Valve_edges.png"

  let pngImage = loadPNG24(seq[byte], Input).get()

  # Convert to grayscale and store luminances as 16 bits signed integers.
  var pixels = newSeq[Pixel](pngImage.width * pngImage.height)
  for i in 0..pixels.high:
    pixels[i] = Pixel(0.2126 * pngImage.data[3 * i] +
                      0.7152 * pngImage.data[3 * i + 1] +
                      0.0722 * pngImage.data[3 * i + 2] + 0.5)

  # Find edges.
  let data = cannyEdgeDetection(pixels, pngImage.width, pngImage.height, 45, 50, 1.0)

  # Save result as a PNG image.
  let status = savePNG(Output, data, LCT_GREY, 8, pngImage.width, pngImage.height)
  if status.isOk:
    echo "File ", Input, " processed. Result is available in file ", Output
  else:
    echo "Error: ", status.error

Perl

Uses Image::EdgeDetect, a non-CPAN module by Sasha Kovar.

# 20220120 Perl programming solution

use strict;
use warnings;

use lib '/home/hkdtam/lib';
use Image::EdgeDetect;

my $detector = Image::EdgeDetect->new();
$detector->process('./input.jpg', './output.jpg') or die; # na.cx/i/pHYdUrV.jpg

Output: (Offsite image file)

Phix

Library: Phix/pGUI

Ported from demo\Arwen32dibdemo\manip.exw (menu entry Manipulate/Filter/Detect Edges, windows-32-bit only) to pGUI.

--
-- demo\rosetta\Canny_Edge_Detection.exw
-- =====================================
--
without js -- imImage, im_width, im_height, im_pixel, IupImageRGB, 
            -- imFileImageLoadBitmap, and IupImageFromImImage()
include pGUI.e
constant TITLE = "Canny Edge Detection",
         IMGFILE = "Valve.png",
         C_E_D = {{-1, -1, -1},
                  {-1,  8, -1},
                  {-1, -1, -1}}

function detect_edges(imImage img)
    integer width = im_width(img),
            height = im_height(img)
    sequence original = repeat(repeat(0,width),height)
    integer fh = length(C_E_D),  hh=(fh-1)/2,
            fw = length(C_E_D[1]), hw=(fw-1)/2,
            divisor = max(sum(C_E_D),1)

    -- read original pixels and make them grey,
    for y=height-1 to 0 by -1 do
        for x=0 to width-1 do
            integer {c1,c2,c3} = im_pixel(img, x, y),
                    grey = floor((c1*114+c2*587+c3*299)/1000)
            original[height-y,x+1] = {grey,grey,grey}
        end for
    end for

    -- then apply an edge detection filter
    sequence new_image = original
    for y=hh+1 to height-hh-1 do
        for x=hw+1 to width-hw-1 do
            sequence newrgb = {0,0,0}
            for i=-hh to +hh do
                for j=-hw to +hw do
                    newrgb = sq_add(newrgb,sq_mul(C_E_D[i+hh+1,j+hw+1],original[y+i,x+j]))
                end for
            end for
            new_image[y,x] = sq_max(sq_min(sq_floor_div(newrgb,divisor),255),0)
        end for
    end for

    new_image = flatten(new_image) -- (as needed by IupImageRGB)
    Ihandle new_img = IupImageRGB(width, height, new_image) 
    return new_img
end function

IupOpen()
imImage im1 = imFileImageLoadBitmap(IMGFILE)
assert(im1!=NULL,"error opening "&IMGFILE)

Ihandle label1 = IupLabel(),
        label2 = IupLabel()
IupSetAttributeHandle(label1, "IMAGE", IupImageFromImImage(im1))
IupSetAttributeHandle(label2, "IMAGE", detect_edges(im1))

Ihandle dlg = IupDialog(IupHbox({label1, label2}),`TITLE="%s"`,{TITLE})
IupShow(dlg)
IupMainLoop()
IupClose()

PHP

A simplified PHP implementation: rather than running the full Canny pipeline, it marks a pixel as an edge when its brightness (the HSV value channel) exceeds that of the pixel below by more than a threshold.

// input: r,g,b in range 0..255
function RGBtoHSV($r, $g, $b) {
	$r = $r/255.; // convert to range 0..1
	$g = $g/255.;
	$b = $b/255.;
	$cols = array("r" => $r, "g" => $g, "b" => $b);
	asort($cols, SORT_NUMERIC);
	$min = array_key_first($cols); // "r", "g" or "b" (smallest channel)
	$max = array_key_last($cols);  // "r", "g" or "b" (largest channel)

	// hue
	if($cols[$min] == $cols[$max]) {
		$h = 0;
	} else {
		if($max == "r") {
			$h = 60. * ( 0 + ( ($cols["g"]-$cols["b"]) / ($cols[$max]-$cols[$min]) ) );
		} elseif ($max == "g") {
			$h = 60. * ( 2 + ( ($cols["b"]-$cols["r"]) / ($cols[$max]-$cols[$min]) ) );
		} elseif ($max == "b") {
			$h = 60. * ( 4 + ( ($cols["r"]-$cols["g"]) / ($cols[$max]-$cols[$min]) ) );
		}
		if($h < 0) {
			$h += 360;
		}
	}

	// saturation
	if($cols[$max] == 0) {
		$s = 0;
	} else {
		$s = ( ($cols[$max]-$cols[$min])/$cols[$max] );
		$s = $s * 255;
	}

	// value (brightness in HSV)
	$v = $cols[$max];
	$v = $v * 255;

	return(array($h, $s, $v));
}

$filename = "image.png";
$dimensions = getimagesize($filename);
$w = $dimensions[0]; // width
$h = $dimensions[1]; // height

$im = imagecreatefrompng($filename);

for($hi=0; $hi < $h-1; $hi++) { // stop one row early: each pixel is compared with the one below it

	for($wi=0; $wi < $w; $wi++) {
		$rgb = imagecolorat($im, $wi, $hi);

		$r = ($rgb >> 16) & 0xFF;
		$g = ($rgb >> 8) & 0xFF;
		$b = $rgb & 0xFF;
		$hsv = RGBtoHSV($r, $g, $b);

		// compare pixel below with current pixel
		$brgb = imagecolorat($im, $wi, $hi+1);
		$br = ($brgb >> 16) & 0xFF;
		$bg = ($brgb >> 8) & 0xFF;
		$bb = $brgb & 0xFF;
		$bhsv = RGBtoHSV($br, $bg, $bb);

		// if this pixel's brightness (V) exceeds that of the pixel below by more than 20, mark an edge
		if($hsv[2]-$bhsv[2] > 20) { 
                    imagesetpixel($im, $wi, $hi, imagecolorallocate($im, 255, 0, 0));
		} 
                else {
		    imagesetpixel($im, $wi, $hi, imagecolorallocate($im, 0, 0, 0));
		}
			
        }
        
}

header('Content-Type: image/png');
imagepng($im);
imagedestroy($im);

Python

In Python, Canny edge detection would normally be done using scikit-image or OpenCV-Python; a minimal sketch of that route follows the listing below. Here is a from-scratch approach using numpy/scipy:

#!/usr/bin/env python3
import numpy as np
from scipy.ndimage import convolve, gaussian_filter #scipy.ndimage.filters is deprecated
import imageio.v2 as imageio #replaces the removed scipy.misc.imread
import matplotlib.pyplot as plt #replaces the removed scipy.misc.imshow

def CannyEdgeDetector(im, blur = 1, highThreshold = 91, lowThreshold = 31):
	im = np.array(im, dtype=float) #Convert to float to prevent clipping values
 
	#Gaussian blur to reduce noise
	im2 = gaussian_filter(im, blur)

	#Use sobel filters to get horizontal and vertical gradients
	im3h = convolve(im2,[[-1,0,1],[-2,0,2],[-1,0,1]]) 
	im3v = convolve(im2,[[1,2,1],[0,0,0],[-1,-2,-1]])

	#Get gradient and direction
	grad = np.power(np.power(im3h, 2.0) + np.power(im3v, 2.0), 0.5)
	theta = np.arctan2(im3v, im3h)
	thetaQ = (np.round(theta * (5.0 / np.pi)) + 5) % 5 #Quantize direction

	#Non-maximum suppression
	gradSup = grad.copy()
	for r in range(im.shape[0]):
		for c in range(im.shape[1]):
			#Suppress pixels at the image edge
			if r == 0 or r == im.shape[0]-1 or c == 0 or c == im.shape[1] - 1:
				gradSup[r, c] = 0
				continue
			tq = thetaQ[r, c] % 4

			if tq == 0: #0 is E-W (horizontal)
				if grad[r, c] <= grad[r, c-1] or grad[r, c] <= grad[r, c+1]:
					gradSup[r, c] = 0
			if tq == 1: #1 is NE-SW
				if grad[r, c] <= grad[r-1, c+1] or grad[r, c] <= grad[r+1, c-1]:
					gradSup[r, c] = 0
			if tq == 2: #2 is N-S (vertical)
				if grad[r, c] <= grad[r-1, c] or grad[r, c] <= grad[r+1, c]:
					gradSup[r, c] = 0
			if tq == 3: #3 is NW-SE
				if grad[r, c] <= grad[r-1, c-1] or grad[r, c] <= grad[r+1, c+1]:
					gradSup[r, c] = 0

	#Double threshold
	strongEdges = (gradSup > highThreshold)

	#Strong has value 2, weak has value 1
	thresholdedEdges = np.array(strongEdges, dtype=np.uint8) + (gradSup > lowThreshold)

	#Tracing edges with hysteresis	
	#Find weak edge pixels near strong edge pixels
	finalEdges = strongEdges.copy()
	currentPixels = []
	for r in range(1, im.shape[0]-1):
		for c in range(1, im.shape[1]-1):	
			if thresholdedEdges[r, c] != 1:
				continue #Not a weak pixel
			
			#Get 3x3 patch	
			localPatch = thresholdedEdges[r-1:r+2,c-1:c+2]
			patchMax = localPatch.max()
			if patchMax == 2:
				currentPixels.append((r, c))
				finalEdges[r, c] = 1

	#Extend strong edges based on current pixels
	while len(currentPixels) > 0:
		newPix = []
		for r, c in currentPixels:
			for dr in range(-1, 2):
				for dc in range(-1, 2):
					if dr == 0 and dc == 0: continue
					r2 = r+dr
					c2 = c+dc
					if thresholdedEdges[r2, c2] == 1 and finalEdges[r2, c2] == 0:
						#Copy this weak pixel to final result
						newPix.append((r2, c2))
						finalEdges[r2, c2] = 1
		currentPixels = newPix

	return finalEdges

if __name__=="__main__":
	im = imageio.imread("test.jpg") #Open image
	if im.ndim == 3: #Convert to greyscale if needed
		im = im[..., :3] @ [0.2126, 0.7152, 0.0722]
	finalEdges = CannyEdgeDetector(im)
	plt.imshow(finalEdges, cmap="gray")
	plt.show()
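
For reference, the library route mentioned above can be sketched as follows, assuming scikit-image is installed; the file names and threshold values are only illustrative:

from skimage import io, feature

# Read the image as greyscale floats in the range 0..1.
im = io.imread("test.jpg", as_gray=True)

# feature.canny performs the whole pipeline: Gaussian smoothing, Sobel
# gradients, non-maximum suppression and hysteresis thresholding.
edges = feature.canny(im, sigma=1.0, low_threshold=0.1, high_threshold=0.3)

# Save the boolean edge map as an 8-bit image.
io.imsave("test_edges.png", (edges * 255).astype("uint8"))

# The OpenCV equivalent on an 8-bit greyscale array is cv2.Canny(img, 50, 150).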

Raku

Admittedly, laziness prevails: an off-the-shelf function from ImageMagick's MagickCore library, wrapped in a small C shim and called via NativeCall, is used instead.

cannyedge.c

#include <stdio.h>
#include <string.h>
#include <magick/MagickCore.h>

int CannyEdgeDetector(
   const char *infile, const char *outfile,
   double radius, double sigma, double lower, double upper ) {

   ExceptionInfo   *exception;
   Image           *image, *processed_image, *output;
   ImageInfo       *input_info;

   exception   = AcquireExceptionInfo();
   input_info  = CloneImageInfo((ImageInfo *) NULL);
   (void) strcpy(input_info->filename, infile);
   image       = ReadImage(input_info, exception);
   output      = NewImageList();
   processed_image = CannyEdgeImage(image,radius,sigma,lower,upper,exception);
   (void) AppendImageToList(&output, processed_image);
   (void) strcpy(output->filename, outfile);
   WriteImage(input_info, output);
                                    // after-party clean up 
   DestroyImage(image);
   output=DestroyImageList(output);
   input_info=DestroyImageInfo(input_info);
   exception=DestroyExceptionInfo(exception);
   MagickCoreTerminus();

   return 0;
}

cannyedge.raku

# 20220103 Raku programming solution
 
use NativeCall;
 
sub CannyEdgeDetector(CArray[uint8], CArray[uint8], num64, num64, num64, num64 
) returns int32 is native( '/home/hkdtam/LibCannyEdgeDetector.so' ) {*};

CannyEdgeDetector( # imagemagick.org/script/command-line-options.php#canny 
   CArray[uint8].new(  'input.jpg'.encode.list, 0), # pbs.org/wgbh/nova/next/wp-content/uploads/2013/09/fingerprint-1024x575.jpg
   CArray[uint8].new( 'output.jpg'.encode.list, 0),
   0e0, 2e0, 0.05e0, 0.05e0
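   # radius 0, sigma 2, lower 0.05, upper 0.05 (the wrapper's radius, sigma, lower, upper parameters)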
)
Build and run:
export PKG_CONFIG_PATH=/usr/lib/pkgconfig
gcc -Wall -fPIC -shared -o LibCannyEdgeDetector.so  cannyedge.c `pkg-config --cflags --libs MagickCore`
raku -c cannyedge.raku && ./cannyedge.raku

Output: (Offsite image file)

Tcl

Library: crimp
package require crimp
package require crimp::pgm

proc readPGM {filename} {
    set f [open $filename rb]
    set data [read $f]
    close $f
    return [crimp read pgm $data]
}
proc writePGM {filename image} {
    crimp write 2file pgm-raw $filename $image
}

proc cannyFilterFile {{inputFile "lena.pgm"} {outputFile "lena_canny.pgm"}} {
    writePGM $outputFile [crimp filter canny sobel [readPGM $inputFile]]
}
cannyFilterFile {*}$argv

Wren

Translation of: C
Library: DOME
Library: Wren-check
import "dome" for Window
import "graphics" for Canvas, Color, ImageData
import "math" for Math
import "./check" for Check

var MaxBrightness = 255

class Canny {
    construct new(inFile, outFile) {
        Window.title = "Canny edge detection"
        var image1 = ImageData.load(inFile)
        var w = image1.width
        var h = image1.height
        Window.resize(w * 2 + 20, h)
        Canvas.resize(w * 2 + 20, h)
        var image2 = ImageData.create(outFile, w, h)
        var pixels = List.filled(w * h, 0)
        var ix = 0
        // convert image1 to gray scale as a list of pixels
        for (y in 0...h) {
            for (x in 0...w) {
                var c1 = image1.pget(x, y)
                var lumin = (0.2126 * c1.r + 0.7152 * c1.g + 0.0722 * c1.b).floor
                pixels[ix] = lumin
                ix = ix + 1
            }
        }

        // find edges
        var data = cannyEdgeDetection(pixels, w, h, 45, 50, 1)

        // write to image2
        ix = 0
        for (y in 0...h) {
            for (x in 0...w) {
                var d = data[ix]
                var c = Color.rgb(d, d, d)
                image2.pset(x, y, c)
                ix = ix + 1
            }
        }

        // display the two images side by side
        image1.draw(0, 0)
        image2.draw(w + 20, 0)

        // save image2 to outFile
        image2.saveToFile(outFile)
    }

    init() {}

    // If normalize is true, map pixels to range 0..MaxBrightness
    convolution(input, output, kernel, nx, ny, kn, normalize) {
        Check.ok((kn % 2) == 1)
        Check.ok(nx > kn && ny > kn)
        var khalf = (kn / 2).floor
        var min = Num.largest
        var max = -min
        if (normalize) {
            for (m in khalf...nx-khalf) {
                for (n in khalf...ny-khalf) {
                    var pixel = 0
                    var c = 0
                    for (j in -khalf..khalf) {
                        for (i in -khalf..khalf) {
                            pixel = pixel + input[(n-j)*nx + m - i] * kernel[c]
                            c = c + 1
                        }
                    }
                    if (pixel < min) min = pixel
                    if (pixel > max) max = pixel
                }
            }
        }

        for (m in khalf...nx-khalf) {
            for (n in khalf...ny-khalf) {
                var pixel = 0
                var c = 0
                for (j in -khalf..khalf) {
                    for (i in -khalf..khalf) {
                        pixel = pixel + input[(n-j)*nx + m - i] * kernel[c]
                        c = c + 1
                    }
                }
                if (normalize) pixel = MaxBrightness * (pixel - min) / (max - min)
                output[n * nx + m] = pixel.truncate
            }
        }
    }

    gaussianFilter(input, output, nx, ny, sigma) {
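        // Kernel size grows with sigma: n = 2 * trunc(2 * sigma) + 3, e.g. sigma = 1 gives a 7x7 kernel.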
        var n = 2 * (2 * sigma).truncate + 3
        var mean = (n / 2).floor
        var kernel = List.filled(n * n, 0)
        System.print("Gaussian filter: kernel size = %(n), sigma = %(sigma)")
        var c = 0
        for (i in 0...n) {
            for (j in 0...n) {
                var t = (-0.5 * (((i - mean) / sigma).pow(2) + ((j - mean) / sigma).pow(2))).exp
                kernel[c] = t / (2 * Num.pi * sigma * sigma)
                c = c + 1
            }
        }
        convolution(input, output, kernel, nx, ny, n, true)
    }

    // Returns the square root of 'x' squared + 'y' squared.
    hypot(x, y) { (x*x + y*y).sqrt }

    cannyEdgeDetection(input, nx, ny, tmin, tmax, sigma) {
        var output = List.filled(input.count, 0)
        gaussianFilter(input, output, nx, ny, sigma)
        var Gx = [-1, 0, 1, -2, 0, 2, -1, 0, 1]
        var afterGx = List.filled(input.count, 0)
        convolution(output, afterGx, Gx, nx, ny, 3, false)
        var Gy = [1, 2, 1, 0, 0, 0, -1, -2, -1]
        var afterGy = List.filled(input.count, 0)
        convolution(output, afterGy, Gy, nx, ny, 3, false)
        var G = List.filled(input.count, 0)
        for (i in 1..nx-2) {
            for (j in 1..ny-2) {
                var c = i + nx * j
                G[c] = hypot(afterGx[c], afterGy[c]).floor
            }
        }

        // non-maximum suppression: straightforward implementation
        var nms = List.filled(input.count, 0)
        for (i in 1..nx-2) {
            for (j in 1..ny-2) {
                var c = i + nx * j
                var nn = c - nx
                var ss = c + nx
                var ww = c + 1
                var ee = c - 1
                var nw = nn + 1
                var ne = nn - 1
                var sw = ss + 1
                var se = ss - 1
                var temp = Math.atan(afterGy[c], afterGx[c]) + Num.pi
                var dir = (temp % Num.pi) / Num.pi * 8
                if (((dir <= 1 || dir > 7) && G[c] > G[ee] && G[c] > G[ww]) ||   // 0°
                    ((dir > 1 && dir <= 3) && G[c] > G[nw] && G[c] > G[se]) ||   // 45°
                    ((dir > 3 && dir <= 5) && G[c] > G[nn] && G[c] > G[ss]) ||   // 90°
                    ((dir > 5 && dir <= 7) && G[c] > G[ne] && G[c] > G[sw])) {   // 135°
                    nms[c] = G[c]
                } else {
                    nms[c] = 0
                }
            }
        }

        // tracing edges with hysteresis: non-recursive implementation
        var edges = List.filled((input.count/2).floor, 0)
        for (i in 0...output.count) output[i] = 0
        var c = 1
        for (j in 1..ny-2) {
            for (i in 1..nx-2) {
                if (nms[c] >= tmax && output[c] == 0) {
                    // trace edges
                    output[c] = MaxBrightness
                    var nedges = 1
                    edges[0] = c
                    while (true) {
                        nedges = nedges - 1
                        var t = edges[nedges]
                        var nbs = [     // neighbors
                           t - nx,      // nn
                           t + nx,      // ss
                           t + 1,       // ww
                           t - 1,       // ee
                           t - nx + 1,  // nw
                           t - nx - 1,  // ne
                           t + nx + 1,  // sw
                           t + nx - 1   // se
                        ]
                        for (n in nbs) {
                            if (nms[n] >= tmin && output[n] == 0) {
                                output[n] = MaxBrightness
                                edges[nedges] = n
                                nedges = nedges + 1
                            }
                        }
                        if (nedges == 0) break
                    }
                }
                c = c + 1
            }
        }
        return output
    }

    update() {}

    draw(alpha) {}
}
var Game = Canny.new("Valve_original.png", "Valve_monochrome_canny.png")

Yabasic

Translation of: Phix
// Rosetta Code problem: http://rosettacode.org/wiki/Canny_edge_detector
// Adapted from Phix to Yabasic by Galileo, 01/2022

import ReadFromPPM2

MaxBrightness = 255

readPPM("Valve.ppm")
print "Be patient, please ..."

width = peek("winwidth")
height = peek("winheight")
dim pixels(width, height), C_E_D(3, 3)

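// 3x3 Laplacian-style edge-detection kernel (same values as the Phix version)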
data -1, -1, -1, -1,  8, -1, -1, -1, -1
for i = 0 to 2
    for j = 0 to 2
        read C_E_D(i, j)
    next
next

// convert image to gray scale
for y = 1 to height
    for x = 1 to width
        c$ = right$(getbit$(x, y, x, y), 6)
        r = dec(left$(c$, 2))
        g = dec(mid$(c$, 3, 2))
        b = dec(right$(c$, 2))
        lumin = floor(0.2126 * r + 0.7152 * g + 0.0722 * b)
        pixels(x, y) = lumin
    next
next

dim new_image(width, height)

divisor = 1

// apply an edge detection filter
    
for y = 2 to height-2
    for x = 2 to width-2
        newrgb = 0
        for i = -1 to 1
            for j = -1 to 1
                newrgb = newrgb + C_E_D(i+1, j+1) * pixels(x+i, y+j)
            next
        next
        new_image(x, y) = max(min(newrgb / divisor,255),0)
    next
next

// show result

for x = 1 to width
    for y = 1 to height
        c = new_image(x, y)
        color c, c, c
        dot x, y
    next
next