Sandbox/Realazthat/Hough transform

From Rosetta Code
{{task/realazthat
|name=Hough transform
|type=
|description=
There is also a spherical Hough transform, which is more suited to identifying planes in 3D data.
|algorithms=
|solutions=

{{solution
|lang=C
|libraries=cairo
|platform=
|algorithm=
|translation=Tcl
|code=<lang c>
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <cairo.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Read the red/green/blue component of pixel (X,Y) of the source image,
   whose rows are (*s) bytes apart and whose pixels are bpp bytes wide. */
#define GR(X,Y) (d[(*s)*(Y)+bpp*(X)+((2)%bpp)])
#define GG(X,Y) (d[(*s)*(Y)+bpp*(X)+((1)%bpp)])
#define GB(X,Y) (d[(*s)*(Y)+bpp*(X)+((0)%bpp)])

/* Write the red/green/blue component of cell (X,Y) of the Hough image,
   which is tw cells wide, th cells tall and 4 bytes per pixel. */
#define SR(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+2])
#define SG(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+1])
#define SB(X,Y) (ht[4*tw*((Y)%th)+4*((X)%tw)+0])

#define RAD(A) (M_PI*((double)(A))/180.0)

uint8_t *houghtransform(uint8_t *d, int *w, int *h, int *s, int bpp)
{
  int rho, theta, y, x, W = *w, H = *h;
  int th = sqrt(W*W + H*H)/2.0;  /* rho ranges over half the image diagonal */
  int tw = 360;                  /* theta in whole degrees */
  uint8_t *ht = malloc(th*tw*4);
  memset(ht, 0, 4*th*tw);        /* black background */

  for(rho = 0; rho < th; rho++)
  {
    for(theta = 0; theta < tw; theta++)
    {
      double C = cos(RAD(theta));
      double S = sin(RAD(theta));
      uint32_t totalred = 0;
      uint32_t totalgreen = 0;
      uint32_t totalblue = 0;
      uint32_t totalpix = 0;
      /* Walk the line x*cos(theta) + y*sin(theta) = rho across the source
         image, stepping along whichever axis samples it most densely, and
         accumulate the colors of the pixels it crosses. */
      if ( theta < 45 || (theta > 135 && theta < 225) || theta > 315) {
        for(y = 0; y < H; y++) {
          double dx = W/2.0 + (rho - (H/2.0-y)*S)/C;
          if ( dx < 0 || dx >= W ) continue;
          x = floor(dx+.5);
          if (x == W) continue;
          totalpix++;
          totalred += GR(x, y);
          totalgreen += GG(x, y);
          totalblue += GB(x, y);
        }
      } else {
        for(x = 0; x < W; x++) {
          double dy = H/2.0 - (rho - (x - W/2.0)*C)/S;
          if ( dy < 0 || dy >= H ) continue;
          y = floor(dy+.5);
          if (y == H) continue;
          totalpix++;
          totalred += GR(x, y);
          totalgreen += GG(x, y);
          totalblue += GB(x, y);
        }
      }
      if ( totalpix > 0 ) {
        /* Store the average color of the sampled line in cell (theta, rho). */
        double dp = totalpix;
        SR(theta, rho) = (int)(totalred/dp) &0xff;
        SG(theta, rho) = (int)(totalgreen/dp) &0xff;
        SB(theta, rho) = (int)(totalblue/dp) &0xff;
      }
    }
  }

  *h = th;   /* sqrt(W*W + H*H)/2 */
  *w = tw;   /* 360 */
  *s = 4*tw;
  return ht;
}
int main(int argc, char **argv)
{
  cairo_surface_t *inputimg = NULL;
  cairo_surface_t *houghimg = NULL;
  uint8_t *houghdata = NULL, *inputdata = NULL;
  int w, h, s, bpp;

  if ( argc < 3 ) return EXIT_FAILURE;

  inputimg = cairo_image_surface_create_from_png(argv[1]);
  w = cairo_image_surface_get_width(inputimg);
  h = cairo_image_surface_get_height(inputimg);
  s = cairo_image_surface_get_stride(inputimg);

  switch(cairo_image_surface_get_format(inputimg))
  {
  case CAIRO_FORMAT_ARGB32: bpp = 4; break;
  case CAIRO_FORMAT_RGB24:  bpp = 4; break; /* cairo stores RGB24 as 32 bits per pixel, upper byte unused */
  case CAIRO_FORMAT_A8:     bpp = 1; break;
  default:
    fprintf(stderr, "unsupported image format\n");
    goto destroy;
  }

  inputdata = cairo_image_surface_get_data(inputimg);
  houghdata = houghtransform(inputdata, &w, &h, &s, bpp);

  printf("w=%d, h=%d\n", w, h);
  houghimg = cairo_image_surface_create_for_data(houghdata,
                                                 CAIRO_FORMAT_RGB24,
                                                 w, h, s);
  cairo_surface_write_to_png(houghimg, argv[2]);

destroy:
  if (inputimg != NULL) cairo_surface_destroy(inputimg);
  if (houghimg != NULL) cairo_surface_destroy(houghimg);
  return EXIT_SUCCESS;
}

</lang>
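A typical build line (assuming pkg-config knows about cairo) is gcc hough.c -o hough `pkg-config --cflags --libs cairo` -lm; the program takes the source PNG as its first argument and writes the transform to the PNG named by its second argument.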
}}


}}

Latest revision as of 17:42, 22 October 2010

Task
Sandbox/Realazthat/Hough transform

You are encouraged to solve this task according to the task description, using any language you may know.

Implement the Hough transform, which is used as part of feature extraction with digital images. It is a tool that makes it far easier to identify straight lines in the source image, whatever their orientation.

The transform maps each point in the target image, (ρ, θ), to the average color of the pixels on the corresponding line of the source image (in (x, y)-space, where the line corresponds to points of the form x cos θ + y sin θ = ρ). The idea is that where there is a straight line in the original image, it corresponds to a bright (or dark, depending on the color of the background field) spot; by applying a suitable filter to the results of the transform, it is possible to extract the locations of the lines in the original image.
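In practice an implementation sweeps one coordinate and solves the line equation for the other. With the origin at the center of the source image (the convention used by the C solution above), the line for a cell <math>(\rho, \theta)</math> is

<math>x\cos\theta + y\sin\theta = \rho,</math>

so one can take <math>x = (\rho - y\sin\theta)/\cos\theta</math> for each row <math>y</math> when <math>|\cos\theta| \ge |\sin\theta|</math>, and <math>y = (\rho - x\cos\theta)/\sin\theta</math> for each column <math>x</math> otherwise; choosing the sweep axis this way keeps the sampling of the line dense.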

Sample PNG image to use for the Hough transform.

The target space actually uses polar coordinates, but is conventionally plotted on rectangular coordinates for display. There's no specification of exactly how to map polar coordinates to a flat surface for display, but a convenient method is to use one axis for θ and the other for ρ, with the center of the source image being the origin.
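As an illustration of that layout, here is a minimal sketch for an 8-bit grayscale image, separate from the cairo-based C solution above; the function name hough_gray, the grayscale input and the one-degree θ steps are illustrative choices, not anything the task prescribes.

<lang c>
#include <stdlib.h>
#include <math.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Illustrative sketch only: map each target cell (theta, rho) to the average
 * intensity of the source pixels on the line x*cos(theta) + y*sin(theta) = rho,
 * with the origin at the center of the w x h, row-major grayscale image `img`.
 * theta runs along one axis (0..359 degrees), rho along the other
 * (0 .. half the image diagonal). */
unsigned char *hough_gray(const unsigned char *img, int w, int h,
                          int *tw_out, int *th_out)
{
    int tw = 360;
    int th = (int)(sqrt((double)w*w + (double)h*h) / 2.0);
    unsigned char *out = calloc((size_t)tw * th, 1);
    int rho, theta;

    for (rho = 0; rho < th; rho++)
        for (theta = 0; theta < tw; theta++) {
            double a = theta * M_PI / 180.0, c = cos(a), s = sin(a);
            unsigned long sum = 0, n = 0;
            int x, y;
            /* Step along the axis that samples the line most densely. */
            if (fabs(c) > fabs(s)) {
                for (y = 0; y < h; y++) {
                    double fx = w/2.0 + (rho - (h/2.0 - y)*s) / c;
                    x = (int)floor(fx + 0.5);
                    if (x < 0 || x >= w) continue;
                    sum += img[(size_t)w*y + x]; n++;
                }
            } else {
                for (x = 0; x < w; x++) {
                    double fy = h/2.0 - (rho - (x - w/2.0)*c) / s;
                    y = (int)floor(fy + 0.5);
                    if (y < 0 || y >= h) continue;
                    sum += img[(size_t)w*y + x]; n++;
                }
            }
            out[(size_t)tw*rho + theta] = n ? (unsigned char)(sum / n) : 0;
        }
    *tw_out = tw;
    *th_out = th;
    return out;   /* caller frees */
}
</lang>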

There is also a spherical Hough transform, which is more suited to identifying planes in 3D data.
