Verify distribution uniformity/Naive: Difference between revisions

C
(→‎{{header|Ruby}}: yield to a block instead of a potentially clumsy proc argument)
(C)
Line 17:
'''See also:'''
*[[Verify Distribution Uniformity with Chi-Squared Test]]
 
=={{header|C}}==
{{libheader|Judy}}
<lang c>#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <math.h>
 
#include <Judy.h>
 
/* Draw n samples from the generator `dist` and check that every
 * observed value occurs within D percent (of n) of the count expected
 * for a uniform distribution.  Returns true when the distribution
 * looks uniform, false (with a message on stderr) otherwise.
 *
 * Two JudyL arrays are used: `h` maps sample value -> occurrence
 * count, and `hi` maps a dense index -> sample value so the observed
 * values can be iterated afterwards (JLF/JLN first/next iteration
 * would also work, but this keeps the walk simple). */
bool distcheck(int (*dist)(), int n, double D)
{
    Pvoid_t h = (Pvoid_t) NULL;   /* value -> occurrence count */
    PWord_t value;
    Pvoid_t hi = (Pvoid_t) NULL;  /* dense index -> value, for iteration */

    Word_t t;                     /* scratch for JLFA's freed-bytes result */
    int i, j, h_length;

    /* Tally the samples.  The first time a value is seen it is also
     * recorded in `hi` so key/count pairs can be walked later. */
    for (i = 0, j = 0; i < n; i++) {
        int rn = dist();
        JLG(value, h, rn);
        if (value == NULL) {
            JLI(value, hi, j);
            *value = rn;
            j++;
        }
        JLI(value, h, rn);
        (*value)++;   /* bug fix: was `*value++`, which advanced the
                       * pointer and never incremented the count */
    }

    h_length = j;     /* distinct values seen (JLC could count these too) */
    double target = (double)n / (double)h_length;

    for (i = 0; i < h_length; i++) {
        JLG(value, hi, i);
        int k = *value;           /* the i-th distinct value */
        JLG(value, h, k);
        int v = *value;           /* its observed count */
        /* bug fix: fabs, not abs — the difference is a double and
         * abs() would truncate it to int before the comparison */
        if (fabs(v - target) > 0.01 * n * D) {
            fprintf(stderr, "distribution potentially skewed for '%d': expected '%d', got '%d'\n",
                    k, (int)target, v);
            JLFA(t, h); JLFA(t, hi);
            return false;         /* distribution looks skewed */
        }
    }
    JLFA(t, h); JLFA(t, hi);

    return true;                  /* distribution looks uniform */
}
 
 
int frand()
{
return rand() % 10;
}
 
int main(void)
{
    /* Check one million frand() samples against a 1% tolerance. */
    distcheck(frand, 1000000, 1);
    return 0;
}</lang>
 
=={{header|Python}}==